/*
 *	NET3	IP device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Changes:
 *		Alexey Kuznetsov:	pa_* fields are replaced with ifaddr
 *		Cyrus Durgin:		updated for kmod
 *		Matthias Andree:	in devinet_ioctl, compare label and
 *					address (4.4BSD alias style support),
 *					fall back to comparing just the label
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_addr.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysctl.h>
#include <linux/kmod.h>
#include <linux/netconf.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/addrconf.h>

#include "fib_lookup.h"
static struct ipv4_devconf ipv4_devconf = {
	.data = {
		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
	},
};

static struct ipv4_devconf ipv4_devconf_dflt = {
	.data = {
		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
		[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
		[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
		[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
		[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
	},
};
#define IPV4_DEVCONF_DFLT(net, attr) \
	IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)

static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
	[IFA_LOCAL]     	= { .type = NLA_U32 },
	[IFA_ADDRESS]   	= { .type = NLA_U32 },
	[IFA_BROADCAST] 	= { .type = NLA_U32 },
	[IFA_LABEL]     	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
	[IFA_CACHEINFO]		= { .len = sizeof(struct ifa_cacheinfo) },
};
#define IN4_ADDR_HSIZE_SHIFT	8
#define IN4_ADDR_HSIZE		(1U << IN4_ADDR_HSIZE_SHIFT)

static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
static DEFINE_SPINLOCK(inet_addr_hash_lock);

static u32 inet_addr_hash(struct net *net, __be32 addr)
{
	u32 val = (__force u32) addr ^ net_hash_mix(net);

	return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
}

static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
{
	u32 hash = inet_addr_hash(net, ifa->ifa_local);

	spin_lock(&inet_addr_hash_lock);
	hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
	spin_unlock(&inet_addr_hash_lock);
}

static void inet_hash_remove(struct in_ifaddr *ifa)
{
	spin_lock(&inet_addr_hash_lock);
	hlist_del_init_rcu(&ifa->hash);
	spin_unlock(&inet_addr_hash_lock);
}
/**
 * __ip_dev_find - find the first device with a given source address.
 * @net: the net namespace
 * @addr: the source address
 * @devref: if true, take a reference on the found device
 *
 * If a caller uses devref=false, it should be protected by RCU, or RTNL
 */
struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
{
	u32 hash = inet_addr_hash(net, addr);
	struct net_device *result = NULL;
	struct in_ifaddr *ifa;

	rcu_read_lock();
	hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
		if (ifa->ifa_local == addr) {
			struct net_device *dev = ifa->ifa_dev->dev;

			if (!net_eq(dev_net(dev), net))
				continue;
			result = dev;
			break;
		}
	}
	if (!result) {
		struct flowi4 fl4 = { .daddr = addr };
		struct fib_result res = { 0 };
		struct fib_table *local;

		/* Fallback to FIB local table so that communication
		 * over loopback subnets work.
		 */
		local = fib_get_table(net, RT_TABLE_LOCAL);
		if (local &&
		    !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
		    res.type == RTN_LOCAL)
			result = FIB_RES_DEV(res);
	}
	if (result && devref)
		dev_hold(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL(__ip_dev_find);
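/*
 * Illustrative sketch (not part of the original file): one way a caller
 * could use __ip_dev_find() without taking a device reference, relying on
 * RCU as the comment above requires when devref is false. The helper name
 * is an assumption added for clarity only.
 */
static inline bool example_addr_is_local(struct net *net, __be32 addr)
{
	struct net_device *dev;
	bool found;

	rcu_read_lock();
	dev = __ip_dev_find(net, addr, false);	/* no dev_hold(); RCU protects dev */
	found = dev != NULL;
	rcu_read_unlock();

	return found;
}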
static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);

static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
			 int destroy);
#ifdef CONFIG_SYSCTL
static void devinet_sysctl_register(struct in_device *idev);
static void devinet_sysctl_unregister(struct in_device *idev);
#else
static void devinet_sysctl_register(struct in_device *idev)
{
}
static void devinet_sysctl_unregister(struct in_device *idev)
{
}
#endif
/* Locks all the inet devices. */

static struct in_ifaddr *inet_alloc_ifa(void)
{
	return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
}

static void inet_rcu_free_ifa(struct rcu_head *head)
{
	struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);

	in_dev_put(ifa->ifa_dev);
	kfree(ifa);
}

static void inet_free_ifa(struct in_ifaddr *ifa)
{
	call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
}

void in_dev_finish_destroy(struct in_device *idev)
{
	struct net_device *dev = idev->dev;

	WARN_ON(idev->ifa_list);
	WARN_ON(idev->mc_list);
	kfree(rcu_dereference_protected(idev->mc_hash, 1));
#ifdef NET_REFCNT_DEBUG
	pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
#endif
	dev_put(dev);
	if (!idev->dead)
		pr_err("Freeing alive in_device %p\n", idev);
	else
		kfree(idev);
}
EXPORT_SYMBOL(in_dev_finish_destroy);
static struct in_device *inetdev_init(struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
	if (!in_dev)
		goto out;
	memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
			sizeof(in_dev->cnf));
	in_dev->cnf.sysctl = NULL;
	in_dev->dev = dev;
	in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
	if (!in_dev->arp_parms)
		goto out_kfree;
	if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
		dev_disable_lro(dev);
	/* Reference in_dev->dev */
	dev_hold(dev);
	/* Account for reference dev->ip_ptr (below) */
	in_dev_hold(in_dev);

	devinet_sysctl_register(in_dev);
	ip_mc_init_dev(in_dev);
	if (dev->flags & IFF_UP)
		ip_mc_up(in_dev);

	/* we can receive as soon as ip_ptr is set -- do this last */
	rcu_assign_pointer(dev->ip_ptr, in_dev);
out:
	return in_dev;
out_kfree:
	kfree(in_dev);
	in_dev = NULL;
	goto out;
}
static void in_dev_rcu_put(struct rcu_head *head)
{
	struct in_device *idev = container_of(head, struct in_device, rcu_head);

	in_dev_put(idev);
}

static void inetdev_destroy(struct in_device *in_dev)
{
	struct in_ifaddr *ifa;
	struct net_device *dev;

	ASSERT_RTNL();

	dev = in_dev->dev;

	in_dev->dead = 1;

	ip_mc_destroy_dev(in_dev);

	while ((ifa = in_dev->ifa_list) != NULL) {
		inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
		inet_free_ifa(ifa);
	}

	RCU_INIT_POINTER(dev->ip_ptr, NULL);

	devinet_sysctl_unregister(in_dev);
	neigh_parms_release(&arp_tbl, in_dev->arp_parms);
	arp_ifdown(dev);

	call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
}
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
{
	rcu_read_lock();
	for_primary_ifa(in_dev) {
		if (inet_ifa_match(a, ifa)) {
			if (!b || inet_ifa_match(b, ifa)) {
				rcu_read_unlock();
				return 1;
			}
		}
	} endfor_ifa(in_dev);
	rcu_read_unlock();
	return 0;
}
static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
			   int destroy, struct nlmsghdr *nlh, u32 portid)
{
	struct in_ifaddr *promote = NULL;
	struct in_ifaddr *ifa, *ifa1 = *ifap;
	struct in_ifaddr *last_prim = in_dev->ifa_list;
	struct in_ifaddr *prev_prom = NULL;
	int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);

	ASSERT_RTNL();

	/* 1. Deleting primary ifaddr forces deletion all secondaries
	 * unless alias promotion is set
	 */

	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
		struct in_ifaddr **ifap1 = &ifa1->ifa_next;

		while ((ifa = *ifap1) != NULL) {
			if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
			    ifa1->ifa_scope <= ifa->ifa_scope)
				last_prim = ifa;

			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
			    ifa1->ifa_mask != ifa->ifa_mask ||
			    !inet_ifa_match(ifa1->ifa_address, ifa)) {
				ifap1 = &ifa->ifa_next;
				prev_prom = ifa;
				continue;
			}

			if (!do_promote) {
				inet_hash_remove(ifa);
				*ifap1 = ifa->ifa_next;

				rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
				blocking_notifier_call_chain(&inetaddr_chain,
						NETDEV_DOWN, ifa);
				inet_free_ifa(ifa);
			} else {
				promote = ifa;
				break;
			}
		}
	}

	/* On promotion all secondaries from subnet are changing
	 * the primary IP, we must remove all their routes silently
	 * and later to add them back with new prefsrc. Do this
	 * while all addresses are on the device list.
	 */
	for (ifa = promote; ifa; ifa = ifa->ifa_next) {
		if (ifa1->ifa_mask == ifa->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, ifa))
			fib_del_ifaddr(ifa, ifa1);
	}

	/* 2. Unlink it */

	*ifap = ifa1->ifa_next;
	inet_hash_remove(ifa1);

	/* 3. Announce address deletion */

	/* Send message first, then call notifier.
	   At first sight, FIB update triggered by notifier
	   will refer to already deleted ifaddr, that could confuse
	   netlink listeners. It is not true: look, gated sees
	   that route deleted and if it still thinks that ifaddr
	   is valid, it will try to restore deleted routes... Grr.
	   So that, this order is correct.
	 */
	rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);

	if (promote) {
		struct in_ifaddr *next_sec = promote->ifa_next;

		if (prev_prom) {
			prev_prom->ifa_next = promote->ifa_next;
			promote->ifa_next = last_prim->ifa_next;
			last_prim->ifa_next = promote;
		}

		promote->ifa_flags &= ~IFA_F_SECONDARY;
		rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
		blocking_notifier_call_chain(&inetaddr_chain,
				NETDEV_UP, promote);
		for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
			if (ifa1->ifa_mask != ifa->ifa_mask ||
			    !inet_ifa_match(ifa1->ifa_address, ifa))
				continue;
			fib_add_ifaddr(ifa);
		}
	}
	if (destroy)
		inet_free_ifa(ifa1);
}
static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
			 int destroy)
{
	__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
}

static void check_lifetime(struct work_struct *work);

static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
			     u32 portid)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct in_ifaddr *ifa1, **ifap, **last_primary;

	ASSERT_RTNL();

	if (!ifa->ifa_local) {
		inet_free_ifa(ifa);
		return 0;
	}

	ifa->ifa_flags &= ~IFA_F_SECONDARY;
	last_primary = &in_dev->ifa_list;

	for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
	     ifap = &ifa1->ifa_next) {
		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
		    ifa->ifa_scope <= ifa1->ifa_scope)
			last_primary = &ifa1->ifa_next;
		if (ifa1->ifa_mask == ifa->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, ifa)) {
			if (ifa1->ifa_local == ifa->ifa_local) {
				inet_free_ifa(ifa);
				return -EEXIST;
			}
			if (ifa1->ifa_scope != ifa->ifa_scope) {
				inet_free_ifa(ifa);
				return -EINVAL;
			}
			ifa->ifa_flags |= IFA_F_SECONDARY;
		}
	}

	if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
		net_srandom(ifa->ifa_local);
		ifap = last_primary;
	}

	ifa->ifa_next = *ifap;
	*ifap = ifa;

	inet_hash_insert(dev_net(in_dev->dev), ifa);

	cancel_delayed_work(&check_lifetime_work);
	schedule_delayed_work(&check_lifetime_work, 0);

	/* Send message first, then call notifier.
	   Notifier will trigger FIB update, so that
	   listeners of netlink will know about new ifaddr */
	rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);

	return 0;
}

static int inet_insert_ifa(struct in_ifaddr *ifa)
{
	return __inet_insert_ifa(ifa, NULL, 0);
}
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
{
	struct in_device *in_dev = __in_dev_get_rtnl(dev);

	ASSERT_RTNL();

	if (!in_dev) {
		inet_free_ifa(ifa);
		return -ENOBUFS;
	}
	ipv4_devconf_setall(in_dev);
	if (ifa->ifa_dev != in_dev) {
		WARN_ON(ifa->ifa_dev);
		in_dev_hold(in_dev);
		ifa->ifa_dev = in_dev;
	}
	if (ipv4_is_loopback(ifa->ifa_local))
		ifa->ifa_scope = RT_SCOPE_HOST;
	return inet_insert_ifa(ifa);
}
/* Caller must hold RCU or RTNL :
 * We dont take a reference on found in_device
 */
struct in_device *inetdev_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct in_device *in_dev = NULL;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		in_dev = rcu_dereference_rtnl(dev->ip_ptr);
	rcu_read_unlock();
	return in_dev;
}
EXPORT_SYMBOL(inetdev_by_index);
/* Called only from RTNL semaphored context. No locks. */

struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
				    __be32 mask)
{
	ASSERT_RTNL();

	for_primary_ifa(in_dev) {
		if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
			return ifa;
	} endfor_ifa(in_dev);
	return NULL;
}
static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX+1];
	struct in_device *in_dev;
	struct ifaddrmsg *ifm;
	struct in_ifaddr *ifa, **ifap;
	int err = -EINVAL;

	ASSERT_RTNL();

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
	if (err < 0)
		goto errout;

	ifm = nlmsg_data(nlh);
	in_dev = inetdev_by_index(net, ifm->ifa_index);
	if (in_dev == NULL) {
		err = -ENODEV;
		goto errout;
	}

	for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
	     ifap = &ifa->ifa_next) {
		if (tb[IFA_LOCAL] &&
		    ifa->ifa_local != nla_get_be32(tb[IFA_LOCAL]))
			continue;

		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
			continue;

		if (tb[IFA_ADDRESS] &&
		    (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
		    !inet_ifa_match(nla_get_be32(tb[IFA_ADDRESS]), ifa)))
			continue;

		__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
		return 0;
	}

	err = -EADDRNOTAVAIL;
errout:
	return err;
}

#define INFINITY_LIFE_TIME	0xFFFFFFFF
static void check_lifetime(struct work_struct *work)
{
	unsigned long now, next, next_sec, next_sched;
	struct in_ifaddr *ifa;
	struct hlist_node *n;
	int i;

	now = jiffies;
	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);

	for (i = 0; i < IN4_ADDR_HSIZE; i++) {
		bool change_needed = false;

		rcu_read_lock();
		hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
			unsigned long age;

			if (ifa->ifa_flags & IFA_F_PERMANENT)
				continue;

			/* We try to batch several events at once. */
			age = (now - ifa->ifa_tstamp +
			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;

			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
			    age >= ifa->ifa_valid_lft) {
				change_needed = true;
			} else if (ifa->ifa_preferred_lft ==
				   INFINITY_LIFE_TIME) {
				continue;
			} else if (age >= ifa->ifa_preferred_lft) {
				if (time_before(ifa->ifa_tstamp +
						ifa->ifa_valid_lft * HZ, next))
					next = ifa->ifa_tstamp +
					       ifa->ifa_valid_lft * HZ;

				if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
					change_needed = true;
			} else if (time_before(ifa->ifa_tstamp +
					       ifa->ifa_preferred_lft * HZ,
					       next)) {
				next = ifa->ifa_tstamp +
				       ifa->ifa_preferred_lft * HZ;
			}
		}
		rcu_read_unlock();
		if (!change_needed)
			continue;
		rtnl_lock();
		hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
			unsigned long age;

			if (ifa->ifa_flags & IFA_F_PERMANENT)
				continue;

			/* We try to batch several events at once. */
			age = (now - ifa->ifa_tstamp +
			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ;

			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
			    age >= ifa->ifa_valid_lft) {
				struct in_ifaddr **ifap;

				for (ifap = &ifa->ifa_dev->ifa_list;
				     *ifap != NULL; ifap = &(*ifap)->ifa_next) {
					if (*ifap == ifa) {
						inet_del_ifa(ifa->ifa_dev,
							     ifap, 1);
						break;
					}
				}
			} else if (ifa->ifa_preferred_lft !=
				   INFINITY_LIFE_TIME &&
				   age >= ifa->ifa_preferred_lft &&
				   !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
				ifa->ifa_flags |= IFA_F_DEPRECATED;
				rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
			}
		}
		rtnl_unlock();
	}

	next_sec = round_jiffies_up(next);
	next_sched = next;

	/* If rounded timeout is accurate enough, accept it. */
	if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
		next_sched = next_sec;

	now = jiffies;
	/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
	if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
		next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;

	schedule_delayed_work(&check_lifetime_work, next_sched - now);
}
static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
			     __u32 prefered_lft)
{
	unsigned long timeout;

	ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);

	timeout = addrconf_timeout_fixup(valid_lft, HZ);
	if (addrconf_finite_timeout(timeout))
		ifa->ifa_valid_lft = timeout;
	else
		ifa->ifa_flags |= IFA_F_PERMANENT;

	timeout = addrconf_timeout_fixup(prefered_lft, HZ);
	if (addrconf_finite_timeout(timeout)) {
		if (timeout == 0)
			ifa->ifa_flags |= IFA_F_DEPRECATED;
		ifa->ifa_preferred_lft = timeout;
	}
	ifa->ifa_tstamp = jiffies;
	if (!ifa->ifa_cstamp)
		ifa->ifa_cstamp = ifa->ifa_tstamp;
}
static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
				       __u32 *pvalid_lft, __u32 *pprefered_lft)
{
	struct nlattr *tb[IFA_MAX+1];
	struct in_ifaddr *ifa;
	struct ifaddrmsg *ifm;
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
	if (err < 0)
		goto errout;

	ifm = nlmsg_data(nlh);
	err = -EINVAL;
	if (ifm->ifa_prefixlen > 32 || tb[IFA_LOCAL] == NULL)
		goto errout;

	dev = __dev_get_by_index(net, ifm->ifa_index);
	err = -ENODEV;
	if (dev == NULL)
		goto errout;

	in_dev = __in_dev_get_rtnl(dev);
	err = -ENOBUFS;
	if (in_dev == NULL)
		goto errout;

	ifa = inet_alloc_ifa();
	if (ifa == NULL)
		/*
		 * A potential indev allocation can be left alive, it stays
		 * assigned to its device and is destroyed with it.
		 */
		goto errout;

	ipv4_devconf_setall(in_dev);
	in_dev_hold(in_dev);

	if (tb[IFA_ADDRESS] == NULL)
		tb[IFA_ADDRESS] = tb[IFA_LOCAL];

	INIT_HLIST_NODE(&ifa->hash);
	ifa->ifa_prefixlen = ifm->ifa_prefixlen;
	ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
	ifa->ifa_flags = ifm->ifa_flags;
	ifa->ifa_scope = ifm->ifa_scope;
	ifa->ifa_dev = in_dev;

	ifa->ifa_local = nla_get_be32(tb[IFA_LOCAL]);
	ifa->ifa_address = nla_get_be32(tb[IFA_ADDRESS]);

	if (tb[IFA_BROADCAST])
		ifa->ifa_broadcast = nla_get_be32(tb[IFA_BROADCAST]);

	if (tb[IFA_LABEL])
		nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
	else
		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);

	if (tb[IFA_CACHEINFO]) {
		struct ifa_cacheinfo *ci;

		ci = nla_data(tb[IFA_CACHEINFO]);
		if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
			err = -EINVAL;
			goto errout_free;
		}
		*pvalid_lft = ci->ifa_valid;
		*pprefered_lft = ci->ifa_prefered;
	}

	return ifa;

errout_free:
	inet_free_ifa(ifa);
errout:
	return ERR_PTR(err);
}
static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct in_ifaddr *ifa1, **ifap;

	if (!ifa->ifa_local)
		return NULL;

	for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
	     ifap = &ifa1->ifa_next) {
		if (ifa1->ifa_mask == ifa->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, ifa) &&
		    ifa1->ifa_local == ifa->ifa_local)
			return ifa1;
	}
	return NULL;
}
static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct in_ifaddr *ifa;
	struct in_ifaddr *ifa_existing;
	__u32 valid_lft = INFINITY_LIFE_TIME;
	__u32 prefered_lft = INFINITY_LIFE_TIME;

	ASSERT_RTNL();

	ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
	if (IS_ERR(ifa))
		return PTR_ERR(ifa);

	ifa_existing = find_matching_ifa(ifa);
	if (!ifa_existing) {
		/* It would be best to check for !NLM_F_CREATE here but
		 * userspace already relies on not having to provide this.
		 */
		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
		return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
	} else {
		inet_free_ifa(ifa);

		if (nlh->nlmsg_flags & NLM_F_EXCL ||
		    !(nlh->nlmsg_flags & NLM_F_REPLACE))
			return -EEXIST;
		ifa = ifa_existing;
		set_ifa_lifetime(ifa, valid_lft, prefered_lft);
		cancel_delayed_work(&check_lifetime_work);
		schedule_delayed_work(&check_lifetime_work, 0);
		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
		blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
	}
	return 0;
}
/*
 *	Determine a default network mask, based on the IP address.
 */

static int inet_abc_len(__be32 addr)
{
	int rc = -1;	/* Something else, probably a multicast. */

	if (ipv4_is_zeronet(addr))
		rc = 0;
	else {
		__u32 haddr = ntohl(addr);

		if (IN_CLASSA(haddr))
			rc = 8;
		else if (IN_CLASSB(haddr))
			rc = 16;
		else if (IN_CLASSC(haddr))
			rc = 24;
	}

	return rc;
}
*net
, unsigned int cmd
, void __user
*arg
)
880 struct sockaddr_in sin_orig
;
881 struct sockaddr_in
*sin
= (struct sockaddr_in
*)&ifr
.ifr_addr
;
882 struct in_device
*in_dev
;
883 struct in_ifaddr
**ifap
= NULL
;
884 struct in_ifaddr
*ifa
= NULL
;
885 struct net_device
*dev
;
888 int tryaddrmatch
= 0;
891 * Fetch the caller's info block into kernel space
894 if (copy_from_user(&ifr
, arg
, sizeof(struct ifreq
)))
896 ifr
.ifr_name
[IFNAMSIZ
- 1] = 0;
898 /* save original address for comparison */
899 memcpy(&sin_orig
, sin
, sizeof(*sin
));
901 colon
= strchr(ifr
.ifr_name
, ':');
905 dev_load(net
, ifr
.ifr_name
);
908 case SIOCGIFADDR
: /* Get interface address */
909 case SIOCGIFBRDADDR
: /* Get the broadcast address */
910 case SIOCGIFDSTADDR
: /* Get the destination address */
911 case SIOCGIFNETMASK
: /* Get the netmask for the interface */
912 /* Note that these ioctls will not sleep,
913 so that we do not impose a lock.
914 One day we will be forced to put shlock here (I mean SMP)
916 tryaddrmatch
= (sin_orig
.sin_family
== AF_INET
);
917 memset(sin
, 0, sizeof(*sin
));
918 sin
->sin_family
= AF_INET
;
923 if (!ns_capable(net
->user_ns
, CAP_NET_ADMIN
))
926 case SIOCSIFADDR
: /* Set interface address (and family) */
927 case SIOCSIFBRDADDR
: /* Set the broadcast address */
928 case SIOCSIFDSTADDR
: /* Set the destination address */
929 case SIOCSIFNETMASK
: /* Set the netmask for the interface */
931 if (!ns_capable(net
->user_ns
, CAP_NET_ADMIN
))
934 if (sin
->sin_family
!= AF_INET
)
945 dev
= __dev_get_by_name(net
, ifr
.ifr_name
);
952 in_dev
= __in_dev_get_rtnl(dev
);
955 /* Matthias Andree */
956 /* compare label and address (4.4BSD style) */
957 /* note: we only do this for a limited set of ioctls
958 and only if the original address family was AF_INET.
959 This is checked above. */
960 for (ifap
= &in_dev
->ifa_list
; (ifa
= *ifap
) != NULL
;
961 ifap
= &ifa
->ifa_next
) {
962 if (!strcmp(ifr
.ifr_name
, ifa
->ifa_label
) &&
963 sin_orig
.sin_addr
.s_addr
==
969 /* we didn't get a match, maybe the application is
970 4.3BSD-style and passed in junk so we fall back to
971 comparing just the label */
973 for (ifap
= &in_dev
->ifa_list
; (ifa
= *ifap
) != NULL
;
974 ifap
= &ifa
->ifa_next
)
975 if (!strcmp(ifr
.ifr_name
, ifa
->ifa_label
))
980 ret
= -EADDRNOTAVAIL
;
981 if (!ifa
&& cmd
!= SIOCSIFADDR
&& cmd
!= SIOCSIFFLAGS
)
985 case SIOCGIFADDR
: /* Get interface address */
986 sin
->sin_addr
.s_addr
= ifa
->ifa_local
;
989 case SIOCGIFBRDADDR
: /* Get the broadcast address */
990 sin
->sin_addr
.s_addr
= ifa
->ifa_broadcast
;
993 case SIOCGIFDSTADDR
: /* Get the destination address */
994 sin
->sin_addr
.s_addr
= ifa
->ifa_address
;
997 case SIOCGIFNETMASK
: /* Get the netmask for the interface */
998 sin
->sin_addr
.s_addr
= ifa
->ifa_mask
;
1003 ret
= -EADDRNOTAVAIL
;
1007 if (!(ifr
.ifr_flags
& IFF_UP
))
1008 inet_del_ifa(in_dev
, ifap
, 1);
1011 ret
= dev_change_flags(dev
, ifr
.ifr_flags
);
1014 case SIOCSIFADDR
: /* Set interface address (and family) */
1016 if (inet_abc_len(sin
->sin_addr
.s_addr
) < 0)
1021 ifa
= inet_alloc_ifa();
1024 INIT_HLIST_NODE(&ifa
->hash
);
1026 memcpy(ifa
->ifa_label
, ifr
.ifr_name
, IFNAMSIZ
);
1028 memcpy(ifa
->ifa_label
, dev
->name
, IFNAMSIZ
);
1031 if (ifa
->ifa_local
== sin
->sin_addr
.s_addr
)
1033 inet_del_ifa(in_dev
, ifap
, 0);
1034 ifa
->ifa_broadcast
= 0;
1038 ifa
->ifa_address
= ifa
->ifa_local
= sin
->sin_addr
.s_addr
;
1040 if (!(dev
->flags
& IFF_POINTOPOINT
)) {
1041 ifa
->ifa_prefixlen
= inet_abc_len(ifa
->ifa_address
);
1042 ifa
->ifa_mask
= inet_make_mask(ifa
->ifa_prefixlen
);
1043 if ((dev
->flags
& IFF_BROADCAST
) &&
1044 ifa
->ifa_prefixlen
< 31)
1045 ifa
->ifa_broadcast
= ifa
->ifa_address
|
1048 ifa
->ifa_prefixlen
= 32;
1049 ifa
->ifa_mask
= inet_make_mask(32);
1051 set_ifa_lifetime(ifa
, INFINITY_LIFE_TIME
, INFINITY_LIFE_TIME
);
1052 ret
= inet_set_ifa(dev
, ifa
);
1055 case SIOCSIFBRDADDR
: /* Set the broadcast address */
1057 if (ifa
->ifa_broadcast
!= sin
->sin_addr
.s_addr
) {
1058 inet_del_ifa(in_dev
, ifap
, 0);
1059 ifa
->ifa_broadcast
= sin
->sin_addr
.s_addr
;
1060 inet_insert_ifa(ifa
);
1064 case SIOCSIFDSTADDR
: /* Set the destination address */
1066 if (ifa
->ifa_address
== sin
->sin_addr
.s_addr
)
1069 if (inet_abc_len(sin
->sin_addr
.s_addr
) < 0)
1072 inet_del_ifa(in_dev
, ifap
, 0);
1073 ifa
->ifa_address
= sin
->sin_addr
.s_addr
;
1074 inet_insert_ifa(ifa
);
1077 case SIOCSIFNETMASK
: /* Set the netmask for the interface */
1080 * The mask we set must be legal.
1083 if (bad_mask(sin
->sin_addr
.s_addr
, 0))
1086 if (ifa
->ifa_mask
!= sin
->sin_addr
.s_addr
) {
1087 __be32 old_mask
= ifa
->ifa_mask
;
1088 inet_del_ifa(in_dev
, ifap
, 0);
1089 ifa
->ifa_mask
= sin
->sin_addr
.s_addr
;
1090 ifa
->ifa_prefixlen
= inet_mask_len(ifa
->ifa_mask
);
1092 /* See if current broadcast address matches
1093 * with current netmask, then recalculate
1094 * the broadcast address. Otherwise it's a
1095 * funny address, so don't touch it since
1096 * the user seems to know what (s)he's doing...
1098 if ((dev
->flags
& IFF_BROADCAST
) &&
1099 (ifa
->ifa_prefixlen
< 31) &&
1100 (ifa
->ifa_broadcast
==
1101 (ifa
->ifa_local
|~old_mask
))) {
1102 ifa
->ifa_broadcast
= (ifa
->ifa_local
|
1103 ~sin
->sin_addr
.s_addr
);
1105 inet_insert_ifa(ifa
);
1115 ret
= copy_to_user(arg
, &ifr
, sizeof(struct ifreq
)) ? -EFAULT
: 0;
1119 static int inet_gifconf(struct net_device
*dev
, char __user
*buf
, int len
)
1121 struct in_device
*in_dev
= __in_dev_get_rtnl(dev
);
1122 struct in_ifaddr
*ifa
;
1129 for (ifa
= in_dev
->ifa_list
; ifa
; ifa
= ifa
->ifa_next
) {
1131 done
+= sizeof(ifr
);
1134 if (len
< (int) sizeof(ifr
))
1136 memset(&ifr
, 0, sizeof(struct ifreq
));
1137 strcpy(ifr
.ifr_name
, ifa
->ifa_label
);
1139 (*(struct sockaddr_in
*)&ifr
.ifr_addr
).sin_family
= AF_INET
;
1140 (*(struct sockaddr_in
*)&ifr
.ifr_addr
).sin_addr
.s_addr
=
1143 if (copy_to_user(buf
, &ifr
, sizeof(struct ifreq
))) {
1147 buf
+= sizeof(struct ifreq
);
1148 len
-= sizeof(struct ifreq
);
1149 done
+= sizeof(struct ifreq
);
1155 __be32
inet_select_addr(const struct net_device
*dev
, __be32 dst
, int scope
)
1158 struct in_device
*in_dev
;
1159 struct net
*net
= dev_net(dev
);
1162 in_dev
= __in_dev_get_rcu(dev
);
1166 for_primary_ifa(in_dev
) {
1167 if (ifa
->ifa_scope
> scope
)
1169 if (!dst
|| inet_ifa_match(dst
, ifa
)) {
1170 addr
= ifa
->ifa_local
;
1174 addr
= ifa
->ifa_local
;
1175 } endfor_ifa(in_dev
);
1181 /* Not loopback addresses on loopback should be preferred
1182 in this case. It is importnat that lo is the first interface
1185 for_each_netdev_rcu(net
, dev
) {
1186 in_dev
= __in_dev_get_rcu(dev
);
1190 for_primary_ifa(in_dev
) {
1191 if (ifa
->ifa_scope
!= RT_SCOPE_LINK
&&
1192 ifa
->ifa_scope
<= scope
) {
1193 addr
= ifa
->ifa_local
;
1196 } endfor_ifa(in_dev
);
1202 EXPORT_SYMBOL(inet_select_addr
);
static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
				 __be32 local, int scope)
{
	int same = 0;
	__be32 addr = 0;

	for_ifa(in_dev) {
		if (!addr &&
		    (local == ifa->ifa_local || !local) &&
		    ifa->ifa_scope <= scope) {
			addr = ifa->ifa_local;
			if (same)
				break;
		}
		if (!same) {
			same = (!local || inet_ifa_match(local, ifa)) &&
				(!dst || inet_ifa_match(dst, ifa));
			if (same && addr) {
				if (local || !dst)
					break;
				/* Is the selected addr into dst subnet? */
				if (inet_ifa_match(addr, ifa))
					break;
				/* No, then can we use new local src? */
				if (ifa->ifa_scope <= scope) {
					addr = ifa->ifa_local;
					break;
				}
				/* search for large dst subnet for addr */
				same = 0;
			}
		}
	} endfor_ifa(in_dev);

	return same ? addr : 0;
}
/*
 * Confirm that local IP address exists using wildcards:
 * - in_dev: only on this interface, 0=any interface
 * - dst: only in the same subnet as dst, 0=any dst
 * - local: address, 0=autoselect the local address
 * - scope: maximum allowed scope value for the local address
 */
__be32 inet_confirm_addr(struct in_device *in_dev,
			 __be32 dst, __be32 local, int scope)
{
	__be32 addr = 0;
	struct net_device *dev;
	struct net *net;

	if (scope != RT_SCOPE_LINK)
		return confirm_addr_indev(in_dev, dst, local, scope);

	net = dev_net(in_dev->dev);
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		in_dev = __in_dev_get_rcu(dev);
		if (in_dev) {
			addr = confirm_addr_indev(in_dev, dst, local, scope);
			if (addr)
				break;
		}
	}
	rcu_read_unlock();

	return addr;
}
EXPORT_SYMBOL(inet_confirm_addr);
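/*
 * Illustrative sketch (not part of the original file): checking that a
 * specific local address is configured on a given interface in the same
 * subnet as a destination, using the wildcard semantics documented above.
 * The wrapper name is an assumption added for clarity only; in_dev is
 * assumed non-NULL.
 */
static inline bool example_local_addr_confirmed(struct in_device *in_dev,
						__be32 dst, __be32 local)
{
	/* dst != 0 restricts the match to dst's subnet; RT_SCOPE_HOST
	 * accepts addresses of any scope up to host scope. */
	return inet_confirm_addr(in_dev, dst, local, RT_SCOPE_HOST) != 0;
}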
int register_inetaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(register_inetaddr_notifier);

int unregister_inetaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(unregister_inetaddr_notifier);
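/*
 * Illustrative sketch (not part of the original file): a typical user of the
 * chain registers a notifier_block whose callback receives the affected
 * struct in_ifaddr; NETDEV_UP/NETDEV_DOWN report address add and remove.
 * The names below are assumptions added for clarity only.
 */
static int example_inetaddr_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;

	if (event == NETDEV_UP)
		pr_debug("inet address %pI4 added on %s\n",
			 &ifa->ifa_local, ifa->ifa_dev->dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_inetaddr_notifier = {
	.notifier_call = example_inetaddr_event,
};
/* register_inetaddr_notifier(&example_inetaddr_notifier) at init time,
 * unregister_inetaddr_notifier(&example_inetaddr_notifier) on teardown. */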
/* Rename ifa_labels for a device name change. Make some effort to preserve
 * existing alias numbering and to create unique labels if possible.
 */
static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
{
	struct in_ifaddr *ifa;
	int named = 0;

	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
		char old[IFNAMSIZ], *dot;

		memcpy(old, ifa->ifa_label, IFNAMSIZ);
		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
		if (named++ == 0)
			goto skip;
		dot = strchr(old, ':');
		if (dot == NULL) {
			sprintf(old, ":%d", named);
			dot = old;
		}
		if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
			strcat(ifa->ifa_label, dot);
		else
			strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
skip:
		rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
	}
}
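/*
 * Worked example (added for clarity, not in the original): if eth0 carries
 * the labels "eth0" and "eth0:1" and the device is renamed to em1, the loop
 * above rewrites them to "em1" and "em1:1"; an alias whose old label had no
 * ':' suffix is given a numeric one (":2", ":3", ...) so labels stay unique.
 */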
static bool inetdev_valid_mtu(unsigned int mtu)
{
	return mtu >= 68;
}

static void inetdev_send_gratuitous_arp(struct net_device *dev,
					struct in_device *in_dev)

{
	struct in_ifaddr *ifa;

	for (ifa = in_dev->ifa_list; ifa;
	     ifa = ifa->ifa_next) {
		arp_send(ARPOP_REQUEST, ETH_P_ARP,
			 ifa->ifa_local, dev,
			 ifa->ifa_local, NULL,
			 dev->dev_addr, NULL);
	}
}
/* Called only under RTNL semaphore */

static int inetdev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct in_device *in_dev = __in_dev_get_rtnl(dev);

	ASSERT_RTNL();

	if (!in_dev) {
		if (event == NETDEV_REGISTER) {
			in_dev = inetdev_init(dev);
			if (!in_dev)
				return notifier_from_errno(-ENOMEM);
			if (dev->flags & IFF_LOOPBACK) {
				IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
				IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
			}
		} else if (event == NETDEV_CHANGEMTU) {
			/* Re-enabling IP */
			if (inetdev_valid_mtu(dev->mtu))
				in_dev = inetdev_init(dev);
		}
		goto out;
	}

	switch (event) {
	case NETDEV_REGISTER:
		pr_debug("%s: bug\n", __func__);
		RCU_INIT_POINTER(dev->ip_ptr, NULL);
		break;
	case NETDEV_UP:
		if (!inetdev_valid_mtu(dev->mtu))
			break;
		if (dev->flags & IFF_LOOPBACK) {
			struct in_ifaddr *ifa = inet_alloc_ifa();

			if (ifa) {
				INIT_HLIST_NODE(&ifa->hash);
				ifa->ifa_local =
				  ifa->ifa_address = htonl(INADDR_LOOPBACK);
				ifa->ifa_prefixlen = 8;
				ifa->ifa_mask = inet_make_mask(8);
				in_dev_hold(in_dev);
				ifa->ifa_dev = in_dev;
				ifa->ifa_scope = RT_SCOPE_HOST;
				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
				set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
						 INFINITY_LIFE_TIME);
				inet_insert_ifa(ifa);
			}
		}
		ip_mc_up(in_dev);
		/* fall through */
	case NETDEV_CHANGEADDR:
		if (!IN_DEV_ARP_NOTIFY(in_dev))
			break;
		/* fall through */
	case NETDEV_NOTIFY_PEERS:
		/* Send gratuitous ARP to notify of link change */
		inetdev_send_gratuitous_arp(dev, in_dev);
		break;
	case NETDEV_DOWN:
		ip_mc_down(in_dev);
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		ip_mc_unmap(in_dev);
		break;
	case NETDEV_POST_TYPE_CHANGE:
		ip_mc_remap(in_dev);
		break;
	case NETDEV_CHANGEMTU:
		if (inetdev_valid_mtu(dev->mtu))
			break;
		/* disable IP when MTU is not enough */
	case NETDEV_UNREGISTER:
		inetdev_destroy(in_dev);
		break;
	case NETDEV_CHANGENAME:
		/* Do not notify about label change, this event is
		 * not interesting to applications using netlink.
		 */
		inetdev_changename(dev, in_dev);

		devinet_sysctl_unregister(in_dev);
		devinet_sysctl_register(in_dev);
		break;
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block ip_netdev_notifier = {
	.notifier_call = inetdev_event,
};
static size_t inet_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
	       + nla_total_size(4) /* IFA_ADDRESS */
	       + nla_total_size(4) /* IFA_LOCAL */
	       + nla_total_size(4) /* IFA_BROADCAST */
	       + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
	       + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
}

static inline u32 cstamp_delta(unsigned long cstamp)
{
	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
}

static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
			 unsigned long tstamp, u32 preferred, u32 valid)
{
	struct ifa_cacheinfo ci;

	ci.cstamp = cstamp_delta(cstamp);
	ci.tstamp = cstamp_delta(tstamp);
	ci.ifa_prefered = preferred;
	ci.ifa_valid = valid;

	return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
}
static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
			    u32 portid, u32 seq, int event, unsigned int flags)
{
	struct ifaddrmsg *ifm;
	struct nlmsghdr *nlh;
	u32 preferred, valid;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifa_family = AF_INET;
	ifm->ifa_prefixlen = ifa->ifa_prefixlen;
	ifm->ifa_flags = ifa->ifa_flags;
	ifm->ifa_scope = ifa->ifa_scope;
	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;

	if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
		preferred = ifa->ifa_preferred_lft;
		valid = ifa->ifa_valid_lft;
		if (preferred != INFINITY_LIFE_TIME) {
			long tval = (jiffies - ifa->ifa_tstamp) / HZ;

			if (preferred > tval)
				preferred -= tval;
			else
				preferred = 0;
			if (valid != INFINITY_LIFE_TIME) {
				if (valid > tval)
					valid -= tval;
				else
					valid = 0;
			}
		}
	} else {
		preferred = INFINITY_LIFE_TIME;
		valid = INFINITY_LIFE_TIME;
	}
	if ((ifa->ifa_address &&
	     nla_put_be32(skb, IFA_ADDRESS, ifa->ifa_address)) ||
	    (ifa->ifa_local &&
	     nla_put_be32(skb, IFA_LOCAL, ifa->ifa_local)) ||
	    (ifa->ifa_broadcast &&
	     nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
	    (ifa->ifa_label[0] &&
	     nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
	    put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
			  preferred, valid))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int h, s_h;
	int idx, s_idx;
	int ip_idx, s_ip_idx;
	struct net_device *dev;
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	struct hlist_head *head;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	s_ip_idx = ip_idx = cb->args[2];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		rcu_read_lock();
		cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
			  net->dev_base_seq;
		hlist_for_each_entry_rcu(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			if (h > s_h || idx > s_idx)
				s_ip_idx = 0;
			in_dev = __in_dev_get_rcu(dev);
			if (!in_dev)
				goto cont;

			for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
			     ifa = ifa->ifa_next, ip_idx++) {
				if (ip_idx < s_ip_idx)
					continue;
				if (inet_fill_ifaddr(skb, ifa,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWADDR, NLM_F_MULTI) <= 0) {
					rcu_read_unlock();
					goto done;
				}
				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
			}
cont:
			idx++;
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	cb->args[2] = ip_idx;

	return skb->len;
}
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
		      u32 portid)
{
	struct sk_buff *skb;
	u32 seq = nlh ? nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;
	struct net *net;

	net = dev_net(ifa->ifa_dev->dev);
	skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in inet_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
}
static size_t inet_get_link_af_size(const struct net_device *dev)
{
	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);

	if (!in_dev)
		return 0;

	return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
}

static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
{
	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
	struct nlattr *nla;
	int i;

	if (!in_dev)
		return -ENODATA;

	nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
	if (nla == NULL)
		return -EMSGSIZE;

	for (i = 0; i < IPV4_DEVCONF_MAX; i++)
		((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];

	return 0;
}
static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
	[IFLA_INET_CONF]	= { .type = NLA_NESTED },
};

static int inet_validate_link_af(const struct net_device *dev,
				 const struct nlattr *nla)
{
	struct nlattr *a, *tb[IFLA_INET_MAX+1];
	int err, rem;

	if (dev && !__in_dev_get_rtnl(dev))
		return -EAFNOSUPPORT;

	err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
	if (err < 0)
		return err;

	if (tb[IFLA_INET_CONF]) {
		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
			int cfgid = nla_type(a);

			if (nla_len(a) < 4)
				return -EINVAL;

			if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
				return -EINVAL;
		}
	}

	return 0;
}

static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
{
	struct in_device *in_dev = __in_dev_get_rtnl(dev);
	struct nlattr *a, *tb[IFLA_INET_MAX+1];
	int rem;

	if (!in_dev)
		return -EAFNOSUPPORT;

	if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
		BUG();

	if (tb[IFLA_INET_CONF]) {
		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
			ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
	}

	return 0;
}
static int inet_netconf_msgsize_devconf(int type)
{
	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
		   + nla_total_size(4);	/* NETCONFA_IFINDEX */

	/* type -1 is used for ALL */
	if (type == -1 || type == NETCONFA_FORWARDING)
		size += nla_total_size(4);
	if (type == -1 || type == NETCONFA_RP_FILTER)
		size += nla_total_size(4);
	if (type == -1 || type == NETCONFA_MC_FORWARDING)
		size += nla_total_size(4);

	return size;
}

static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
				     struct ipv4_devconf *devconf, u32 portid,
				     u32 seq, int event, unsigned int flags,
				     int type)
{
	struct nlmsghdr  *nlh;
	struct netconfmsg *ncm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
			flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ncm = nlmsg_data(nlh);
	ncm->ncm_family = AF_INET;

	if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
		goto nla_put_failure;

	/* type -1 is used for ALL */
	if ((type == -1 || type == NETCONFA_FORWARDING) &&
	    nla_put_s32(skb, NETCONFA_FORWARDING,
			IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
		goto nla_put_failure;
	if ((type == -1 || type == NETCONFA_RP_FILTER) &&
	    nla_put_s32(skb, NETCONFA_RP_FILTER,
			IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
		goto nla_put_failure;
	if ((type == -1 || type == NETCONFA_MC_FORWARDING) &&
	    nla_put_s32(skb, NETCONFA_MC_FORWARDING,
			IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
				 struct ipv4_devconf *devconf)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
					RTM_NEWNETCONF, 0, type);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
}
static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
	[NETCONFA_RP_FILTER]	= { .len = sizeof(int) },
};

static int inet_netconf_get_devconf(struct sk_buff *in_skb,
				    struct nlmsghdr *nlh)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[NETCONFA_MAX+1];
	struct netconfmsg *ncm;
	struct sk_buff *skb;
	struct ipv4_devconf *devconf;
	struct in_device *in_dev;
	struct net_device *dev;
	int ifindex;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
			  devconf_ipv4_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (!tb[NETCONFA_IFINDEX])
		goto errout;

	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
	switch (ifindex) {
	case NETCONFA_IFINDEX_ALL:
		devconf = net->ipv4.devconf_all;
		break;
	case NETCONFA_IFINDEX_DEFAULT:
		devconf = net->ipv4.devconf_dflt;
		break;
	default:
		dev = __dev_get_by_index(net, ifindex);
		if (dev == NULL)
			goto errout;
		in_dev = __in_dev_get_rtnl(dev);
		if (in_dev == NULL)
			goto errout;
		devconf = &in_dev->cnf;
		break;
	}

	err = -ENOBUFS;
	skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = inet_netconf_fill_devconf(skb, ifindex, devconf,
					NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
					-1);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}
static int inet_netconf_dump_devconf(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int h, s_h;
	int idx, s_idx;
	struct net_device *dev;
	struct in_device *in_dev;
	struct hlist_head *head;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		rcu_read_lock();
		cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
			  net->dev_base_seq;
		hlist_for_each_entry_rcu(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			in_dev = __in_dev_get_rcu(dev);
			if (!in_dev)
				goto cont;

			if (inet_netconf_fill_devconf(skb, dev->ifindex,
						      &in_dev->cnf,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      RTM_NEWNETCONF,
						      NLM_F_MULTI,
						      -1) <= 0) {
				rcu_read_unlock();
				goto done;
			}
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
		rcu_read_unlock();
	}
	if (h == NETDEV_HASHENTRIES) {
		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
					      net->ipv4.devconf_all,
					      NETLINK_CB(cb->skb).portid,
					      cb->nlh->nlmsg_seq,
					      RTM_NEWNETCONF, NLM_F_MULTI,
					      -1) <= 0)
			goto done;
		else
			h++;
	}
	if (h == NETDEV_HASHENTRIES + 1) {
		if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
					      net->ipv4.devconf_dflt,
					      NETLINK_CB(cb->skb).portid,
					      cb->nlh->nlmsg_seq,
					      RTM_NEWNETCONF, NLM_F_MULTI,
					      -1) <= 0)
			goto done;
		else
			h++;
	}
done:
	cb->args[0] = h;
	cb->args[1] = idx;

	return skb->len;
}
#ifdef CONFIG_SYSCTL

static void devinet_copy_dflt_conf(struct net *net, int i)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct in_device *in_dev;

		in_dev = __in_dev_get_rcu(dev);
		if (in_dev && !test_bit(i, in_dev->cnf.state))
			in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
	}
	rcu_read_unlock();
}

/* called with RTNL locked */
static void inet_forward_change(struct net *net)
{
	struct net_device *dev;
	int on = IPV4_DEVCONF_ALL(net, FORWARDING);

	IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
	IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
	inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
				    NETCONFA_IFINDEX_ALL,
				    net->ipv4.devconf_all);
	inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
				    NETCONFA_IFINDEX_DEFAULT,
				    net->ipv4.devconf_dflt);

	for_each_netdev(net, dev) {
		struct in_device *in_dev;

		if (on)
			dev_disable_lro(dev);
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(dev);
		if (in_dev) {
			IN_DEV_CONF_SET(in_dev, FORWARDING, on);
			inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
						    dev->ifindex, &in_dev->cnf);
		}
		rcu_read_unlock();
	}
}
static int devinet_conf_proc(struct ctl_table *ctl, int write,
			     void __user *buffer,
			     size_t *lenp, loff_t *ppos)
{
	int old_value = *(int *)ctl->data;
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	int new_value = *(int *)ctl->data;

	if (write) {
		struct ipv4_devconf *cnf = ctl->extra1;
		struct net *net = ctl->extra2;
		int i = (int *)ctl->data - cnf->data;

		set_bit(i, cnf->state);

		if (cnf == net->ipv4.devconf_dflt)
			devinet_copy_dflt_conf(net, i);
		if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
		    i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
			if ((new_value == 0) && (old_value != 0))
				rt_cache_flush(net);
		if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
		    new_value != old_value) {
			int ifindex;

			if (cnf == net->ipv4.devconf_dflt)
				ifindex = NETCONFA_IFINDEX_DEFAULT;
			else if (cnf == net->ipv4.devconf_all)
				ifindex = NETCONFA_IFINDEX_ALL;
			else {
				struct in_device *idev =
					container_of(cnf, struct in_device,
						     cnf);
				ifindex = idev->dev->ifindex;
			}
			inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
						    ifindex, cnf);
		}
	}

	return ret;
}
static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos)
{
	int *valp = ctl->data;
	int val = *valp;
	loff_t pos = *ppos;
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *valp != val) {
		struct net *net = ctl->extra2;

		if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
			if (!rtnl_trylock()) {
				/* Restore the original values before restarting */
				*valp = val;
				*ppos = pos;
				return restart_syscall();
			}
			if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
				inet_forward_change(net);
			} else {
				struct ipv4_devconf *cnf = ctl->extra1;
				struct in_device *idev =
					container_of(cnf, struct in_device, cnf);
				if (*valp)
					dev_disable_lro(idev->dev);
				inet_netconf_notify_devconf(net,
							    NETCONFA_FORWARDING,
							    idev->dev->ifindex,
							    cnf);
			}
			rtnl_unlock();
			rt_cache_flush(net);
		} else
			inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
						    NETCONFA_IFINDEX_DEFAULT,
						    net->ipv4.devconf_dflt);
	}

	return ret;
}
static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos)
{
	int *valp = ctl->data;
	int val = *valp;
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	struct net *net = ctl->extra2;

	if (write && *valp != val)
		rt_cache_flush(net);

	return ret;
}
#define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
	{ \
		.procname	= name, \
		.data		= ipv4_devconf.data + \
				  IPV4_DEVCONF_ ## attr - 1, \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
		.extra1		= &ipv4_devconf, \
	}

#define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)

#define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)

#define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
	DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)

#define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
	DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
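/*
 * Expansion example (added for clarity, not in the original): with the
 * helpers above, DEVINET_SYSCTL_RW_ENTRY(TAG, "tag") produces a ctl_table
 * entry roughly equivalent to:
 *
 *	{
 *		.procname	= "tag",
 *		.data		= ipv4_devconf.data + IPV4_DEVCONF_TAG - 1,
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= devinet_conf_proc,
 *		.extra1		= &ipv4_devconf,
 *	}
 */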
static struct devinet_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
} devinet_sysctl = {
	.devinet_vars = {
		DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
					     devinet_sysctl_forward),
		DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),

		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
		DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
		DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
		DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
		DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
					"accept_source_route"),
		DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
		DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
		DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
		DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
		DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
		DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
		DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
		DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
		DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
		DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
					"force_igmp_version"),
		DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
					"igmpv2_unsolicited_report_interval"),
		DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
					"igmpv3_unsolicited_report_interval"),

		DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
		DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
		DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
					      "promote_secondaries"),
		DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
					      "route_localnet"),
	},
};
static int __devinet_sysctl_register(struct net *net, char *dev_name,
				     struct ipv4_devconf *p)
{
	int i;
	struct devinet_sysctl_table *t;
	char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];

	t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto out;

	for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
		t->devinet_vars[i].extra1 = p;
		t->devinet_vars[i].extra2 = net;
	}

	snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);

	t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl = t;
	return 0;

free:
	kfree(t);
out:
	return -ENOBUFS;
}

static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
{
	struct devinet_sysctl_table *t = cnf->sysctl;

	if (t == NULL)
		return;

	cnf->sysctl = NULL;
	unregister_net_sysctl_table(t->sysctl_header);
	kfree(t);
}

static void devinet_sysctl_register(struct in_device *idev)
{
	neigh_sysctl_register(idev->dev, idev->arp_parms, "ipv4", NULL);
	__devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
					&idev->cnf);
}

static void devinet_sysctl_unregister(struct in_device *idev)
{
	__devinet_sysctl_unregister(&idev->cnf);
	neigh_sysctl_unregister(idev->arp_parms);
}
static struct ctl_table ctl_forward_entry[] = {
	{
		.procname	= "ip_forward",
		.data		= &ipv4_devconf.data[
					IPV4_DEVCONF_FORWARDING - 1],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= devinet_sysctl_forward,
		.extra1		= &ipv4_devconf,
		.extra2		= &init_net,
	},
	{ },
};
#endif
static __net_init int devinet_init_net(struct net *net)
{
	int err;
	struct ipv4_devconf *all, *dflt;
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl = ctl_forward_entry;
	struct ctl_table_header *forw_hdr;
#endif

	err = -ENOMEM;
	all = &ipv4_devconf;
	dflt = &ipv4_devconf_dflt;

	if (!net_eq(net, &init_net)) {
		all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
		if (all == NULL)
			goto err_alloc_all;

		dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
		if (dflt == NULL)
			goto err_alloc_dflt;

#ifdef CONFIG_SYSCTL
		tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
		if (tbl == NULL)
			goto err_alloc_ctl;

		tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
		tbl[0].extra1 = all;
		tbl[0].extra2 = net;
#endif
	}

#ifdef CONFIG_SYSCTL
	err = __devinet_sysctl_register(net, "all", all);
	if (err < 0)
		goto err_reg_all;

	err = __devinet_sysctl_register(net, "default", dflt);
	if (err < 0)
		goto err_reg_dflt;

	err = -ENOMEM;
	forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
	if (forw_hdr == NULL)
		goto err_reg_ctl;
	net->ipv4.forw_hdr = forw_hdr;
#endif

	net->ipv4.devconf_all = all;
	net->ipv4.devconf_dflt = dflt;
	return 0;

#ifdef CONFIG_SYSCTL
err_reg_ctl:
	__devinet_sysctl_unregister(dflt);
err_reg_dflt:
	__devinet_sysctl_unregister(all);
err_reg_all:
	if (tbl != ctl_forward_entry)
		kfree(tbl);
err_alloc_ctl:
#endif
	if (dflt != &ipv4_devconf_dflt)
		kfree(dflt);
err_alloc_dflt:
	if (all != &ipv4_devconf)
		kfree(all);
err_alloc_all:
	return err;
}

static __net_exit void devinet_exit_net(struct net *net)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	tbl = net->ipv4.forw_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.forw_hdr);
	__devinet_sysctl_unregister(net->ipv4.devconf_dflt);
	__devinet_sysctl_unregister(net->ipv4.devconf_all);
	kfree(tbl);
#endif
	kfree(net->ipv4.devconf_dflt);
	kfree(net->ipv4.devconf_all);
}

static __net_initdata struct pernet_operations devinet_ops = {
	.init = devinet_init_net,
	.exit = devinet_exit_net,
};

static struct rtnl_af_ops inet_af_ops = {
	.family		  = AF_INET,
	.fill_link_af	  = inet_fill_link_af,
	.get_link_af_size = inet_get_link_af_size,
	.validate_link_af = inet_validate_link_af,
	.set_link_af	  = inet_set_link_af,
};

void __init devinet_init(void)
{
	int i;

	for (i = 0; i < IN4_ADDR_HSIZE; i++)
		INIT_HLIST_HEAD(&inet_addr_lst[i]);

	register_pernet_subsys(&devinet_ops);

	register_gifconf(PF_INET, inet_gifconf);
	register_netdevice_notifier(&ip_netdev_notifier);

	schedule_delayed_work(&check_lifetime_work, 0);

	rtnl_af_register(&inet_af_ops);

	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
	rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
		      inet_netconf_dump_devconf, NULL);
}