Linux 4.19.133
[linux/fpc-iii.git] / net/ipv6/addrconf.c
blob 627cd24b7c0d0ffdefc9020e5c4f514fd48c9eea
1 /*
2 * IPv6 Address [auto]configuration
3 * Linux INET6 implementation
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
16 * Changes:
18 * Janos Farkas : delete timer on ifdown
19 * <chexum@bankinf.banki.hu>
20 * Andi Kleen : kill double kfree on module
21 * unload.
22 * Maciej W. Rozycki : FDDI support
23 * sekiya@USAGI : Don't send too many RS
24 * packets.
25 * yoshfuji@USAGI : Fixed interval between DAD
26 * packets.
27 * YOSHIFUJI Hideaki @USAGI : improved accuracy of
28 * address validation timer.
29 * YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
30 * support.
31 * Yuji SEKIYA @USAGI : Don't assign a same IPv6
32 * address on a same interface.
33 * YOSHIFUJI Hideaki @USAGI : ARCnet support
34 * YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
35 * seq_file.
36 * YOSHIFUJI Hideaki @USAGI : improved source address
37 * selection; consider scope,
38 * status etc.
41 #define pr_fmt(fmt) "IPv6: " fmt
43 #include <linux/errno.h>
44 #include <linux/types.h>
45 #include <linux/kernel.h>
46 #include <linux/sched/signal.h>
47 #include <linux/socket.h>
48 #include <linux/sockios.h>
49 #include <linux/net.h>
50 #include <linux/inet.h>
51 #include <linux/in6.h>
52 #include <linux/netdevice.h>
53 #include <linux/if_addr.h>
54 #include <linux/if_arp.h>
55 #include <linux/if_arcnet.h>
56 #include <linux/if_infiniband.h>
57 #include <linux/route.h>
58 #include <linux/inetdevice.h>
59 #include <linux/init.h>
60 #include <linux/slab.h>
61 #ifdef CONFIG_SYSCTL
62 #include <linux/sysctl.h>
63 #endif
64 #include <linux/capability.h>
65 #include <linux/delay.h>
66 #include <linux/notifier.h>
67 #include <linux/string.h>
68 #include <linux/hash.h>
70 #include <net/net_namespace.h>
71 #include <net/sock.h>
72 #include <net/snmp.h>
74 #include <net/6lowpan.h>
75 #include <net/firewire.h>
76 #include <net/ipv6.h>
77 #include <net/protocol.h>
78 #include <net/ndisc.h>
79 #include <net/ip6_route.h>
80 #include <net/addrconf.h>
81 #include <net/tcp.h>
82 #include <net/ip.h>
83 #include <net/netlink.h>
84 #include <net/pkt_sched.h>
85 #include <net/l3mdev.h>
86 #include <linux/if_tunnel.h>
87 #include <linux/rtnetlink.h>
88 #include <linux/netconf.h>
89 #include <linux/random.h>
90 #include <linux/uaccess.h>
91 #include <asm/unaligned.h>
93 #include <linux/proc_fs.h>
94 #include <linux/seq_file.h>
95 #include <linux/export.h>
97 #define INFINITY_LIFE_TIME 0xFFFFFFFF
99 #define IPV6_MAX_STRLEN \
100 sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
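/* cstamp_delta() below converts a jiffies-based creation/update timestamp
 * into the 1/100th-of-a-second units reported to userspace in
 * struct ifa_cacheinfo via rtnetlink.
 */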
102 static inline u32 cstamp_delta(unsigned long cstamp)
104 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
107 static inline s32 rfc3315_s14_backoff_init(s32 irt)
109 /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
110 u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
111 do_div(tmp, 1000000);
112 return (s32)tmp;
115 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
117 /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
118 u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
119 do_div(tmp, 1000000);
120 if ((s32)tmp > mrt) {
121 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
122 tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
123 do_div(tmp, 1000000);
125 return (s32)tmp;
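/* e.g. with an initial retransmission time of 1s and a maximum of 120s,
 * successive retransmission timeouts start near 1s and roughly double each
 * time (with a small random factor), until they exceed the maximum; each
 * later retry is then re-randomized to within 10% of 120s.
 */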
128 #ifdef CONFIG_SYSCTL
129 static int addrconf_sysctl_register(struct inet6_dev *idev);
130 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
131 #else
132 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
134 return 0;
137 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
140 #endif
142 static void ipv6_regen_rndid(struct inet6_dev *idev);
143 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
145 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
146 static int ipv6_count_addresses(const struct inet6_dev *idev);
147 static int ipv6_generate_stable_address(struct in6_addr *addr,
148 u8 dad_count,
149 const struct inet6_dev *idev);
151 #define IN6_ADDR_HSIZE_SHIFT 8
152 #define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
154 * Configured unicast address hash table
156 static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
157 static DEFINE_SPINLOCK(addrconf_hash_lock);
159 static void addrconf_verify(void);
160 static void addrconf_verify_rtnl(void);
161 static void addrconf_verify_work(struct work_struct *);
163 static struct workqueue_struct *addrconf_wq;
164 static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
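/* addrconf_verify_work() runs on addrconf_wq and periodically walks
 * inet6_addr_lst to expire or deprecate addresses whose valid/preferred
 * lifetimes have run out; addr_chk_work is the delayed work item used to
 * (re)schedule that scan.
 */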
166 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
167 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
169 static void addrconf_type_change(struct net_device *dev,
170 unsigned long event);
171 static int addrconf_ifdown(struct net_device *dev, int how);
173 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
174 int plen,
175 const struct net_device *dev,
176 u32 flags, u32 noflags);
178 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
179 static void addrconf_dad_work(struct work_struct *w);
180 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
181 bool send_na);
182 static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
183 static void addrconf_rs_timer(struct timer_list *t);
184 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
185 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
187 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
188 struct prefix_info *pinfo);
190 static struct ipv6_devconf ipv6_devconf __read_mostly = {
191 .forwarding = 0,
192 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
193 .mtu6 = IPV6_MIN_MTU,
194 .accept_ra = 1,
195 .accept_redirects = 1,
196 .autoconf = 1,
197 .force_mld_version = 0,
198 .mldv1_unsolicited_report_interval = 10 * HZ,
199 .mldv2_unsolicited_report_interval = HZ,
200 .dad_transmits = 1,
201 .rtr_solicits = MAX_RTR_SOLICITATIONS,
202 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
203 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
204 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
205 .use_tempaddr = 0,
206 .temp_valid_lft = TEMP_VALID_LIFETIME,
207 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
208 .regen_max_retry = REGEN_MAX_RETRY,
209 .max_desync_factor = MAX_DESYNC_FACTOR,
210 .max_addresses = IPV6_MAX_ADDRESSES,
211 .accept_ra_defrtr = 1,
212 .accept_ra_from_local = 0,
213 .accept_ra_min_hop_limit= 1,
214 .accept_ra_pinfo = 1,
215 #ifdef CONFIG_IPV6_ROUTER_PREF
216 .accept_ra_rtr_pref = 1,
217 .rtr_probe_interval = 60 * HZ,
218 #ifdef CONFIG_IPV6_ROUTE_INFO
219 .accept_ra_rt_info_min_plen = 0,
220 .accept_ra_rt_info_max_plen = 0,
221 #endif
222 #endif
223 .proxy_ndp = 0,
224 .accept_source_route = 0, /* we do not accept RH0 by default. */
225 .disable_ipv6 = 0,
226 .accept_dad = 0,
227 .suppress_frag_ndisc = 1,
228 .accept_ra_mtu = 1,
229 .stable_secret = {
230 .initialized = false,
232 .use_oif_addrs_only = 0,
233 .ignore_routes_with_linkdown = 0,
234 .keep_addr_on_down = 0,
235 .seg6_enabled = 0,
236 #ifdef CONFIG_IPV6_SEG6_HMAC
237 .seg6_require_hmac = 0,
238 #endif
239 .enhanced_dad = 1,
240 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
241 .disable_policy = 0,
244 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
245 .forwarding = 0,
246 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
247 .mtu6 = IPV6_MIN_MTU,
248 .accept_ra = 1,
249 .accept_redirects = 1,
250 .autoconf = 1,
251 .force_mld_version = 0,
252 .mldv1_unsolicited_report_interval = 10 * HZ,
253 .mldv2_unsolicited_report_interval = HZ,
254 .dad_transmits = 1,
255 .rtr_solicits = MAX_RTR_SOLICITATIONS,
256 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
257 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
258 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
259 .use_tempaddr = 0,
260 .temp_valid_lft = TEMP_VALID_LIFETIME,
261 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
262 .regen_max_retry = REGEN_MAX_RETRY,
263 .max_desync_factor = MAX_DESYNC_FACTOR,
264 .max_addresses = IPV6_MAX_ADDRESSES,
265 .accept_ra_defrtr = 1,
266 .accept_ra_from_local = 0,
267 .accept_ra_min_hop_limit= 1,
268 .accept_ra_pinfo = 1,
269 #ifdef CONFIG_IPV6_ROUTER_PREF
270 .accept_ra_rtr_pref = 1,
271 .rtr_probe_interval = 60 * HZ,
272 #ifdef CONFIG_IPV6_ROUTE_INFO
273 .accept_ra_rt_info_min_plen = 0,
274 .accept_ra_rt_info_max_plen = 0,
275 #endif
276 #endif
277 .proxy_ndp = 0,
278 .accept_source_route = 0, /* we do not accept RH0 by default. */
279 .disable_ipv6 = 0,
280 .accept_dad = 1,
281 .suppress_frag_ndisc = 1,
282 .accept_ra_mtu = 1,
283 .stable_secret = {
284 .initialized = false,
286 .use_oif_addrs_only = 0,
287 .ignore_routes_with_linkdown = 0,
288 .keep_addr_on_down = 0,
289 .seg6_enabled = 0,
290 #ifdef CONFIG_IPV6_SEG6_HMAC
291 .seg6_require_hmac = 0,
292 #endif
293 .enhanced_dad = 1,
294 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
295 .disable_policy = 0,
298 /* Check if link is ready: is it up and is a valid qdisc available */
299 static inline bool addrconf_link_ready(const struct net_device *dev)
301 return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
304 static void addrconf_del_rs_timer(struct inet6_dev *idev)
306 if (del_timer(&idev->rs_timer))
307 __in6_dev_put(idev);
310 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
312 if (cancel_delayed_work(&ifp->dad_work))
313 __in6_ifa_put(ifp);
316 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
317 unsigned long when)
319 if (!timer_pending(&idev->rs_timer))
320 in6_dev_hold(idev);
321 mod_timer(&idev->rs_timer, jiffies + when);
324 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
325 unsigned long delay)
327 in6_ifa_hold(ifp);
328 if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
329 in6_ifa_put(ifp);
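/* The timer/work helpers above keep reference counting symmetric: a
 * reference on the idev/ifp is taken when a timer or work item is (re)armed,
 * and the matching put happens either here (when a pending item is cancelled
 * or replaced) or in the handler itself once it has run.
 */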
332 static int snmp6_alloc_dev(struct inet6_dev *idev)
334 int i;
336 idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
337 if (!idev->stats.ipv6)
338 goto err_ip;
340 for_each_possible_cpu(i) {
341 struct ipstats_mib *addrconf_stats;
342 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
343 u64_stats_init(&addrconf_stats->syncp);
347 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
348 GFP_KERNEL);
349 if (!idev->stats.icmpv6dev)
350 goto err_icmp;
351 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
352 GFP_KERNEL);
353 if (!idev->stats.icmpv6msgdev)
354 goto err_icmpmsg;
356 return 0;
358 err_icmpmsg:
359 kfree(idev->stats.icmpv6dev);
360 err_icmp:
361 free_percpu(idev->stats.ipv6);
362 err_ip:
363 return -ENOMEM;
366 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
368 struct inet6_dev *ndev;
369 int err = -ENOMEM;
371 ASSERT_RTNL();
373 if (dev->mtu < IPV6_MIN_MTU)
374 return ERR_PTR(-EINVAL);
376 ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
377 if (!ndev)
378 return ERR_PTR(err);
380 rwlock_init(&ndev->lock);
381 ndev->dev = dev;
382 INIT_LIST_HEAD(&ndev->addr_list);
383 timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
384 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
386 if (ndev->cnf.stable_secret.initialized)
387 ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
389 ndev->cnf.mtu6 = dev->mtu;
390 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
391 if (!ndev->nd_parms) {
392 kfree(ndev);
393 return ERR_PTR(err);
395 if (ndev->cnf.forwarding)
396 dev_disable_lro(dev);
397 /* We refer to the device */
398 dev_hold(dev);
400 if (snmp6_alloc_dev(ndev) < 0) {
401 netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
402 __func__);
403 neigh_parms_release(&nd_tbl, ndev->nd_parms);
404 dev_put(dev);
405 kfree(ndev);
406 return ERR_PTR(err);
409 if (snmp6_register_dev(ndev) < 0) {
410 netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
411 __func__, dev->name);
412 goto err_release;
415 /* One reference from device. */
416 refcount_set(&ndev->refcnt, 1);
418 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
419 ndev->cnf.accept_dad = -1;
421 #if IS_ENABLED(CONFIG_IPV6_SIT)
422 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
423 pr_info("%s: Disabled Multicast RS\n", dev->name);
424 ndev->cnf.rtr_solicits = 0;
426 #endif
428 INIT_LIST_HEAD(&ndev->tempaddr_list);
429 ndev->desync_factor = U32_MAX;
430 if ((dev->flags&IFF_LOOPBACK) ||
431 dev->type == ARPHRD_TUNNEL ||
432 dev->type == ARPHRD_TUNNEL6 ||
433 dev->type == ARPHRD_SIT ||
434 dev->type == ARPHRD_NONE) {
435 ndev->cnf.use_tempaddr = -1;
436 } else
437 ipv6_regen_rndid(ndev);
439 ndev->token = in6addr_any;
441 if (netif_running(dev) && addrconf_link_ready(dev))
442 ndev->if_flags |= IF_READY;
444 ipv6_mc_init_dev(ndev);
445 ndev->tstamp = jiffies;
446 err = addrconf_sysctl_register(ndev);
447 if (err) {
448 ipv6_mc_destroy_dev(ndev);
449 snmp6_unregister_dev(ndev);
450 goto err_release;
452 /* protected by rtnl_lock */
453 rcu_assign_pointer(dev->ip6_ptr, ndev);
455 /* Join interface-local all-node multicast group */
456 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
458 /* Join all-node multicast group */
459 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
461 /* Join all-router multicast group if forwarding is set */
462 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
463 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
465 return ndev;
467 err_release:
468 neigh_parms_release(&nd_tbl, ndev->nd_parms);
469 ndev->dead = 1;
470 in6_dev_finish_destroy(ndev);
471 return ERR_PTR(err);
474 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
476 struct inet6_dev *idev;
478 ASSERT_RTNL();
480 idev = __in6_dev_get(dev);
481 if (!idev) {
482 idev = ipv6_add_dev(dev);
483 if (IS_ERR(idev))
484 return NULL;
487 if (dev->flags&IFF_UP)
488 ipv6_mc_up(idev);
489 return idev;
492 static int inet6_netconf_msgsize_devconf(int type)
494 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
495 + nla_total_size(4); /* NETCONFA_IFINDEX */
496 bool all = false;
498 if (type == NETCONFA_ALL)
499 all = true;
501 if (all || type == NETCONFA_FORWARDING)
502 size += nla_total_size(4);
503 #ifdef CONFIG_IPV6_MROUTE
504 if (all || type == NETCONFA_MC_FORWARDING)
505 size += nla_total_size(4);
506 #endif
507 if (all || type == NETCONFA_PROXY_NEIGH)
508 size += nla_total_size(4);
510 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
511 size += nla_total_size(4);
513 return size;
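/* This size estimate must stay in sync with inet6_netconf_fill_devconf():
 * one netconfmsg header plus one 32-bit attribute for the ifindex and one
 * for every devconf value selected by the requested type.
 */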
516 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
517 struct ipv6_devconf *devconf, u32 portid,
518 u32 seq, int event, unsigned int flags,
519 int type)
521 struct nlmsghdr *nlh;
522 struct netconfmsg *ncm;
523 bool all = false;
525 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
526 flags);
527 if (!nlh)
528 return -EMSGSIZE;
530 if (type == NETCONFA_ALL)
531 all = true;
533 ncm = nlmsg_data(nlh);
534 ncm->ncm_family = AF_INET6;
536 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
537 goto nla_put_failure;
539 if (!devconf)
540 goto out;
542 if ((all || type == NETCONFA_FORWARDING) &&
543 nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
544 goto nla_put_failure;
545 #ifdef CONFIG_IPV6_MROUTE
546 if ((all || type == NETCONFA_MC_FORWARDING) &&
547 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
548 devconf->mc_forwarding) < 0)
549 goto nla_put_failure;
550 #endif
551 if ((all || type == NETCONFA_PROXY_NEIGH) &&
552 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
553 goto nla_put_failure;
555 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
556 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
557 devconf->ignore_routes_with_linkdown) < 0)
558 goto nla_put_failure;
560 out:
561 nlmsg_end(skb, nlh);
562 return 0;
564 nla_put_failure:
565 nlmsg_cancel(skb, nlh);
566 return -EMSGSIZE;
569 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
570 int ifindex, struct ipv6_devconf *devconf)
572 struct sk_buff *skb;
573 int err = -ENOBUFS;
575 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
576 if (!skb)
577 goto errout;
579 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
580 event, 0, type);
581 if (err < 0) {
582 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
583 WARN_ON(err == -EMSGSIZE);
584 kfree_skb(skb);
585 goto errout;
587 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
588 return;
589 errout:
590 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
593 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
594 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
595 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
596 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
597 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
600 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
601 struct nlmsghdr *nlh,
602 struct netlink_ext_ack *extack)
604 struct net *net = sock_net(in_skb->sk);
605 struct nlattr *tb[NETCONFA_MAX+1];
606 struct inet6_dev *in6_dev = NULL;
607 struct net_device *dev = NULL;
608 struct netconfmsg *ncm;
609 struct sk_buff *skb;
610 struct ipv6_devconf *devconf;
611 int ifindex;
612 int err;
614 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
615 devconf_ipv6_policy, extack);
616 if (err < 0)
617 return err;
619 if (!tb[NETCONFA_IFINDEX])
620 return -EINVAL;
622 err = -EINVAL;
623 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
624 switch (ifindex) {
625 case NETCONFA_IFINDEX_ALL:
626 devconf = net->ipv6.devconf_all;
627 break;
628 case NETCONFA_IFINDEX_DEFAULT:
629 devconf = net->ipv6.devconf_dflt;
630 break;
631 default:
632 dev = dev_get_by_index(net, ifindex);
633 if (!dev)
634 return -EINVAL;
635 in6_dev = in6_dev_get(dev);
636 if (!in6_dev)
637 goto errout;
638 devconf = &in6_dev->cnf;
639 break;
642 err = -ENOBUFS;
643 skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
644 if (!skb)
645 goto errout;
647 err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
648 NETLINK_CB(in_skb).portid,
649 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
650 NETCONFA_ALL);
651 if (err < 0) {
652 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
653 WARN_ON(err == -EMSGSIZE);
654 kfree_skb(skb);
655 goto errout;
657 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
658 errout:
659 if (in6_dev)
660 in6_dev_put(in6_dev);
661 if (dev)
662 dev_put(dev);
663 return err;
666 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
667 struct netlink_callback *cb)
669 struct net *net = sock_net(skb->sk);
670 int h, s_h;
671 int idx, s_idx;
672 struct net_device *dev;
673 struct inet6_dev *idev;
674 struct hlist_head *head;
676 s_h = cb->args[0];
677 s_idx = idx = cb->args[1];
679 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
680 idx = 0;
681 head = &net->dev_index_head[h];
682 rcu_read_lock();
683 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
684 net->dev_base_seq;
685 hlist_for_each_entry_rcu(dev, head, index_hlist) {
686 if (idx < s_idx)
687 goto cont;
688 idev = __in6_dev_get(dev);
689 if (!idev)
690 goto cont;
692 if (inet6_netconf_fill_devconf(skb, dev->ifindex,
693 &idev->cnf,
694 NETLINK_CB(cb->skb).portid,
695 cb->nlh->nlmsg_seq,
696 RTM_NEWNETCONF,
697 NLM_F_MULTI,
698 NETCONFA_ALL) < 0) {
699 rcu_read_unlock();
700 goto done;
702 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
703 cont:
704 idx++;
706 rcu_read_unlock();
708 if (h == NETDEV_HASHENTRIES) {
709 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
710 net->ipv6.devconf_all,
711 NETLINK_CB(cb->skb).portid,
712 cb->nlh->nlmsg_seq,
713 RTM_NEWNETCONF, NLM_F_MULTI,
714 NETCONFA_ALL) < 0)
715 goto done;
716 else
717 h++;
719 if (h == NETDEV_HASHENTRIES + 1) {
720 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
721 net->ipv6.devconf_dflt,
722 NETLINK_CB(cb->skb).portid,
723 cb->nlh->nlmsg_seq,
724 RTM_NEWNETCONF, NLM_F_MULTI,
725 NETCONFA_ALL) < 0)
726 goto done;
727 else
728 h++;
730 done:
731 cb->args[0] = h;
732 cb->args[1] = idx;
734 return skb->len;
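/* cb->args[0]/[1] record the device hash bucket and index reached so far,
 * so an interrupted netlink dump can resume where it left off; the two
 * extra "buckets" past NETDEV_HASHENTRIES report the "all" and "default"
 * devconf entries.
 */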
737 #ifdef CONFIG_SYSCTL
738 static void dev_forward_change(struct inet6_dev *idev)
740 struct net_device *dev;
741 struct inet6_ifaddr *ifa;
743 if (!idev)
744 return;
745 dev = idev->dev;
746 if (idev->cnf.forwarding)
747 dev_disable_lro(dev);
748 if (dev->flags & IFF_MULTICAST) {
749 if (idev->cnf.forwarding) {
750 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
751 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
752 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
753 } else {
754 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
755 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
756 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
760 list_for_each_entry(ifa, &idev->addr_list, if_list) {
761 if (ifa->flags&IFA_F_TENTATIVE)
762 continue;
763 if (idev->cnf.forwarding)
764 addrconf_join_anycast(ifa);
765 else
766 addrconf_leave_anycast(ifa);
768 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
769 NETCONFA_FORWARDING,
770 dev->ifindex, &idev->cnf);
774 static void addrconf_forward_change(struct net *net, __s32 newf)
776 struct net_device *dev;
777 struct inet6_dev *idev;
779 for_each_netdev(net, dev) {
780 idev = __in6_dev_get(dev);
781 if (idev) {
782 int changed = (!idev->cnf.forwarding) ^ (!newf);
783 idev->cnf.forwarding = newf;
784 if (changed)
785 dev_forward_change(idev);
790 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
792 struct net *net;
793 int old;
795 if (!rtnl_trylock())
796 return restart_syscall();
798 net = (struct net *)table->extra2;
799 old = *p;
800 *p = newf;
802 if (p == &net->ipv6.devconf_dflt->forwarding) {
803 if ((!newf) ^ (!old))
804 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
805 NETCONFA_FORWARDING,
806 NETCONFA_IFINDEX_DEFAULT,
807 net->ipv6.devconf_dflt);
808 rtnl_unlock();
809 return 0;
812 if (p == &net->ipv6.devconf_all->forwarding) {
813 int old_dflt = net->ipv6.devconf_dflt->forwarding;
815 net->ipv6.devconf_dflt->forwarding = newf;
816 if ((!newf) ^ (!old_dflt))
817 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
818 NETCONFA_FORWARDING,
819 NETCONFA_IFINDEX_DEFAULT,
820 net->ipv6.devconf_dflt);
822 addrconf_forward_change(net, newf);
823 if ((!newf) ^ (!old))
824 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
825 NETCONFA_FORWARDING,
826 NETCONFA_IFINDEX_ALL,
827 net->ipv6.devconf_all);
828 } else if ((!newf) ^ (!old))
829 dev_forward_change((struct inet6_dev *)table->extra1);
830 rtnl_unlock();
832 if (newf)
833 rt6_purge_dflt_routers(net);
834 return 1;
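/* The rtnl_trylock()/restart_syscall() pattern here (and in
 * addrconf_fixup_linkdown() below) avoids blocking on RTNL from a sysctl
 * write, which could deadlock against paths that unregister these sysctls
 * while holding RTNL; the write is simply restarted instead.
 */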
837 static void addrconf_linkdown_change(struct net *net, __s32 newf)
839 struct net_device *dev;
840 struct inet6_dev *idev;
842 for_each_netdev(net, dev) {
843 idev = __in6_dev_get(dev);
844 if (idev) {
845 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
847 idev->cnf.ignore_routes_with_linkdown = newf;
848 if (changed)
849 inet6_netconf_notify_devconf(dev_net(dev),
850 RTM_NEWNETCONF,
851 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
852 dev->ifindex,
853 &idev->cnf);
858 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
860 struct net *net;
861 int old;
863 if (!rtnl_trylock())
864 return restart_syscall();
866 net = (struct net *)table->extra2;
867 old = *p;
868 *p = newf;
870 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
871 if ((!newf) ^ (!old))
872 inet6_netconf_notify_devconf(net,
873 RTM_NEWNETCONF,
874 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
875 NETCONFA_IFINDEX_DEFAULT,
876 net->ipv6.devconf_dflt);
877 rtnl_unlock();
878 return 0;
881 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
882 net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
883 addrconf_linkdown_change(net, newf);
884 if ((!newf) ^ (!old))
885 inet6_netconf_notify_devconf(net,
886 RTM_NEWNETCONF,
887 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
888 NETCONFA_IFINDEX_ALL,
889 net->ipv6.devconf_all);
891 rtnl_unlock();
893 return 1;
896 #endif
898 /* Nobody refers to this ifaddr, destroy it */
899 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
901 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
903 #ifdef NET_REFCNT_DEBUG
904 pr_debug("%s\n", __func__);
905 #endif
907 in6_dev_put(ifp->idev);
909 if (cancel_delayed_work(&ifp->dad_work))
910 pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
911 ifp);
913 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
914 pr_warn("Freeing alive inet6 address %p\n", ifp);
915 return;
918 kfree_rcu(ifp, rcu);
921 static void
922 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
924 struct list_head *p;
925 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
928 * Each device address list is sorted in order of scope -
929 * global before linklocal.
931 list_for_each(p, &idev->addr_list) {
932 struct inet6_ifaddr *ifa
933 = list_entry(p, struct inet6_ifaddr, if_list);
934 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
935 break;
938 list_add_tail_rcu(&ifp->if_list, p);
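/* inet6_addr_hash() below hashes an address together with its network
 * namespace into one of the IN6_ADDR_HSIZE buckets of inet6_addr_lst.
 */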
941 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
943 u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
945 return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
948 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
949 struct net_device *dev, unsigned int hash)
951 struct inet6_ifaddr *ifp;
953 hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
954 if (!net_eq(dev_net(ifp->idev->dev), net))
955 continue;
956 if (ipv6_addr_equal(&ifp->addr, addr)) {
957 if (!dev || ifp->idev->dev == dev)
958 return true;
961 return false;
964 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
966 unsigned int hash = inet6_addr_hash(dev_net(dev), &ifa->addr);
967 int err = 0;
969 spin_lock(&addrconf_hash_lock);
971 /* Ignore adding duplicate addresses on an interface */
972 if (ipv6_chk_same_addr(dev_net(dev), &ifa->addr, dev, hash)) {
973 netdev_dbg(dev, "ipv6_add_addr: already assigned\n");
974 err = -EEXIST;
975 } else {
976 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
979 spin_unlock(&addrconf_hash_lock);
981 return err;
984 /* On success it returns ifp with increased reference count */
986 static struct inet6_ifaddr *
987 ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
988 bool can_block, struct netlink_ext_ack *extack)
990 gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
991 int addr_type = ipv6_addr_type(cfg->pfx);
992 struct net *net = dev_net(idev->dev);
993 struct inet6_ifaddr *ifa = NULL;
994 struct fib6_info *f6i = NULL;
995 int err = 0;
997 if (addr_type == IPV6_ADDR_ANY ||
998 (addr_type & IPV6_ADDR_MULTICAST &&
999 !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
1000 (!(idev->dev->flags & IFF_LOOPBACK) &&
1001 addr_type & IPV6_ADDR_LOOPBACK))
1002 return ERR_PTR(-EADDRNOTAVAIL);
1004 if (idev->dead) {
1005 err = -ENODEV; /*XXX*/
1006 goto out;
1009 if (idev->cnf.disable_ipv6) {
1010 err = -EACCES;
1011 goto out;
1014 /* validator notifier needs to be blocking;
1015 * do not call in atomic context
1017 if (can_block) {
1018 struct in6_validator_info i6vi = {
1019 .i6vi_addr = *cfg->pfx,
1020 .i6vi_dev = idev,
1021 .extack = extack,
1024 err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
1025 err = notifier_to_errno(err);
1026 if (err < 0)
1027 goto out;
1030 ifa = kzalloc(sizeof(*ifa), gfp_flags);
1031 if (!ifa) {
1032 err = -ENOBUFS;
1033 goto out;
1036 f6i = addrconf_f6i_alloc(net, idev, cfg->pfx, false, gfp_flags);
1037 if (IS_ERR(f6i)) {
1038 err = PTR_ERR(f6i);
1039 f6i = NULL;
1040 goto out;
1043 if (net->ipv6.devconf_all->disable_policy ||
1044 idev->cnf.disable_policy)
1045 f6i->dst_nopolicy = true;
1047 neigh_parms_data_state_setall(idev->nd_parms);
1049 ifa->addr = *cfg->pfx;
1050 if (cfg->peer_pfx)
1051 ifa->peer_addr = *cfg->peer_pfx;
1053 spin_lock_init(&ifa->lock);
1054 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1055 INIT_HLIST_NODE(&ifa->addr_lst);
1056 ifa->scope = cfg->scope;
1057 ifa->prefix_len = cfg->plen;
1058 ifa->rt_priority = cfg->rt_priority;
1059 ifa->flags = cfg->ifa_flags;
1060 /* No need to add the TENTATIVE flag for addresses with NODAD */
1061 if (!(cfg->ifa_flags & IFA_F_NODAD))
1062 ifa->flags |= IFA_F_TENTATIVE;
1063 ifa->valid_lft = cfg->valid_lft;
1064 ifa->prefered_lft = cfg->preferred_lft;
1065 ifa->cstamp = ifa->tstamp = jiffies;
1066 ifa->tokenized = false;
1068 ifa->rt = f6i;
1070 ifa->idev = idev;
1071 in6_dev_hold(idev);
1073 /* For caller */
1074 refcount_set(&ifa->refcnt, 1);
1076 rcu_read_lock_bh();
1078 err = ipv6_add_addr_hash(idev->dev, ifa);
1079 if (err < 0) {
1080 rcu_read_unlock_bh();
1081 goto out;
1084 write_lock(&idev->lock);
1086 /* Add to inet6_dev unicast addr list. */
1087 ipv6_link_dev_addr(idev, ifa);
1089 if (ifa->flags&IFA_F_TEMPORARY) {
1090 list_add(&ifa->tmp_list, &idev->tempaddr_list);
1091 in6_ifa_hold(ifa);
1094 in6_ifa_hold(ifa);
1095 write_unlock(&idev->lock);
1097 rcu_read_unlock_bh();
1099 inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1100 out:
1101 if (unlikely(err < 0)) {
1102 fib6_info_release(f6i);
1104 if (ifa) {
1105 if (ifa->idev)
1106 in6_dev_put(ifa->idev);
1107 kfree(ifa);
1109 ifa = ERR_PTR(err);
1112 return ifa;
1115 enum cleanup_prefix_rt_t {
1116 CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */
1117 CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */
1118 CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1122 * Check whether the prefix for ifp would still need a prefix route
1123 * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1124 * constants.
1126 * 1) we don't purge prefix if address was not permanent.
1127 * prefix is managed by its own lifetime.
1128 * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1129 * 3) if there are no addresses, delete prefix.
1130 * 4) if there are still other permanent address(es),
1131 * corresponding prefix is still permanent.
1132 * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1133 * don't purge the prefix, assume user space is managing it.
1134 * 6) otherwise, update prefix lifetime to the
1135 * longest valid lifetime among the corresponding
1136 * addresses on the device.
1137 * Note: subsequent RA will update lifetime.
1139 static enum cleanup_prefix_rt_t
1140 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1142 struct inet6_ifaddr *ifa;
1143 struct inet6_dev *idev = ifp->idev;
1144 unsigned long lifetime;
1145 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1147 *expires = jiffies;
1149 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1150 if (ifa == ifp)
1151 continue;
1152 if (ifa->prefix_len != ifp->prefix_len ||
1153 !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1154 ifp->prefix_len))
1155 continue;
1156 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1157 return CLEANUP_PREFIX_RT_NOP;
1159 action = CLEANUP_PREFIX_RT_EXPIRE;
1161 spin_lock(&ifa->lock);
1163 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1165 * Note: Because this address is
1166 * not permanent, lifetime <
1167 * LONG_MAX / HZ here.
1169 if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1170 *expires = ifa->tstamp + lifetime * HZ;
1171 spin_unlock(&ifa->lock);
1174 return action;
1177 static void
1178 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
1179 bool del_rt, bool del_peer)
1181 struct fib6_info *f6i;
1183 f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
1184 ifp->prefix_len,
1185 ifp->idev->dev,
1186 0, RTF_GATEWAY | RTF_DEFAULT);
1187 if (f6i) {
1188 if (del_rt)
1189 ip6_del_rt(dev_net(ifp->idev->dev), f6i);
1190 else {
1191 if (!(f6i->fib6_flags & RTF_EXPIRES))
1192 fib6_set_expires(f6i, expires);
1193 fib6_info_release(f6i);
1199 /* This function expects a referenced ifp and releases that reference before returning */
1201 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1203 int state;
1204 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1205 unsigned long expires;
1207 ASSERT_RTNL();
1209 spin_lock_bh(&ifp->lock);
1210 state = ifp->state;
1211 ifp->state = INET6_IFADDR_STATE_DEAD;
1212 spin_unlock_bh(&ifp->lock);
1214 if (state == INET6_IFADDR_STATE_DEAD)
1215 goto out;
1217 spin_lock_bh(&addrconf_hash_lock);
1218 hlist_del_init_rcu(&ifp->addr_lst);
1219 spin_unlock_bh(&addrconf_hash_lock);
1221 write_lock_bh(&ifp->idev->lock);
1223 if (ifp->flags&IFA_F_TEMPORARY) {
1224 list_del(&ifp->tmp_list);
1225 if (ifp->ifpub) {
1226 in6_ifa_put(ifp->ifpub);
1227 ifp->ifpub = NULL;
1229 __in6_ifa_put(ifp);
1232 if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1233 action = check_cleanup_prefix_route(ifp, &expires);
1235 list_del_rcu(&ifp->if_list);
1236 __in6_ifa_put(ifp);
1238 write_unlock_bh(&ifp->idev->lock);
1240 addrconf_del_dad_work(ifp);
1242 ipv6_ifa_notify(RTM_DELADDR, ifp);
1244 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1246 if (action != CLEANUP_PREFIX_RT_NOP) {
1247 cleanup_prefix_route(ifp, expires,
1248 action == CLEANUP_PREFIX_RT_DEL, false);
1251 /* clean up prefsrc entries */
1252 rt6_remove_prefsrc(ifp);
1253 out:
1254 in6_ifa_put(ifp);
1257 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp,
1258 struct inet6_ifaddr *ift,
1259 bool block)
1261 struct inet6_dev *idev = ifp->idev;
1262 struct in6_addr addr, *tmpaddr;
1263 unsigned long tmp_tstamp, age;
1264 unsigned long regen_advance;
1265 struct ifa6_config cfg;
1266 int ret = 0;
1267 unsigned long now = jiffies;
1268 long max_desync_factor;
1269 s32 cnf_temp_preferred_lft;
1271 write_lock_bh(&idev->lock);
1272 if (ift) {
1273 spin_lock_bh(&ift->lock);
1274 memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
1275 spin_unlock_bh(&ift->lock);
1276 tmpaddr = &addr;
1277 } else {
1278 tmpaddr = NULL;
1280 retry:
1281 in6_dev_hold(idev);
1282 if (idev->cnf.use_tempaddr <= 0) {
1283 write_unlock_bh(&idev->lock);
1284 pr_info("%s: use_tempaddr is disabled\n", __func__);
1285 in6_dev_put(idev);
1286 ret = -1;
1287 goto out;
1289 spin_lock_bh(&ifp->lock);
1290 if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1291 idev->cnf.use_tempaddr = -1; /*XXX*/
1292 spin_unlock_bh(&ifp->lock);
1293 write_unlock_bh(&idev->lock);
1294 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1295 __func__);
1296 in6_dev_put(idev);
1297 ret = -1;
1298 goto out;
1300 in6_ifa_hold(ifp);
1301 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1302 ipv6_try_regen_rndid(idev, tmpaddr);
1303 memcpy(&addr.s6_addr[8], idev->rndid, 8);
1304 age = (now - ifp->tstamp) / HZ;
1306 regen_advance = idev->cnf.regen_max_retry *
1307 idev->cnf.dad_transmits *
1308 NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
1310 /* recalculate max_desync_factor each time and update
1311 * idev->desync_factor if it's larger
1313 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1314 max_desync_factor = min_t(__u32,
1315 idev->cnf.max_desync_factor,
1316 cnf_temp_preferred_lft - regen_advance);
1318 if (unlikely(idev->desync_factor > max_desync_factor)) {
1319 if (max_desync_factor > 0) {
1320 get_random_bytes(&idev->desync_factor,
1321 sizeof(idev->desync_factor));
1322 idev->desync_factor %= max_desync_factor;
1323 } else {
1324 idev->desync_factor = 0;
1328 memset(&cfg, 0, sizeof(cfg));
1329 cfg.valid_lft = min_t(__u32, ifp->valid_lft,
1330 idev->cnf.temp_valid_lft + age);
1331 cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
1332 cfg.preferred_lft = min_t(__u32, ifp->prefered_lft, cfg.preferred_lft);
1334 cfg.plen = ifp->prefix_len;
1335 tmp_tstamp = ifp->tstamp;
1336 spin_unlock_bh(&ifp->lock);
1338 write_unlock_bh(&idev->lock);
1340 /* A temporary address is created only if this calculated Preferred
1341 * Lifetime is greater than REGEN_ADVANCE time units. In particular,
1342 * an implementation must not create a temporary address with a zero
1343 * Preferred Lifetime.
1344 * Use age calculation as in addrconf_verify to avoid unnecessary
1345 * temporary addresses being generated.
1347 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1348 if (cfg.preferred_lft <= regen_advance + age) {
1349 in6_ifa_put(ifp);
1350 in6_dev_put(idev);
1351 ret = -1;
1352 goto out;
1355 cfg.ifa_flags = IFA_F_TEMPORARY;
1356 /* set in addrconf_prefix_rcv() */
1357 if (ifp->flags & IFA_F_OPTIMISTIC)
1358 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
1360 cfg.pfx = &addr;
1361 cfg.scope = ipv6_addr_scope(cfg.pfx);
1363 ift = ipv6_add_addr(idev, &cfg, block, NULL);
1364 if (IS_ERR(ift)) {
1365 in6_ifa_put(ifp);
1366 in6_dev_put(idev);
1367 pr_info("%s: retry temporary address regeneration\n", __func__);
1368 tmpaddr = &addr;
1369 write_lock_bh(&idev->lock);
1370 goto retry;
1373 spin_lock_bh(&ift->lock);
1374 ift->ifpub = ifp;
1375 ift->cstamp = now;
1376 ift->tstamp = tmp_tstamp;
1377 spin_unlock_bh(&ift->lock);
1379 addrconf_dad_start(ift);
1380 in6_ifa_put(ift);
1381 in6_dev_put(idev);
1382 out:
1383 return ret;
1387 * Choose an appropriate source address (RFC3484)
1389 enum {
1390 IPV6_SADDR_RULE_INIT = 0,
1391 IPV6_SADDR_RULE_LOCAL,
1392 IPV6_SADDR_RULE_SCOPE,
1393 IPV6_SADDR_RULE_PREFERRED,
1394 #ifdef CONFIG_IPV6_MIP6
1395 IPV6_SADDR_RULE_HOA,
1396 #endif
1397 IPV6_SADDR_RULE_OIF,
1398 IPV6_SADDR_RULE_LABEL,
1399 IPV6_SADDR_RULE_PRIVACY,
1400 IPV6_SADDR_RULE_ORCHID,
1401 IPV6_SADDR_RULE_PREFIX,
1402 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1403 IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1404 #endif
1405 IPV6_SADDR_RULE_MAX
1408 struct ipv6_saddr_score {
1409 int rule;
1410 int addr_type;
1411 struct inet6_ifaddr *ifa;
1412 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1413 int scopedist;
1414 int matchlen;
1417 struct ipv6_saddr_dst {
1418 const struct in6_addr *addr;
1419 int ifindex;
1420 int scope;
1421 int label;
1422 unsigned int prefs;
1425 static inline int ipv6_saddr_preferred(int type)
1427 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1428 return 1;
1429 return 0;
1432 static bool ipv6_use_optimistic_addr(struct net *net,
1433 struct inet6_dev *idev)
1435 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1436 if (!idev)
1437 return false;
1438 if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1439 return false;
1440 if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
1441 return false;
1443 return true;
1444 #else
1445 return false;
1446 #endif
1449 static bool ipv6_allow_optimistic_dad(struct net *net,
1450 struct inet6_dev *idev)
1452 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1453 if (!idev)
1454 return false;
1455 if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1456 return false;
1458 return true;
1459 #else
1460 return false;
1461 #endif
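/* ipv6_get_saddr_eval() evaluates source-address selection rule @i for the
 * candidate in @score against @dst. Results are cached bit-by-bit in
 * score->scorebits so two candidates can be compared one rule at a time in
 * __ipv6_dev_get_saddr().
 */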
1464 static int ipv6_get_saddr_eval(struct net *net,
1465 struct ipv6_saddr_score *score,
1466 struct ipv6_saddr_dst *dst,
1467 int i)
1469 int ret;
1471 if (i <= score->rule) {
1472 switch (i) {
1473 case IPV6_SADDR_RULE_SCOPE:
1474 ret = score->scopedist;
1475 break;
1476 case IPV6_SADDR_RULE_PREFIX:
1477 ret = score->matchlen;
1478 break;
1479 default:
1480 ret = !!test_bit(i, score->scorebits);
1482 goto out;
1485 switch (i) {
1486 case IPV6_SADDR_RULE_INIT:
1487 /* Rule 0: remember if hiscore is not ready yet */
1488 ret = !!score->ifa;
1489 break;
1490 case IPV6_SADDR_RULE_LOCAL:
1491 /* Rule 1: Prefer same address */
1492 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1493 break;
1494 case IPV6_SADDR_RULE_SCOPE:
1495 /* Rule 2: Prefer appropriate scope
1496  *
1497  *      ret
1498  *       ^
1499  *    -1 |  d 15
1500  *    ---+--+-+---> scope
1501  *       |
1502  *       |             d is scope of the destination.
1503  *  B-d  |  \
1504  *       |   \      <- smaller scope is better
1505  *  B-15 |    \        if scope is enough for destination.
1506  *       |             ret = B - scope (-1 <= scope >= d <= 15).
1507  * d-C-1 | /
1508  *       |/         <- greater is better
1509  *   -C  /             if scope is not enough for destination.
1510  *      /|             ret = scope - C (-1 <= d < scope <= 15).
1511  *
1512  *                     d - C - 1 < B - 15 (for all -1 <= d <= 15).
1513  *                     C > d + 14 - B >= 15 + 14 - B = 29 - B.
1514  *                     Assume B = 0 and we get C > 29.
1515  */
1516 ret = __ipv6_addr_src_scope(score->addr_type);
1517 if (ret >= dst->scope)
1518 ret = -ret;
1519 else
1520 ret -= 128; /* 30 is enough */
1521 score->scopedist = ret;
1522 break;
1523 case IPV6_SADDR_RULE_PREFERRED:
1525 /* Rule 3: Avoid deprecated and optimistic addresses */
1526 u8 avoid = IFA_F_DEPRECATED;
1528 if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
1529 avoid |= IFA_F_OPTIMISTIC;
1530 ret = ipv6_saddr_preferred(score->addr_type) ||
1531 !(score->ifa->flags & avoid);
1532 break;
1534 #ifdef CONFIG_IPV6_MIP6
1535 case IPV6_SADDR_RULE_HOA:
1537 /* Rule 4: Prefer home address */
1538 int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1539 ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1540 break;
1542 #endif
1543 case IPV6_SADDR_RULE_OIF:
1544 /* Rule 5: Prefer outgoing interface */
1545 ret = (!dst->ifindex ||
1546 dst->ifindex == score->ifa->idev->dev->ifindex);
1547 break;
1548 case IPV6_SADDR_RULE_LABEL:
1549 /* Rule 6: Prefer matching label */
1550 ret = ipv6_addr_label(net,
1551 &score->ifa->addr, score->addr_type,
1552 score->ifa->idev->dev->ifindex) == dst->label;
1553 break;
1554 case IPV6_SADDR_RULE_PRIVACY:
1556 /* Rule 7: Prefer public address
1557 * Note: prefer temporary address if use_tempaddr >= 2
1559 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1560 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1561 score->ifa->idev->cnf.use_tempaddr >= 2;
1562 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1563 break;
1565 case IPV6_SADDR_RULE_ORCHID:
1566 /* Rule 8-: Prefer ORCHID vs ORCHID or
1567 * non-ORCHID vs non-ORCHID
1569 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1570 ipv6_addr_orchid(dst->addr));
1571 break;
1572 case IPV6_SADDR_RULE_PREFIX:
1573 /* Rule 8: Use longest matching prefix */
1574 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1575 if (ret > score->ifa->prefix_len)
1576 ret = score->ifa->prefix_len;
1577 score->matchlen = ret;
1578 break;
1579 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1580 case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1581 /* Optimistic addresses still have lower precedence than other
1582 * preferred addresses.
1584 ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1585 break;
1586 #endif
1587 default:
1588 ret = 0;
1591 if (ret)
1592 __set_bit(i, score->scorebits);
1593 score->rule = i;
1594 out:
1595 return ret;
1598 static int __ipv6_dev_get_saddr(struct net *net,
1599 struct ipv6_saddr_dst *dst,
1600 struct inet6_dev *idev,
1601 struct ipv6_saddr_score *scores,
1602 int hiscore_idx)
1604 struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1606 list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
1607 int i;
1610 * - Tentative Address (RFC2462 section 5.4)
1611 * - A tentative address is not considered
1612 * "assigned to an interface" in the traditional
1613 * sense, unless it is also flagged as optimistic.
1614 * - Candidate Source Address (section 4)
1615 * - In any case, anycast addresses, multicast
1616 * addresses, and the unspecified address MUST
1617 * NOT be included in a candidate set.
1619 if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1620 (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1621 continue;
1623 score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1625 if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1626 score->addr_type & IPV6_ADDR_MULTICAST)) {
1627 net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1628 idev->dev->name);
1629 continue;
1632 score->rule = -1;
1633 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1635 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1636 int minihiscore, miniscore;
1638 minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1639 miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1641 if (minihiscore > miniscore) {
1642 if (i == IPV6_SADDR_RULE_SCOPE &&
1643 score->scopedist > 0) {
1645 * special case:
1646 * each remaining entry
1647 * has too small (not enough)
1648 * scope, because ifa entries
1649 * are sorted by their scope
1650 * values.
1652 goto out;
1654 break;
1655 } else if (minihiscore < miniscore) {
1656 swap(hiscore, score);
1657 hiscore_idx = 1 - hiscore_idx;
1659 /* restore our iterator */
1660 score->ifa = hiscore->ifa;
1662 break;
1666 out:
1667 return hiscore_idx;
1670 static int ipv6_get_saddr_master(struct net *net,
1671 const struct net_device *dst_dev,
1672 const struct net_device *master,
1673 struct ipv6_saddr_dst *dst,
1674 struct ipv6_saddr_score *scores,
1675 int hiscore_idx)
1677 struct inet6_dev *idev;
1679 idev = __in6_dev_get(dst_dev);
1680 if (idev)
1681 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1682 scores, hiscore_idx);
1684 idev = __in6_dev_get(master);
1685 if (idev)
1686 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1687 scores, hiscore_idx);
1689 return hiscore_idx;
1692 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1693 const struct in6_addr *daddr, unsigned int prefs,
1694 struct in6_addr *saddr)
1696 struct ipv6_saddr_score scores[2], *hiscore;
1697 struct ipv6_saddr_dst dst;
1698 struct inet6_dev *idev;
1699 struct net_device *dev;
1700 int dst_type;
1701 bool use_oif_addr = false;
1702 int hiscore_idx = 0;
1703 int ret = 0;
1705 dst_type = __ipv6_addr_type(daddr);
1706 dst.addr = daddr;
1707 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1708 dst.scope = __ipv6_addr_src_scope(dst_type);
1709 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1710 dst.prefs = prefs;
1712 scores[hiscore_idx].rule = -1;
1713 scores[hiscore_idx].ifa = NULL;
1715 rcu_read_lock();
1717 /* Candidate Source Address (section 4)
1718 * - multicast and link-local destination address,
1719 * the set of candidate source address MUST only
1720 * include addresses assigned to interfaces
1721 * belonging to the same link as the outgoing
1722 * interface.
1723 * (- For site-local destination addresses, the
1724 * set of candidate source addresses MUST only
1725 * include addresses assigned to interfaces
1726 * belonging to the same site as the outgoing
1727 * interface.)
1728 * - "It is RECOMMENDED that the candidate source addresses
1729 * be the set of unicast addresses assigned to the
1730 * interface that will be used to send to the destination
1731 * (the 'outgoing' interface)." (RFC 6724)
1733 if (dst_dev) {
1734 idev = __in6_dev_get(dst_dev);
1735 if ((dst_type & IPV6_ADDR_MULTICAST) ||
1736 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1737 (idev && idev->cnf.use_oif_addrs_only)) {
1738 use_oif_addr = true;
1742 if (use_oif_addr) {
1743 if (idev)
1744 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1745 } else {
1746 const struct net_device *master;
1747 int master_idx = 0;
1749 /* if dst_dev exists and is enslaved to an L3 device, then
1750 * prefer addresses from dst_dev and then the master over
1751 * any other enslaved devices in the L3 domain.
1753 master = l3mdev_master_dev_rcu(dst_dev);
1754 if (master) {
1755 master_idx = master->ifindex;
1757 hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1758 master, &dst,
1759 scores, hiscore_idx);
1761 if (scores[hiscore_idx].ifa)
1762 goto out;
1765 for_each_netdev_rcu(net, dev) {
1766 /* only consider addresses on devices in the
1767 * same L3 domain
1769 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1770 continue;
1771 idev = __in6_dev_get(dev);
1772 if (!idev)
1773 continue;
1774 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1778 out:
1779 hiscore = &scores[hiscore_idx];
1780 if (!hiscore->ifa)
1781 ret = -EADDRNOTAVAIL;
1782 else
1783 *saddr = hiscore->ifa->addr;
1785 rcu_read_unlock();
1786 return ret;
1788 EXPORT_SYMBOL(ipv6_dev_get_saddr);
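/* Illustrative usage (not taken from a real caller in this file): a caller
 * that prefers a temporary source address for an outgoing packet would do
 *
 *	struct in6_addr saddr;
 *	int err = ipv6_dev_get_saddr(net, dst_dev, &daddr,
 *				     IPV6_PREFER_SRC_TMP, &saddr);
 *
 * and fall back or fail when err is -EADDRNOTAVAIL.
 */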
1790 int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1791 u32 banned_flags)
1793 struct inet6_ifaddr *ifp;
1794 int err = -EADDRNOTAVAIL;
1796 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1797 if (ifp->scope > IFA_LINK)
1798 break;
1799 if (ifp->scope == IFA_LINK &&
1800 !(ifp->flags & banned_flags)) {
1801 *addr = ifp->addr;
1802 err = 0;
1803 break;
1806 return err;
1809 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1810 u32 banned_flags)
1812 struct inet6_dev *idev;
1813 int err = -EADDRNOTAVAIL;
1815 rcu_read_lock();
1816 idev = __in6_dev_get(dev);
1817 if (idev) {
1818 read_lock_bh(&idev->lock);
1819 err = __ipv6_get_lladdr(idev, addr, banned_flags);
1820 read_unlock_bh(&idev->lock);
1822 rcu_read_unlock();
1823 return err;
1826 static int ipv6_count_addresses(const struct inet6_dev *idev)
1828 const struct inet6_ifaddr *ifp;
1829 int cnt = 0;
1831 rcu_read_lock();
1832 list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1833 cnt++;
1834 rcu_read_unlock();
1835 return cnt;
1838 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1839 const struct net_device *dev, int strict)
1841 return ipv6_chk_addr_and_flags(net, addr, dev, !dev,
1842 strict, IFA_F_TENTATIVE);
1844 EXPORT_SYMBOL(ipv6_chk_addr);
1846 /* device argument is used to find the L3 domain of interest. If
1847 * skip_dev_check is set, then the ifp device is not checked against
1848 * the passed in dev argument. So the 2 cases for addresses checks are:
1849 * 1. does the address exist in the L3 domain that dev is part of
1850 * (skip_dev_check = true), or
1852 * 2. does the address exist on the specific device
1853 * (skip_dev_check = false)
1855 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1856 const struct net_device *dev, bool skip_dev_check,
1857 int strict, u32 banned_flags)
1859 unsigned int hash = inet6_addr_hash(net, addr);
1860 const struct net_device *l3mdev;
1861 struct inet6_ifaddr *ifp;
1862 u32 ifp_flags;
1864 rcu_read_lock();
1866 l3mdev = l3mdev_master_dev_rcu(dev);
1867 if (skip_dev_check)
1868 dev = NULL;
1870 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1871 if (!net_eq(dev_net(ifp->idev->dev), net))
1872 continue;
1874 if (l3mdev_master_dev_rcu(ifp->idev->dev) != l3mdev)
1875 continue;
1877 /* Decouple optimistic from tentative for evaluation here.
1878 * Ban optimistic addresses explicitly, when required.
1880 ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1881 ? (ifp->flags&~IFA_F_TENTATIVE)
1882 : ifp->flags;
1883 if (ipv6_addr_equal(&ifp->addr, addr) &&
1884 !(ifp_flags&banned_flags) &&
1885 (!dev || ifp->idev->dev == dev ||
1886 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1887 rcu_read_unlock();
1888 return 1;
1892 rcu_read_unlock();
1893 return 0;
1895 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
1898 /* Compares an address/prefix_len with addresses on device @dev.
1899 * If one is found it returns true.
1901 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1902 const unsigned int prefix_len, struct net_device *dev)
1904 const struct inet6_ifaddr *ifa;
1905 const struct inet6_dev *idev;
1906 bool ret = false;
1908 rcu_read_lock();
1909 idev = __in6_dev_get(dev);
1910 if (idev) {
1911 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1912 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1913 if (ret)
1914 break;
1917 rcu_read_unlock();
1919 return ret;
1921 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1923 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1925 const struct inet6_ifaddr *ifa;
1926 const struct inet6_dev *idev;
1927 int onlink;
1929 onlink = 0;
1930 rcu_read_lock();
1931 idev = __in6_dev_get(dev);
1932 if (idev) {
1933 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1934 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1935 ifa->prefix_len);
1936 if (onlink)
1937 break;
1940 rcu_read_unlock();
1941 return onlink;
1943 EXPORT_SYMBOL(ipv6_chk_prefix);
1945 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
1946 struct net_device *dev, int strict)
1948 unsigned int hash = inet6_addr_hash(net, addr);
1949 struct inet6_ifaddr *ifp, *result = NULL;
1951 rcu_read_lock();
1952 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1953 if (!net_eq(dev_net(ifp->idev->dev), net))
1954 continue;
1955 if (ipv6_addr_equal(&ifp->addr, addr)) {
1956 if (!dev || ifp->idev->dev == dev ||
1957 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
1958 result = ifp;
1959 in6_ifa_hold(ifp);
1960 break;
1964 rcu_read_unlock();
1966 return result;
1969 /* Gets referenced address, destroys ifaddr */
1971 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1973 if (dad_failed)
1974 ifp->flags |= IFA_F_DADFAILED;
1976 if (ifp->flags&IFA_F_TEMPORARY) {
1977 struct inet6_ifaddr *ifpub;
1978 spin_lock_bh(&ifp->lock);
1979 ifpub = ifp->ifpub;
1980 if (ifpub) {
1981 in6_ifa_hold(ifpub);
1982 spin_unlock_bh(&ifp->lock);
1983 ipv6_create_tempaddr(ifpub, ifp, true);
1984 in6_ifa_put(ifpub);
1985 } else {
1986 spin_unlock_bh(&ifp->lock);
1988 ipv6_del_addr(ifp);
1989 } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
1990 spin_lock_bh(&ifp->lock);
1991 addrconf_del_dad_work(ifp);
1992 ifp->flags |= IFA_F_TENTATIVE;
1993 if (dad_failed)
1994 ifp->flags &= ~IFA_F_OPTIMISTIC;
1995 spin_unlock_bh(&ifp->lock);
1996 if (dad_failed)
1997 ipv6_ifa_notify(0, ifp);
1998 in6_ifa_put(ifp);
1999 } else {
2000 ipv6_del_addr(ifp);
2004 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
2006 int err = -ENOENT;
2008 spin_lock_bh(&ifp->lock);
2009 if (ifp->state == INET6_IFADDR_STATE_DAD) {
2010 ifp->state = INET6_IFADDR_STATE_POSTDAD;
2011 err = 0;
2013 spin_unlock_bh(&ifp->lock);
2015 return err;
2018 void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
2020 struct inet6_dev *idev = ifp->idev;
2021 struct net *net = dev_net(ifp->idev->dev);
2023 if (addrconf_dad_end(ifp)) {
2024 in6_ifa_put(ifp);
2025 return;
2028 net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n",
2029 ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
2031 spin_lock_bh(&ifp->lock);
2033 if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2034 struct in6_addr new_addr;
2035 struct inet6_ifaddr *ifp2;
2036 int retries = ifp->stable_privacy_retry + 1;
2037 struct ifa6_config cfg = {
2038 .pfx = &new_addr,
2039 .plen = ifp->prefix_len,
2040 .ifa_flags = ifp->flags,
2041 .valid_lft = ifp->valid_lft,
2042 .preferred_lft = ifp->prefered_lft,
2043 .scope = ifp->scope,
2046 if (retries > net->ipv6.sysctl.idgen_retries) {
2047 net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2048 ifp->idev->dev->name);
2049 goto errdad;
2052 new_addr = ifp->addr;
2053 if (ipv6_generate_stable_address(&new_addr, retries,
2054 idev))
2055 goto errdad;
2057 spin_unlock_bh(&ifp->lock);
2059 if (idev->cnf.max_addresses &&
2060 ipv6_count_addresses(idev) >=
2061 idev->cnf.max_addresses)
2062 goto lock_errdad;
2064 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2065 ifp->idev->dev->name);
2067 ifp2 = ipv6_add_addr(idev, &cfg, false, NULL);
2068 if (IS_ERR(ifp2))
2069 goto lock_errdad;
2071 spin_lock_bh(&ifp2->lock);
2072 ifp2->stable_privacy_retry = retries;
2073 ifp2->state = INET6_IFADDR_STATE_PREDAD;
2074 spin_unlock_bh(&ifp2->lock);
2076 addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2077 in6_ifa_put(ifp2);
2078 lock_errdad:
2079 spin_lock_bh(&ifp->lock);
2082 errdad:
2083 /* transition from _POSTDAD to _ERRDAD */
2084 ifp->state = INET6_IFADDR_STATE_ERRDAD;
2085 spin_unlock_bh(&ifp->lock);
2087 addrconf_mod_dad_work(ifp, 0);
2088 in6_ifa_put(ifp);
2091 /* Join the solicited-node multicast group for this address.
2092 * caller must hold RTNL */
2093 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2095 struct in6_addr maddr;
2097 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2098 return;
2100 addrconf_addr_solict_mult(addr, &maddr);
2101 ipv6_dev_mc_inc(dev, &maddr);
2104 /* caller must hold RTNL */
2105 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2107 struct in6_addr maddr;
2109 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2110 return;
2112 addrconf_addr_solict_mult(addr, &maddr);
2113 __ipv6_dev_mc_dec(idev, &maddr);
2116 /* caller must hold RTNL */
2117 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2119 struct in6_addr addr;
2121 if (ifp->prefix_len >= 127) /* RFC 6164 */
2122 return;
2123 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2124 if (ipv6_addr_any(&addr))
2125 return;
2126 __ipv6_dev_ac_inc(ifp->idev, &addr);
2129 /* caller must hold RTNL */
2130 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2132 struct in6_addr addr;
2134 if (ifp->prefix_len >= 127) /* RFC 6164 */
2135 return;
2136 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2137 if (ipv6_addr_any(&addr))
2138 return;
2139 __ipv6_dev_ac_dec(ifp->idev, &addr);
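/*
 * Per-link-type interface identifier helpers.  Each derives the low
 * 64 bits of an autoconfigured address from the device's link-layer
 * address; which bytes are copied and whether the universal/local bit
 * is toggled depends on the link type.  For a 48-bit address the
 * identifier is formed by inserting 0xFFFE between the upper and
 * lower three bytes, as in the ETH_ALEN case below.
 */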
2142 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2144 switch (dev->addr_len) {
2145 case ETH_ALEN:
2146 memcpy(eui, dev->dev_addr, 3);
2147 eui[3] = 0xFF;
2148 eui[4] = 0xFE;
2149 memcpy(eui + 5, dev->dev_addr + 3, 3);
2150 break;
2151 case EUI64_ADDR_LEN:
2152 memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2153 eui[0] ^= 2;
2154 break;
2155 default:
2156 return -1;
2159 return 0;
2162 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2164 union fwnet_hwaddr *ha;
2166 if (dev->addr_len != FWNET_ALEN)
2167 return -1;
2169 ha = (union fwnet_hwaddr *)dev->dev_addr;
2171 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2172 eui[0] ^= 2;
2173 return 0;
2176 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2178 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
2179 if (dev->addr_len != ARCNET_ALEN)
2180 return -1;
2181 memset(eui, 0, 7);
2182 eui[7] = *(u8 *)dev->dev_addr;
2183 return 0;
2186 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2188 if (dev->addr_len != INFINIBAND_ALEN)
2189 return -1;
2190 memcpy(eui, dev->dev_addr + 12, 8);
2191 eui[0] |= 2;
2192 return 0;
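/*
 * ISATAP (RFC 4214) interface identifiers embed an IPv4 address as
 * 00:00:5E:FE:<v4addr>, with the universal bit (0x02) set in the
 * first byte only when the embedded IPv4 address is globally unique.
 */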
2195 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2197 if (addr == 0)
2198 return -1;
2199 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2200 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2201 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2202 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2203 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2204 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2205 eui[1] = 0;
2206 eui[2] = 0x5E;
2207 eui[3] = 0xFE;
2208 memcpy(eui + 4, &addr, 4);
2209 return 0;
2212 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2214 if (dev->priv_flags & IFF_ISATAP)
2215 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2216 return -1;
2219 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2221 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2224 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2226 memcpy(eui, dev->perm_addr, 3);
2227 memcpy(eui + 5, dev->perm_addr + 3, 3);
2228 eui[3] = 0xFF;
2229 eui[4] = 0xFE;
2230 eui[0] ^= 2;
2231 return 0;
2234 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2236 switch (dev->type) {
2237 case ARPHRD_ETHER:
2238 case ARPHRD_FDDI:
2239 return addrconf_ifid_eui48(eui, dev);
2240 case ARPHRD_ARCNET:
2241 return addrconf_ifid_arcnet(eui, dev);
2242 case ARPHRD_INFINIBAND:
2243 return addrconf_ifid_infiniband(eui, dev);
2244 case ARPHRD_SIT:
2245 return addrconf_ifid_sit(eui, dev);
2246 case ARPHRD_IPGRE:
2247 case ARPHRD_TUNNEL:
2248 return addrconf_ifid_gre(eui, dev);
2249 case ARPHRD_6LOWPAN:
2250 return addrconf_ifid_6lowpan(eui, dev);
2251 case ARPHRD_IEEE1394:
2252 return addrconf_ifid_ieee1394(eui, dev);
2253 case ARPHRD_TUNNEL6:
2254 case ARPHRD_IP6GRE:
2255 case ARPHRD_RAWIP:
2256 return addrconf_ifid_ip6tnl(eui, dev);
2258 return -1;
2261 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2263 int err = -1;
2264 struct inet6_ifaddr *ifp;
2266 read_lock_bh(&idev->lock);
2267 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2268 if (ifp->scope > IFA_LINK)
2269 break;
2270 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2271 memcpy(eui, ifp->addr.s6_addr+8, 8);
2272 err = 0;
2273 break;
2276 read_unlock_bh(&idev->lock);
2277 return err;
2280 /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
2281 static void ipv6_regen_rndid(struct inet6_dev *idev)
2283 regen:
2284 get_random_bytes(idev->rndid, sizeof(idev->rndid));
2285 idev->rndid[0] &= ~0x02;
2288 * <draft-ietf-ipngwg-temp-addresses-v2-00.txt>:
2289 * check that the generated address is not inappropriate:
2291 * - Reserved subnet anycast (RFC 2526)
2292 * 11111101 11....11 1xxxxxxx
2293 * - ISATAP (RFC4214) 6.1
2294 * 00-00-5E-FE-xx-xx-xx-xx
2295 * - value 0
2296 * - XXX: already assigned to an address on the device
2298 if (idev->rndid[0] == 0xfd &&
2299 (idev->rndid[1]&idev->rndid[2]&idev->rndid[3]&idev->rndid[4]&idev->rndid[5]&idev->rndid[6]) == 0xff &&
2300 (idev->rndid[7]&0x80))
2301 goto regen;
2302 if ((idev->rndid[0]|idev->rndid[1]) == 0) {
2303 if (idev->rndid[2] == 0x5e && idev->rndid[3] == 0xfe)
2304 goto regen;
2305 if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
2306 goto regen;
2310 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
2312 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
2313 ipv6_regen_rndid(idev);
2317 * Add prefix route.
2320 static void
2321 addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric,
2322 struct net_device *dev, unsigned long expires,
2323 u32 flags, gfp_t gfp_flags)
2325 struct fib6_config cfg = {
2326 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2327 .fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
2328 .fc_ifindex = dev->ifindex,
2329 .fc_expires = expires,
2330 .fc_dst_len = plen,
2331 .fc_flags = RTF_UP | flags,
2332 .fc_nlinfo.nl_net = dev_net(dev),
2333 .fc_protocol = RTPROT_KERNEL,
2334 .fc_type = RTN_UNICAST,
2337 cfg.fc_dst = *pfx;
2339 /* Prevent useless cloning on PtP SIT.
2340 This is done here on the assumption that the whole
2341 class of non-broadcast devices does not need cloning.
2343 #if IS_ENABLED(CONFIG_IPV6_SIT)
2344 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2345 cfg.fc_flags |= RTF_NONEXTHOP;
2346 #endif
2348 ip6_route_add(&cfg, gfp_flags, NULL);
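/*
 * Look up the prefix route previously installed by addrconf for
 * pfx/plen on dev.  Only routes whose flags include all of @flags and
 * none of @noflags match; a reference is taken on the returned
 * fib6_info and must be released by the caller.
 */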
2352 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2353 int plen,
2354 const struct net_device *dev,
2355 u32 flags, u32 noflags)
2357 struct fib6_node *fn;
2358 struct fib6_info *rt = NULL;
2359 struct fib6_table *table;
2360 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2362 table = fib6_get_table(dev_net(dev), tb_id);
2363 if (!table)
2364 return NULL;
2366 rcu_read_lock();
2367 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2368 if (!fn)
2369 goto out;
2371 for_each_fib6_node_rt_rcu(fn) {
2372 if (rt->fib6_nh.nh_dev->ifindex != dev->ifindex)
2373 continue;
2374 if ((rt->fib6_flags & flags) != flags)
2375 continue;
2376 if ((rt->fib6_flags & noflags) != 0)
2377 continue;
2378 if (!fib6_info_hold_safe(rt))
2379 continue;
2380 break;
2382 out:
2383 rcu_read_unlock();
2384 return rt;
2388 /* Create "default" multicast route to the interface */
2390 static void addrconf_add_mroute(struct net_device *dev)
2392 struct fib6_config cfg = {
2393 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2394 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2395 .fc_ifindex = dev->ifindex,
2396 .fc_dst_len = 8,
2397 .fc_flags = RTF_UP,
2398 .fc_type = RTN_UNICAST,
2399 .fc_nlinfo.nl_net = dev_net(dev),
2402 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2404 ip6_route_add(&cfg, GFP_KERNEL, NULL);
2407 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2409 struct inet6_dev *idev;
2411 ASSERT_RTNL();
2413 idev = ipv6_find_idev(dev);
2414 if (!idev)
2415 return ERR_PTR(-ENOBUFS);
2417 if (idev->cnf.disable_ipv6)
2418 return ERR_PTR(-EACCES);
2420 /* Add default multicast route */
2421 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2422 addrconf_add_mroute(dev);
2424 return idev;
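/*
 * Temporary address (RFC 4941) maintenance.  Lifetimes received for
 * the public address are propagated to its temporary addresses, but
 * clamped so that a temporary address never stays valid longer than
 * temp_valid_lft (or preferred longer than temp_prefered_lft minus
 * the desync factor) counted from its creation.  For example, with
 * illustrative values: temp_valid_lft of 7 days and a temporary
 * address already 2 days old, at most 5 more days of valid lifetime
 * are granted, whatever the router advertises.
 */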
2427 static void manage_tempaddrs(struct inet6_dev *idev,
2428 struct inet6_ifaddr *ifp,
2429 __u32 valid_lft, __u32 prefered_lft,
2430 bool create, unsigned long now)
2432 u32 flags;
2433 struct inet6_ifaddr *ift;
2435 read_lock_bh(&idev->lock);
2436 /* update all temporary addresses in the list */
2437 list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2438 int age, max_valid, max_prefered;
2440 if (ifp != ift->ifpub)
2441 continue;
2443 /* RFC 4941 section 3.3:
2444 * If a received option will extend the lifetime of a public
2445 * address, the lifetimes of temporary addresses should
2446 * be extended, subject to the overall constraint that no
2447 * temporary addresses should ever remain "valid" or "preferred"
2448 * for a time longer than (TEMP_VALID_LIFETIME) or
2449 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2451 age = (now - ift->cstamp) / HZ;
2452 max_valid = idev->cnf.temp_valid_lft - age;
2453 if (max_valid < 0)
2454 max_valid = 0;
2456 max_prefered = idev->cnf.temp_prefered_lft -
2457 idev->desync_factor - age;
2458 if (max_prefered < 0)
2459 max_prefered = 0;
2461 if (valid_lft > max_valid)
2462 valid_lft = max_valid;
2464 if (prefered_lft > max_prefered)
2465 prefered_lft = max_prefered;
2467 spin_lock(&ift->lock);
2468 flags = ift->flags;
2469 ift->valid_lft = valid_lft;
2470 ift->prefered_lft = prefered_lft;
2471 ift->tstamp = now;
2472 if (prefered_lft > 0)
2473 ift->flags &= ~IFA_F_DEPRECATED;
2475 spin_unlock(&ift->lock);
2476 if (!(flags&IFA_F_TENTATIVE))
2477 ipv6_ifa_notify(0, ift);
2480 if ((create || list_empty(&idev->tempaddr_list)) &&
2481 idev->cnf.use_tempaddr > 0) {
2482 /* When a new public address is created as described
2483 * in [ADDRCONF], also create a new temporary address.
2484 * Also create a temporary address if it's enabled but
2485 * no temporary address currently exists.
2487 read_unlock_bh(&idev->lock);
2488 ipv6_create_tempaddr(ifp, NULL, false);
2489 } else {
2490 read_unlock_bh(&idev->lock);
2494 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2496 return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2497 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2500 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2501 const struct prefix_info *pinfo,
2502 struct inet6_dev *in6_dev,
2503 const struct in6_addr *addr, int addr_type,
2504 u32 addr_flags, bool sllao, bool tokenized,
2505 __u32 valid_lft, u32 prefered_lft)
2507 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2508 int create = 0, update_lft = 0;
2510 if (!ifp && valid_lft) {
2511 int max_addresses = in6_dev->cnf.max_addresses;
2512 struct ifa6_config cfg = {
2513 .pfx = addr,
2514 .plen = pinfo->prefix_len,
2515 .ifa_flags = addr_flags,
2516 .valid_lft = valid_lft,
2517 .preferred_lft = prefered_lft,
2518 .scope = addr_type & IPV6_ADDR_SCOPE_MASK,
2521 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2522 if ((net->ipv6.devconf_all->optimistic_dad ||
2523 in6_dev->cnf.optimistic_dad) &&
2524 !net->ipv6.devconf_all->forwarding && sllao)
2525 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
2526 #endif
2528 /* Do not allow creating too many autoconfigured
2529 * addresses; that would be too easy a way to crash the kernel.
2531 if (!max_addresses ||
2532 ipv6_count_addresses(in6_dev) < max_addresses)
2533 ifp = ipv6_add_addr(in6_dev, &cfg, false, NULL);
2535 if (IS_ERR_OR_NULL(ifp))
2536 return -1;
2538 create = 1;
2539 spin_lock_bh(&ifp->lock);
2540 ifp->flags |= IFA_F_MANAGETEMPADDR;
2541 ifp->cstamp = jiffies;
2542 ifp->tokenized = tokenized;
2543 spin_unlock_bh(&ifp->lock);
2544 addrconf_dad_start(ifp);
2547 if (ifp) {
2548 u32 flags;
2549 unsigned long now;
2550 u32 stored_lft;
2552 /* update lifetime (RFC2462 5.5.3 e) */
2553 spin_lock_bh(&ifp->lock);
2554 now = jiffies;
2555 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2556 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2557 else
2558 stored_lft = 0;
2559 if (!create && stored_lft) {
2560 const u32 minimum_lft = min_t(u32,
2561 stored_lft, MIN_VALID_LIFETIME);
2562 valid_lft = max(valid_lft, minimum_lft);
2564 /* RFC4862 Section 5.5.3e:
2565 * "Note that the preferred lifetime of the
2566 * corresponding address is always reset to
2567 * the Preferred Lifetime in the received
2568 * Prefix Information option, regardless of
2569 * whether the valid lifetime is also reset or
2570 * ignored."
2572 * So we should always update prefered_lft here.
2574 update_lft = 1;
2577 if (update_lft) {
2578 ifp->valid_lft = valid_lft;
2579 ifp->prefered_lft = prefered_lft;
2580 ifp->tstamp = now;
2581 flags = ifp->flags;
2582 ifp->flags &= ~IFA_F_DEPRECATED;
2583 spin_unlock_bh(&ifp->lock);
2585 if (!(flags&IFA_F_TENTATIVE))
2586 ipv6_ifa_notify(0, ifp);
2587 } else
2588 spin_unlock_bh(&ifp->lock);
2590 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2591 create, now);
2593 in6_ifa_put(ifp);
2594 addrconf_verify();
2597 return 0;
2599 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
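/*
 * Handle a Prefix Information option received in a Router
 * Advertisement (RFC 4861/4862): validate the lifetimes, add or
 * refresh the on-link prefix route, and perform stateless address
 * autoconfiguration for prefixes carrying the autonomous flag.
 */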
2601 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2603 struct prefix_info *pinfo;
2604 __u32 valid_lft;
2605 __u32 prefered_lft;
2606 int addr_type, err;
2607 u32 addr_flags = 0;
2608 struct inet6_dev *in6_dev;
2609 struct net *net = dev_net(dev);
2611 pinfo = (struct prefix_info *) opt;
2613 if (len < sizeof(struct prefix_info)) {
2614 netdev_dbg(dev, "addrconf: prefix option too short\n");
2615 return;
2619 * Validation checks ([ADDRCONF], page 19)
2622 addr_type = ipv6_addr_type(&pinfo->prefix);
2624 if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2625 return;
2627 valid_lft = ntohl(pinfo->valid);
2628 prefered_lft = ntohl(pinfo->prefered);
2630 if (prefered_lft > valid_lft) {
2631 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2632 return;
2635 in6_dev = in6_dev_get(dev);
2637 if (!in6_dev) {
2638 net_dbg_ratelimited("addrconf: device %s not configured\n",
2639 dev->name);
2640 return;
2644 * Two things going on here:
2645 * 1) Add routes for on-link prefixes
2646 * 2) Configure prefixes with the auto flag set
2649 if (pinfo->onlink) {
2650 struct fib6_info *rt;
2651 unsigned long rt_expires;
2653 /* Avoid arithmetic overflow. Really, we could
2654 * save rt_expires in seconds, likely valid_lft,
2655 * but that would require a division in the fib gc,
2656 * which is not good.
2658 if (HZ > USER_HZ)
2659 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2660 else
2661 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2663 if (addrconf_finite_timeout(rt_expires))
2664 rt_expires *= HZ;
2666 rt = addrconf_get_prefix_route(&pinfo->prefix,
2667 pinfo->prefix_len,
2668 dev,
2669 RTF_ADDRCONF | RTF_PREFIX_RT,
2670 RTF_GATEWAY | RTF_DEFAULT);
2672 if (rt) {
2673 /* Autoconf prefix route */
2674 if (valid_lft == 0) {
2675 ip6_del_rt(net, rt);
2676 rt = NULL;
2677 } else if (addrconf_finite_timeout(rt_expires)) {
2678 /* not infinity */
2679 fib6_set_expires(rt, jiffies + rt_expires);
2680 } else {
2681 fib6_clean_expires(rt);
2683 } else if (valid_lft) {
2684 clock_t expires = 0;
2685 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2686 if (addrconf_finite_timeout(rt_expires)) {
2687 /* not infinity */
2688 flags |= RTF_EXPIRES;
2689 expires = jiffies_to_clock_t(rt_expires);
2691 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2692 0, dev, expires, flags,
2693 GFP_ATOMIC);
2695 fib6_info_release(rt);
2698 /* Try to figure out our local address for this prefix */
2700 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2701 struct in6_addr addr;
2702 bool tokenized = false, dev_addr_generated = false;
2704 if (pinfo->prefix_len == 64) {
2705 memcpy(&addr, &pinfo->prefix, 8);
2707 if (!ipv6_addr_any(&in6_dev->token)) {
2708 read_lock_bh(&in6_dev->lock);
2709 memcpy(addr.s6_addr + 8,
2710 in6_dev->token.s6_addr + 8, 8);
2711 read_unlock_bh(&in6_dev->lock);
2712 tokenized = true;
2713 } else if (is_addr_mode_generate_stable(in6_dev) &&
2714 !ipv6_generate_stable_address(&addr, 0,
2715 in6_dev)) {
2716 addr_flags |= IFA_F_STABLE_PRIVACY;
2717 goto ok;
2718 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2719 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2720 goto put;
2721 } else {
2722 dev_addr_generated = true;
2724 goto ok;
2726 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2727 pinfo->prefix_len);
2728 goto put;
2731 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2732 &addr, addr_type,
2733 addr_flags, sllao,
2734 tokenized, valid_lft,
2735 prefered_lft);
2736 if (err)
2737 goto put;
2739 /* Ignore the error case here because the preceding prefix
2740 * address add succeeded, which will be notified.
2742 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2743 addr_type, addr_flags, sllao,
2744 tokenized, valid_lft,
2745 prefered_lft,
2746 dev_addr_generated);
2748 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2749 put:
2750 in6_dev_put(in6_dev);
2754 * Set destination address.
2755 * Special case for SIT interfaces where we create a new "virtual"
2756 * device.
2758 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2760 struct in6_ifreq ireq;
2761 struct net_device *dev;
2762 int err = -EINVAL;
2764 rtnl_lock();
2766 err = -EFAULT;
2767 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2768 goto err_exit;
2770 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2772 err = -ENODEV;
2773 if (!dev)
2774 goto err_exit;
2776 #if IS_ENABLED(CONFIG_IPV6_SIT)
2777 if (dev->type == ARPHRD_SIT) {
2778 const struct net_device_ops *ops = dev->netdev_ops;
2779 struct ifreq ifr;
2780 struct ip_tunnel_parm p;
2782 err = -EADDRNOTAVAIL;
2783 if (!(ipv6_addr_type(&ireq.ifr6_addr) & IPV6_ADDR_COMPATv4))
2784 goto err_exit;
2786 memset(&p, 0, sizeof(p));
2787 p.iph.daddr = ireq.ifr6_addr.s6_addr32[3];
2788 p.iph.saddr = 0;
2789 p.iph.version = 4;
2790 p.iph.ihl = 5;
2791 p.iph.protocol = IPPROTO_IPV6;
2792 p.iph.ttl = 64;
2793 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
2795 if (ops->ndo_do_ioctl) {
2796 mm_segment_t oldfs = get_fs();
2798 set_fs(KERNEL_DS);
2799 err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
2800 set_fs(oldfs);
2801 } else
2802 err = -EOPNOTSUPP;
2804 if (err == 0) {
2805 err = -ENOBUFS;
2806 dev = __dev_get_by_name(net, p.name);
2807 if (!dev)
2808 goto err_exit;
2809 err = dev_open(dev);
2812 #endif
2814 err_exit:
2815 rtnl_unlock();
2816 return err;
2819 static int ipv6_mc_config(struct sock *sk, bool join,
2820 const struct in6_addr *addr, int ifindex)
2822 int ret;
2824 ASSERT_RTNL();
2826 lock_sock(sk);
2827 if (join)
2828 ret = ipv6_sock_mc_join(sk, ifindex, addr);
2829 else
2830 ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2831 release_sock(sk);
2833 return ret;
2837 * Manual configuration of address on an interface
2839 static int inet6_addr_add(struct net *net, int ifindex,
2840 struct ifa6_config *cfg,
2841 struct netlink_ext_ack *extack)
2843 struct inet6_ifaddr *ifp;
2844 struct inet6_dev *idev;
2845 struct net_device *dev;
2846 unsigned long timeout;
2847 clock_t expires;
2848 u32 flags;
2850 ASSERT_RTNL();
2852 if (cfg->plen > 128)
2853 return -EINVAL;
2855 /* check the lifetime */
2856 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
2857 return -EINVAL;
2859 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64)
2860 return -EINVAL;
2862 dev = __dev_get_by_index(net, ifindex);
2863 if (!dev)
2864 return -ENODEV;
2866 idev = addrconf_add_dev(dev);
2867 if (IS_ERR(idev))
2868 return PTR_ERR(idev);
2870 if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2871 int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2872 true, cfg->pfx, ifindex);
2874 if (ret < 0)
2875 return ret;
2878 cfg->scope = ipv6_addr_scope(cfg->pfx);
2880 timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
2881 if (addrconf_finite_timeout(timeout)) {
2882 expires = jiffies_to_clock_t(timeout * HZ);
2883 cfg->valid_lft = timeout;
2884 flags = RTF_EXPIRES;
2885 } else {
2886 expires = 0;
2887 flags = 0;
2888 cfg->ifa_flags |= IFA_F_PERMANENT;
2891 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
2892 if (addrconf_finite_timeout(timeout)) {
2893 if (timeout == 0)
2894 cfg->ifa_flags |= IFA_F_DEPRECATED;
2895 cfg->preferred_lft = timeout;
2898 ifp = ipv6_add_addr(idev, cfg, true, extack);
2899 if (!IS_ERR(ifp)) {
2900 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
2901 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
2902 ifp->rt_priority, dev, expires,
2903 flags, GFP_KERNEL);
2906 /* Send a netlink notification if DAD is enabled and
2907 * optimistic flag is not set
2909 if (!(ifp->flags & (IFA_F_OPTIMISTIC | IFA_F_NODAD)))
2910 ipv6_ifa_notify(0, ifp);
2912 * Note that section 3.1 of RFC 4429 indicates
2913 * that the Optimistic flag should not be set for
2914 * manually configured addresses
2916 addrconf_dad_start(ifp);
2917 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR)
2918 manage_tempaddrs(idev, ifp, cfg->valid_lft,
2919 cfg->preferred_lft, true, jiffies);
2920 in6_ifa_put(ifp);
2921 addrconf_verify_rtnl();
2922 return 0;
2923 } else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2924 ipv6_mc_config(net->ipv6.mc_autojoin_sk, false,
2925 cfg->pfx, ifindex);
2928 return PTR_ERR(ifp);
2931 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
2932 const struct in6_addr *pfx, unsigned int plen)
2934 struct inet6_ifaddr *ifp;
2935 struct inet6_dev *idev;
2936 struct net_device *dev;
2938 if (plen > 128)
2939 return -EINVAL;
2941 dev = __dev_get_by_index(net, ifindex);
2942 if (!dev)
2943 return -ENODEV;
2945 idev = __in6_dev_get(dev);
2946 if (!idev)
2947 return -ENXIO;
2949 read_lock_bh(&idev->lock);
2950 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2951 if (ifp->prefix_len == plen &&
2952 ipv6_addr_equal(pfx, &ifp->addr)) {
2953 in6_ifa_hold(ifp);
2954 read_unlock_bh(&idev->lock);
2956 if (!(ifp->flags & IFA_F_TEMPORARY) &&
2957 (ifa_flags & IFA_F_MANAGETEMPADDR))
2958 manage_tempaddrs(idev, ifp, 0, 0, false,
2959 jiffies);
2960 ipv6_del_addr(ifp);
2961 addrconf_verify_rtnl();
2962 if (ipv6_addr_is_multicast(pfx)) {
2963 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2964 false, pfx, dev->ifindex);
2966 return 0;
2969 read_unlock_bh(&idev->lock);
2970 return -EADDRNOTAVAIL;
2974 int addrconf_add_ifaddr(struct net *net, void __user *arg)
2976 struct ifa6_config cfg = {
2977 .ifa_flags = IFA_F_PERMANENT,
2978 .preferred_lft = INFINITY_LIFE_TIME,
2979 .valid_lft = INFINITY_LIFE_TIME,
2981 struct in6_ifreq ireq;
2982 int err;
2984 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2985 return -EPERM;
2987 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2988 return -EFAULT;
2990 cfg.pfx = &ireq.ifr6_addr;
2991 cfg.plen = ireq.ifr6_prefixlen;
2993 rtnl_lock();
2994 err = inet6_addr_add(net, ireq.ifr6_ifindex, &cfg, NULL);
2995 rtnl_unlock();
2996 return err;
2999 int addrconf_del_ifaddr(struct net *net, void __user *arg)
3001 struct in6_ifreq ireq;
3002 int err;
3004 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3005 return -EPERM;
3007 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3008 return -EFAULT;
3010 rtnl_lock();
3011 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
3012 ireq.ifr6_prefixlen);
3013 rtnl_unlock();
3014 return err;
3017 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
3018 int plen, int scope)
3020 struct inet6_ifaddr *ifp;
3021 struct ifa6_config cfg = {
3022 .pfx = addr,
3023 .plen = plen,
3024 .ifa_flags = IFA_F_PERMANENT,
3025 .valid_lft = INFINITY_LIFE_TIME,
3026 .preferred_lft = INFINITY_LIFE_TIME,
3027 .scope = scope
3030 ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3031 if (!IS_ERR(ifp)) {
3032 spin_lock_bh(&ifp->lock);
3033 ifp->flags &= ~IFA_F_TENTATIVE;
3034 spin_unlock_bh(&ifp->lock);
3035 rt_genid_bump_ipv6(dev_net(idev->dev));
3036 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3037 in6_ifa_put(ifp);
3041 #if IS_ENABLED(CONFIG_IPV6_SIT)
3042 static void sit_add_v4_addrs(struct inet6_dev *idev)
3044 struct in6_addr addr;
3045 struct net_device *dev;
3046 struct net *net = dev_net(idev->dev);
3047 int scope, plen;
3048 u32 pflags = 0;
3050 ASSERT_RTNL();
3052 memset(&addr, 0, sizeof(struct in6_addr));
3053 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
3055 if (idev->dev->flags&IFF_POINTOPOINT) {
3056 addr.s6_addr32[0] = htonl(0xfe800000);
3057 scope = IFA_LINK;
3058 plen = 64;
3059 } else {
3060 scope = IPV6_ADDR_COMPATv4;
3061 plen = 96;
3062 pflags |= RTF_NONEXTHOP;
3065 if (addr.s6_addr32[3]) {
3066 add_addr(idev, &addr, plen, scope);
3067 addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
3068 GFP_KERNEL);
3069 return;
3072 for_each_netdev(net, dev) {
3073 struct in_device *in_dev = __in_dev_get_rtnl(dev);
3074 if (in_dev && (dev->flags & IFF_UP)) {
3075 struct in_ifaddr *ifa;
3077 int flag = scope;
3079 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
3081 addr.s6_addr32[3] = ifa->ifa_local;
3083 if (ifa->ifa_scope == RT_SCOPE_LINK)
3084 continue;
3085 if (ifa->ifa_scope >= RT_SCOPE_HOST) {
3086 if (idev->dev->flags&IFF_POINTOPOINT)
3087 continue;
3088 flag |= IFA_HOST;
3091 add_addr(idev, &addr, plen, flag);
3092 addrconf_prefix_route(&addr, plen, 0, idev->dev,
3093 0, pflags, GFP_KERNEL);
3098 #endif
3100 static void init_loopback(struct net_device *dev)
3102 struct inet6_dev *idev;
3104 /* ::1 */
3106 ASSERT_RTNL();
3108 idev = ipv6_find_idev(dev);
3109 if (!idev) {
3110 pr_debug("%s: add_dev failed\n", __func__);
3111 return;
3114 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
3117 void addrconf_add_linklocal(struct inet6_dev *idev,
3118 const struct in6_addr *addr, u32 flags)
3120 struct ifa6_config cfg = {
3121 .pfx = addr,
3122 .plen = 64,
3123 .ifa_flags = flags | IFA_F_PERMANENT,
3124 .valid_lft = INFINITY_LIFE_TIME,
3125 .preferred_lft = INFINITY_LIFE_TIME,
3126 .scope = IFA_LINK
3128 struct inet6_ifaddr *ifp;
3130 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3131 if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
3132 idev->cnf.optimistic_dad) &&
3133 !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3134 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
3135 #endif
3137 ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3138 if (!IS_ERR(ifp)) {
3139 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 0, idev->dev,
3140 0, 0, GFP_ATOMIC);
3141 addrconf_dad_start(ifp);
3142 in6_ifa_put(ifp);
3145 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3147 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3149 if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3150 return true;
3152 if (address.s6_addr32[2] == htonl(0x02005eff) &&
3153 ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3154 return true;
3156 if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3157 ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3158 return true;
3160 return false;
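/*
 * Stable-privacy interface identifiers in the spirit of RFC 7217:
 * SHA-1 over the configured secret, the prefix, the permanent
 * hardware address and a DAD counter.  If the result collides with a
 * reserved interface identifier, retry with an incremented counter,
 * giving up after idgen_retries attempts.
 */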
3163 static int ipv6_generate_stable_address(struct in6_addr *address,
3164 u8 dad_count,
3165 const struct inet6_dev *idev)
3167 static DEFINE_SPINLOCK(lock);
3168 static __u32 digest[SHA_DIGEST_WORDS];
3169 static __u32 workspace[SHA_WORKSPACE_WORDS];
3171 static union {
3172 char __data[SHA_MESSAGE_BYTES];
3173 struct {
3174 struct in6_addr secret;
3175 __be32 prefix[2];
3176 unsigned char hwaddr[MAX_ADDR_LEN];
3177 u8 dad_count;
3178 } __packed;
3179 } data;
3181 struct in6_addr secret;
3182 struct in6_addr temp;
3183 struct net *net = dev_net(idev->dev);
3185 BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3187 if (idev->cnf.stable_secret.initialized)
3188 secret = idev->cnf.stable_secret.secret;
3189 else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3190 secret = net->ipv6.devconf_dflt->stable_secret.secret;
3191 else
3192 return -1;
3194 retry:
3195 spin_lock_bh(&lock);
3197 sha_init(digest);
3198 memset(&data, 0, sizeof(data));
3199 memset(workspace, 0, sizeof(workspace));
3200 memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3201 data.prefix[0] = address->s6_addr32[0];
3202 data.prefix[1] = address->s6_addr32[1];
3203 data.secret = secret;
3204 data.dad_count = dad_count;
3206 sha_transform(digest, data.__data, workspace);
3208 temp = *address;
3209 temp.s6_addr32[2] = (__force __be32)digest[0];
3210 temp.s6_addr32[3] = (__force __be32)digest[1];
3212 spin_unlock_bh(&lock);
3214 if (ipv6_reserved_interfaceid(temp)) {
3215 dad_count++;
3216 if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3217 return -1;
3218 goto retry;
3221 *address = temp;
3222 return 0;
3225 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3227 struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3229 if (s->initialized)
3230 return;
3231 s = &idev->cnf.stable_secret;
3232 get_random_bytes(&s->secret, sizeof(s->secret));
3233 s->initialized = true;
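/*
 * Generate the link-local address for idev according to its
 * addr_gen_mode: EUI-64 derived from the hardware address, a
 * stable-privacy identifier, a random identifier, or none.  If
 * identifier generation fails and prefix_route is set, only the
 * fe80::/64 prefix route is installed.
 */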
3236 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3238 struct in6_addr addr;
3240 /* no link local addresses on L3 master devices */
3241 if (netif_is_l3_master(idev->dev))
3242 return;
3244 /* no link local addresses on devices flagged as slaves */
3245 if (idev->dev->flags & IFF_SLAVE)
3246 return;
3248 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3250 switch (idev->cnf.addr_gen_mode) {
3251 case IN6_ADDR_GEN_MODE_RANDOM:
3252 ipv6_gen_mode_random_init(idev);
3253 /* fallthrough */
3254 case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3255 if (!ipv6_generate_stable_address(&addr, 0, idev))
3256 addrconf_add_linklocal(idev, &addr,
3257 IFA_F_STABLE_PRIVACY);
3258 else if (prefix_route)
3259 addrconf_prefix_route(&addr, 64, 0, idev->dev,
3260 0, 0, GFP_KERNEL);
3261 break;
3262 case IN6_ADDR_GEN_MODE_EUI64:
3263 /* addrconf_add_linklocal also adds a prefix_route and we
3264 * only need to care about prefix routes if ipv6_generate_eui64
3265 * couldn't generate one.
3267 if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3268 addrconf_add_linklocal(idev, &addr, 0);
3269 else if (prefix_route)
3270 addrconf_prefix_route(&addr, 64, 0, idev->dev,
3271 0, 0, GFP_KERNEL);
3272 break;
3273 case IN6_ADDR_GEN_MODE_NONE:
3274 default:
3275 /* will not add any link local address */
3276 break;
3280 static void addrconf_dev_config(struct net_device *dev)
3282 struct inet6_dev *idev;
3284 ASSERT_RTNL();
3286 if ((dev->type != ARPHRD_ETHER) &&
3287 (dev->type != ARPHRD_FDDI) &&
3288 (dev->type != ARPHRD_ARCNET) &&
3289 (dev->type != ARPHRD_INFINIBAND) &&
3290 (dev->type != ARPHRD_IEEE1394) &&
3291 (dev->type != ARPHRD_TUNNEL6) &&
3292 (dev->type != ARPHRD_6LOWPAN) &&
3293 (dev->type != ARPHRD_IP6GRE) &&
3294 (dev->type != ARPHRD_IPGRE) &&
3295 (dev->type != ARPHRD_TUNNEL) &&
3296 (dev->type != ARPHRD_NONE) &&
3297 (dev->type != ARPHRD_RAWIP)) {
3298 /* Alas, autoconfiguration is supported only for the link types checked above. */
3299 idev = __in6_dev_get(dev);
3300 if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
3301 dev->flags & IFF_MULTICAST)
3302 ipv6_mc_up(idev);
3303 return;
3306 idev = addrconf_add_dev(dev);
3307 if (IS_ERR(idev))
3308 return;
3310 /* this device type has no EUI support */
3311 if (dev->type == ARPHRD_NONE &&
3312 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3313 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3315 addrconf_addr_gen(idev, false);
3318 #if IS_ENABLED(CONFIG_IPV6_SIT)
3319 static void addrconf_sit_config(struct net_device *dev)
3321 struct inet6_dev *idev;
3323 ASSERT_RTNL();
3326 * Configure the tunnel with one of our IPv4
3327 * addresses... we should configure all of
3328 * our v4 addrs in the tunnel
3331 idev = ipv6_find_idev(dev);
3332 if (!idev) {
3333 pr_debug("%s: add_dev failed\n", __func__);
3334 return;
3337 if (dev->priv_flags & IFF_ISATAP) {
3338 addrconf_addr_gen(idev, false);
3339 return;
3342 sit_add_v4_addrs(idev);
3344 if (dev->flags&IFF_POINTOPOINT)
3345 addrconf_add_mroute(dev);
3347 #endif
3349 #if IS_ENABLED(CONFIG_NET_IPGRE)
3350 static void addrconf_gre_config(struct net_device *dev)
3352 struct inet6_dev *idev;
3354 ASSERT_RTNL();
3356 idev = ipv6_find_idev(dev);
3357 if (!idev) {
3358 pr_debug("%s: add_dev failed\n", __func__);
3359 return;
3362 addrconf_addr_gen(idev, true);
3363 if (dev->flags & IFF_POINTOPOINT)
3364 addrconf_add_mroute(dev);
3366 #endif
3368 static int fixup_permanent_addr(struct net *net,
3369 struct inet6_dev *idev,
3370 struct inet6_ifaddr *ifp)
3372 /* !fib6_node means the host route was removed from the
3373 * FIB, for example, if 'lo' device is taken down. In that
3374 * case regenerate the host route.
3376 if (!ifp->rt || !ifp->rt->fib6_node) {
3377 struct fib6_info *f6i, *prev;
3379 f6i = addrconf_f6i_alloc(net, idev, &ifp->addr, false,
3380 GFP_ATOMIC);
3381 if (IS_ERR(f6i))
3382 return PTR_ERR(f6i);
3384 /* ifp->rt can be accessed outside of rtnl */
3385 spin_lock(&ifp->lock);
3386 prev = ifp->rt;
3387 ifp->rt = f6i;
3388 spin_unlock(&ifp->lock);
3390 fib6_info_release(prev);
3393 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3394 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3395 ifp->rt_priority, idev->dev, 0, 0,
3396 GFP_ATOMIC);
3399 if (ifp->state == INET6_IFADDR_STATE_PREDAD)
3400 addrconf_dad_start(ifp);
3402 return 0;
3405 static void addrconf_permanent_addr(struct net *net, struct net_device *dev)
3407 struct inet6_ifaddr *ifp, *tmp;
3408 struct inet6_dev *idev;
3410 idev = __in6_dev_get(dev);
3411 if (!idev)
3412 return;
3414 write_lock_bh(&idev->lock);
3416 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3417 if ((ifp->flags & IFA_F_PERMANENT) &&
3418 fixup_permanent_addr(net, idev, ifp) < 0) {
3419 write_unlock_bh(&idev->lock);
3420 in6_ifa_hold(ifp);
3421 ipv6_del_addr(ifp);
3422 write_lock_bh(&idev->lock);
3424 net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3425 idev->dev->name, &ifp->addr);
3429 write_unlock_bh(&idev->lock);
3432 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3433 void *ptr)
3435 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3436 struct netdev_notifier_change_info *change_info;
3437 struct netdev_notifier_changeupper_info *info;
3438 struct inet6_dev *idev = __in6_dev_get(dev);
3439 struct net *net = dev_net(dev);
3440 int run_pending = 0;
3441 int err;
3443 switch (event) {
3444 case NETDEV_REGISTER:
3445 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3446 idev = ipv6_add_dev(dev);
3447 if (IS_ERR(idev))
3448 return notifier_from_errno(PTR_ERR(idev));
3450 break;
3452 case NETDEV_CHANGEMTU:
3453 /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
3454 if (dev->mtu < IPV6_MIN_MTU) {
3455 addrconf_ifdown(dev, dev != net->loopback_dev);
3456 break;
3459 if (idev) {
3460 rt6_mtu_change(dev, dev->mtu);
3461 idev->cnf.mtu6 = dev->mtu;
3462 break;
3465 /* allocate new idev */
3466 idev = ipv6_add_dev(dev);
3467 if (IS_ERR(idev))
3468 break;
3470 /* device is still not ready */
3471 if (!(idev->if_flags & IF_READY))
3472 break;
3474 run_pending = 1;
3476 /* fall through */
3478 case NETDEV_UP:
3479 case NETDEV_CHANGE:
3480 if (dev->flags & IFF_SLAVE)
3481 break;
3483 if (idev && idev->cnf.disable_ipv6)
3484 break;
3486 if (event == NETDEV_UP) {
3487 /* restore routes for permanent addresses */
3488 addrconf_permanent_addr(net, dev);
3490 if (!addrconf_link_ready(dev)) {
3491 /* device is not ready yet. */
3492 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3493 dev->name);
3494 break;
3497 if (!idev && dev->mtu >= IPV6_MIN_MTU)
3498 idev = ipv6_add_dev(dev);
3500 if (!IS_ERR_OR_NULL(idev)) {
3501 idev->if_flags |= IF_READY;
3502 run_pending = 1;
3504 } else if (event == NETDEV_CHANGE) {
3505 if (!addrconf_link_ready(dev)) {
3506 /* device is still not ready. */
3507 rt6_sync_down_dev(dev, event);
3508 break;
3511 if (!IS_ERR_OR_NULL(idev)) {
3512 if (idev->if_flags & IF_READY) {
3513 /* device is already configured -
3514 * but resend MLD reports, we might
3515 * have roamed and need to update
3516 * multicast snooping switches
3518 ipv6_mc_up(idev);
3519 change_info = ptr;
3520 if (change_info->flags_changed & IFF_NOARP)
3521 addrconf_dad_run(idev, true);
3522 rt6_sync_up(dev, RTNH_F_LINKDOWN);
3523 break;
3525 idev->if_flags |= IF_READY;
3528 pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3529 dev->name);
3531 run_pending = 1;
3534 switch (dev->type) {
3535 #if IS_ENABLED(CONFIG_IPV6_SIT)
3536 case ARPHRD_SIT:
3537 addrconf_sit_config(dev);
3538 break;
3539 #endif
3540 #if IS_ENABLED(CONFIG_NET_IPGRE)
3541 case ARPHRD_IPGRE:
3542 addrconf_gre_config(dev);
3543 break;
3544 #endif
3545 case ARPHRD_LOOPBACK:
3546 init_loopback(dev);
3547 break;
3549 default:
3550 addrconf_dev_config(dev);
3551 break;
3554 if (!IS_ERR_OR_NULL(idev)) {
3555 if (run_pending)
3556 addrconf_dad_run(idev, false);
3558 /* Device has an address by now */
3559 rt6_sync_up(dev, RTNH_F_DEAD);
3562 * If the MTU changed while the interface was down,
3563 * the new MTU must be reflected in the idev as well
3564 * as in the routes when the interface comes back up.
3566 if (idev->cnf.mtu6 != dev->mtu &&
3567 dev->mtu >= IPV6_MIN_MTU) {
3568 rt6_mtu_change(dev, dev->mtu);
3569 idev->cnf.mtu6 = dev->mtu;
3571 idev->tstamp = jiffies;
3572 inet6_ifinfo_notify(RTM_NEWLINK, idev);
3575 * If the MTU that changed while the interface was down
3576 * is below IPV6_MIN_MTU, stop IPv6 on this interface.
3578 if (dev->mtu < IPV6_MIN_MTU)
3579 addrconf_ifdown(dev, dev != net->loopback_dev);
3581 break;
3583 case NETDEV_DOWN:
3584 case NETDEV_UNREGISTER:
3586 * Remove all addresses from this interface.
3588 addrconf_ifdown(dev, event != NETDEV_DOWN);
3589 break;
3591 case NETDEV_CHANGENAME:
3592 if (idev) {
3593 snmp6_unregister_dev(idev);
3594 addrconf_sysctl_unregister(idev);
3595 err = addrconf_sysctl_register(idev);
3596 if (err)
3597 return notifier_from_errno(err);
3598 err = snmp6_register_dev(idev);
3599 if (err) {
3600 addrconf_sysctl_unregister(idev);
3601 return notifier_from_errno(err);
3604 break;
3606 case NETDEV_PRE_TYPE_CHANGE:
3607 case NETDEV_POST_TYPE_CHANGE:
3608 if (idev)
3609 addrconf_type_change(dev, event);
3610 break;
3612 case NETDEV_CHANGEUPPER:
3613 info = ptr;
3615 /* flush all routes if dev is linked to or unlinked from
3616 * an L3 master device (e.g., VRF)
3618 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3619 addrconf_ifdown(dev, 0);
3622 return NOTIFY_OK;
3626 * addrconf module should be notified of a device going up
3628 static struct notifier_block ipv6_dev_notf = {
3629 .notifier_call = addrconf_notify,
3630 .priority = ADDRCONF_NOTIFY_PRIORITY,
3633 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3635 struct inet6_dev *idev;
3636 ASSERT_RTNL();
3638 idev = __in6_dev_get(dev);
3640 if (event == NETDEV_POST_TYPE_CHANGE)
3641 ipv6_mc_remap(idev);
3642 else if (event == NETDEV_PRE_TYPE_CHANGE)
3643 ipv6_mc_unmap(idev);
3646 static bool addr_is_local(const struct in6_addr *addr)
3648 return ipv6_addr_type(addr) &
3649 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3652 static int addrconf_ifdown(struct net_device *dev, int how)
3654 unsigned long event = how ? NETDEV_UNREGISTER : NETDEV_DOWN;
3655 struct net *net = dev_net(dev);
3656 struct inet6_dev *idev;
3657 struct inet6_ifaddr *ifa, *tmp;
3658 bool keep_addr = false;
3659 int state, i;
3661 ASSERT_RTNL();
3663 rt6_disable_ip(dev, event);
3665 idev = __in6_dev_get(dev);
3666 if (!idev)
3667 return -ENODEV;
3670 * Step 1: remove reference to ipv6 device from parent device.
3671 * Do not dev_put!
3673 if (how) {
3674 idev->dead = 1;
3676 /* protected by rtnl_lock */
3677 RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3679 /* Step 1.5: remove snmp6 entry */
3680 snmp6_unregister_dev(idev);
3684 /* combine the user config with event to determine if permanent
3685 * addresses are to be removed from address hash table
3687 if (!how && !idev->cnf.disable_ipv6) {
3688 /* aggregate the system setting and interface setting */
3689 int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3691 if (!_keep_addr)
3692 _keep_addr = idev->cnf.keep_addr_on_down;
3694 keep_addr = (_keep_addr > 0);
3697 /* Step 2: clear hash table */
3698 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3699 struct hlist_head *h = &inet6_addr_lst[i];
3701 spin_lock_bh(&addrconf_hash_lock);
3702 restart:
3703 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3704 if (ifa->idev == idev) {
3705 addrconf_del_dad_work(ifa);
3706 /* combined flag + permanent flag decide if
3707 * address is retained on a down event
3709 if (!keep_addr ||
3710 !(ifa->flags & IFA_F_PERMANENT) ||
3711 addr_is_local(&ifa->addr)) {
3712 hlist_del_init_rcu(&ifa->addr_lst);
3713 goto restart;
3717 spin_unlock_bh(&addrconf_hash_lock);
3720 write_lock_bh(&idev->lock);
3722 addrconf_del_rs_timer(idev);
3724 /* Step 3: clear flags for stateless addrconf */
3725 if (!how)
3726 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3728 /* Step 4: clear tempaddr list */
3729 while (!list_empty(&idev->tempaddr_list)) {
3730 ifa = list_first_entry(&idev->tempaddr_list,
3731 struct inet6_ifaddr, tmp_list);
3732 list_del(&ifa->tmp_list);
3733 write_unlock_bh(&idev->lock);
3734 spin_lock_bh(&ifa->lock);
3736 if (ifa->ifpub) {
3737 in6_ifa_put(ifa->ifpub);
3738 ifa->ifpub = NULL;
3740 spin_unlock_bh(&ifa->lock);
3741 in6_ifa_put(ifa);
3742 write_lock_bh(&idev->lock);
3745 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
3746 struct fib6_info *rt = NULL;
3747 bool keep;
3749 addrconf_del_dad_work(ifa);
3751 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3752 !addr_is_local(&ifa->addr);
3754 write_unlock_bh(&idev->lock);
3755 spin_lock_bh(&ifa->lock);
3757 if (keep) {
3758 /* set state to skip the notifier below */
3759 state = INET6_IFADDR_STATE_DEAD;
3760 ifa->state = INET6_IFADDR_STATE_PREDAD;
3761 if (!(ifa->flags & IFA_F_NODAD))
3762 ifa->flags |= IFA_F_TENTATIVE;
3764 rt = ifa->rt;
3765 ifa->rt = NULL;
3766 } else {
3767 state = ifa->state;
3768 ifa->state = INET6_IFADDR_STATE_DEAD;
3771 spin_unlock_bh(&ifa->lock);
3773 if (rt)
3774 ip6_del_rt(net, rt);
3776 if (state != INET6_IFADDR_STATE_DEAD) {
3777 __ipv6_ifa_notify(RTM_DELADDR, ifa);
3778 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3779 } else {
3780 if (idev->cnf.forwarding)
3781 addrconf_leave_anycast(ifa);
3782 addrconf_leave_solict(ifa->idev, &ifa->addr);
3785 write_lock_bh(&idev->lock);
3786 if (!keep) {
3787 list_del_rcu(&ifa->if_list);
3788 in6_ifa_put(ifa);
3792 write_unlock_bh(&idev->lock);
3794 /* Step 5: Discard anycast and multicast list */
3795 if (how) {
3796 ipv6_ac_destroy_dev(idev);
3797 ipv6_mc_destroy_dev(idev);
3798 } else {
3799 ipv6_mc_down(idev);
3802 idev->tstamp = jiffies;
3804 /* Last: Shoot the device (if unregistered) */
3805 if (how) {
3806 addrconf_sysctl_unregister(idev);
3807 neigh_parms_release(&nd_tbl, idev->nd_parms);
3808 neigh_ifdown(&nd_tbl, dev);
3809 in6_dev_put(idev);
3811 return 0;
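/*
 * Router Solicitation retransmission timer.  Up to rtr_solicits RSes
 * are sent (unlimited if the sysctl is negative), with the interval
 * between transmissions growing per rfc3315_s14_backoff_update() and
 * capped at rtr_solicit_max_interval.
 */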
3814 static void addrconf_rs_timer(struct timer_list *t)
3816 struct inet6_dev *idev = from_timer(idev, t, rs_timer);
3817 struct net_device *dev = idev->dev;
3818 struct in6_addr lladdr;
3820 write_lock(&idev->lock);
3821 if (idev->dead || !(idev->if_flags & IF_READY))
3822 goto out;
3824 if (!ipv6_accept_ra(idev))
3825 goto out;
3827 /* Announcement received after solicitation was sent */
3828 if (idev->if_flags & IF_RA_RCVD)
3829 goto out;
3831 if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3832 write_unlock(&idev->lock);
3833 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3834 ndisc_send_rs(dev, &lladdr,
3835 &in6addr_linklocal_allrouters);
3836 else
3837 goto put;
3839 write_lock(&idev->lock);
3840 idev->rs_interval = rfc3315_s14_backoff_update(
3841 idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
3842 /* The wait after the last probe can be shorter */
3843 addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3844 idev->cnf.rtr_solicits) ?
3845 idev->cnf.rtr_solicit_delay :
3846 idev->rs_interval);
3847 } else {
3849 * Note: we do not support deprecated "all on-link"
3850 * assumption any longer.
3852 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3855 out:
3856 write_unlock(&idev->lock);
3857 put:
3858 in6_dev_put(idev);
3862 * Duplicate Address Detection
3864 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3866 unsigned long rand_num;
3867 struct inet6_dev *idev = ifp->idev;
3868 u64 nonce;
3870 if (ifp->flags & IFA_F_OPTIMISTIC)
3871 rand_num = 0;
3872 else
3873 rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
3875 nonce = 0;
3876 if (idev->cnf.enhanced_dad ||
3877 dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
3879 do get_random_bytes(&nonce, 6);
3880 while (nonce == 0);
3882 ifp->dad_nonce = nonce;
3883 ifp->dad_probes = idev->cnf.dad_transmits;
3884 addrconf_mod_dad_work(ifp, rand_num);
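/*
 * Begin DAD for ifp.  DAD is skipped entirely (the address becomes
 * usable immediately) on NOARP/loopback devices, when accept_dad is
 * disabled, or for addresses flagged IFA_F_NODAD; optimistic
 * addresses (RFC 4429) may already be used for output while DAD is
 * still running.
 */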
3887 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3889 struct inet6_dev *idev = ifp->idev;
3890 struct net_device *dev = idev->dev;
3891 bool bump_id, notify = false;
3892 struct net *net;
3894 addrconf_join_solict(dev, &ifp->addr);
3896 prandom_seed((__force u32) ifp->addr.s6_addr32[3]);
3898 read_lock_bh(&idev->lock);
3899 spin_lock(&ifp->lock);
3900 if (ifp->state == INET6_IFADDR_STATE_DEAD)
3901 goto out;
3903 net = dev_net(dev);
3904 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
3905 (net->ipv6.devconf_all->accept_dad < 1 &&
3906 idev->cnf.accept_dad < 1) ||
3907 !(ifp->flags&IFA_F_TENTATIVE) ||
3908 ifp->flags & IFA_F_NODAD) {
3909 bool send_na = false;
3911 if (ifp->flags & IFA_F_TENTATIVE &&
3912 !(ifp->flags & IFA_F_OPTIMISTIC))
3913 send_na = true;
3914 bump_id = ifp->flags & IFA_F_TENTATIVE;
3915 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3916 spin_unlock(&ifp->lock);
3917 read_unlock_bh(&idev->lock);
3919 addrconf_dad_completed(ifp, bump_id, send_na);
3920 return;
3923 if (!(idev->if_flags & IF_READY)) {
3924 spin_unlock(&ifp->lock);
3925 read_unlock_bh(&idev->lock);
3927 * If the device is not ready:
3928 * - keep it tentative if it is a permanent address.
3929 * - otherwise, kill it.
3931 in6_ifa_hold(ifp);
3932 addrconf_dad_stop(ifp, 0);
3933 return;
3937 * Optimistic nodes can start receiving
3938 * frames right away
3940 if (ifp->flags & IFA_F_OPTIMISTIC) {
3941 ip6_ins_rt(net, ifp->rt);
3942 if (ipv6_use_optimistic_addr(net, idev)) {
3943 /* Because optimistic nodes can use this address,
3944 * notify listeners. If DAD fails, RTM_DELADDR is sent.
3946 notify = true;
3950 addrconf_dad_kick(ifp);
3951 out:
3952 spin_unlock(&ifp->lock);
3953 read_unlock_bh(&idev->lock);
3954 if (notify)
3955 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3958 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
3960 bool begin_dad = false;
3962 spin_lock_bh(&ifp->lock);
3963 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
3964 ifp->state = INET6_IFADDR_STATE_PREDAD;
3965 begin_dad = true;
3967 spin_unlock_bh(&ifp->lock);
3969 if (begin_dad)
3970 addrconf_mod_dad_work(ifp, 0);
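/*
 * Deferred DAD work item.  Depending on ifp->state it begins DAD
 * (PREDAD), aborts it after a reported conflict (ERRDAD, possibly
 * disabling IPv6 when accept_dad > 1 and the conflicting address is
 * the MAC-derived link-local), or transmits the next DAD Neighbor
 * Solicitation, completing DAD once dad_probes reaches zero.
 */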
3973 static void addrconf_dad_work(struct work_struct *w)
3975 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
3976 struct inet6_ifaddr,
3977 dad_work);
3978 struct inet6_dev *idev = ifp->idev;
3979 bool bump_id, disable_ipv6 = false;
3980 struct in6_addr mcaddr;
3982 enum {
3983 DAD_PROCESS,
3984 DAD_BEGIN,
3985 DAD_ABORT,
3986 } action = DAD_PROCESS;
3988 rtnl_lock();
3990 spin_lock_bh(&ifp->lock);
3991 if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
3992 action = DAD_BEGIN;
3993 ifp->state = INET6_IFADDR_STATE_DAD;
3994 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
3995 action = DAD_ABORT;
3996 ifp->state = INET6_IFADDR_STATE_POSTDAD;
3998 if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
3999 idev->cnf.accept_dad > 1) &&
4000 !idev->cnf.disable_ipv6 &&
4001 !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
4002 struct in6_addr addr;
4004 addr.s6_addr32[0] = htonl(0xfe800000);
4005 addr.s6_addr32[1] = 0;
4007 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
4008 ipv6_addr_equal(&ifp->addr, &addr)) {
4009 /* DAD failed for link-local based on MAC */
4010 idev->cnf.disable_ipv6 = 1;
4012 pr_info("%s: IPv6 being disabled!\n",
4013 ifp->idev->dev->name);
4014 disable_ipv6 = true;
4018 spin_unlock_bh(&ifp->lock);
4020 if (action == DAD_BEGIN) {
4021 addrconf_dad_begin(ifp);
4022 goto out;
4023 } else if (action == DAD_ABORT) {
4024 in6_ifa_hold(ifp);
4025 addrconf_dad_stop(ifp, 1);
4026 if (disable_ipv6)
4027 addrconf_ifdown(idev->dev, 0);
4028 goto out;
4031 if (!ifp->dad_probes && addrconf_dad_end(ifp))
4032 goto out;
4034 write_lock_bh(&idev->lock);
4035 if (idev->dead || !(idev->if_flags & IF_READY)) {
4036 write_unlock_bh(&idev->lock);
4037 goto out;
4040 spin_lock(&ifp->lock);
4041 if (ifp->state == INET6_IFADDR_STATE_DEAD) {
4042 spin_unlock(&ifp->lock);
4043 write_unlock_bh(&idev->lock);
4044 goto out;
4047 if (ifp->dad_probes == 0) {
4048 bool send_na = false;
4051 * DAD was successful
4054 if (ifp->flags & IFA_F_TENTATIVE &&
4055 !(ifp->flags & IFA_F_OPTIMISTIC))
4056 send_na = true;
4057 bump_id = ifp->flags & IFA_F_TENTATIVE;
4058 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4059 spin_unlock(&ifp->lock);
4060 write_unlock_bh(&idev->lock);
4062 addrconf_dad_completed(ifp, bump_id, send_na);
4064 goto out;
4067 ifp->dad_probes--;
4068 addrconf_mod_dad_work(ifp,
4069 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
4070 spin_unlock(&ifp->lock);
4071 write_unlock_bh(&idev->lock);
4073 /* send a neighbour solicitation for our addr */
4074 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
4075 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
4076 ifp->dad_nonce);
4077 out:
4078 in6_ifa_put(ifp);
4079 rtnl_unlock();
4082 /* ifp->idev must be at least read locked */
4083 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
4085 struct inet6_ifaddr *ifpiter;
4086 struct inet6_dev *idev = ifp->idev;
4088 list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4089 if (ifpiter->scope > IFA_LINK)
4090 break;
4091 if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4092 (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4093 IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4094 IFA_F_PERMANENT)
4095 return false;
4097 return true;
4100 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4101 bool send_na)
4103 struct net_device *dev = ifp->idev->dev;
4104 struct in6_addr lladdr;
4105 bool send_rs, send_mld;
4107 addrconf_del_dad_work(ifp);
4110 * Configure the address for reception. Now it is valid.
4113 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4115 /* If the added prefix is link-local and we are prepared to process
4116 router advertisements, start sending router solicitations.
4119 read_lock_bh(&ifp->idev->lock);
4120 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4121 send_rs = send_mld &&
4122 ipv6_accept_ra(ifp->idev) &&
4123 ifp->idev->cnf.rtr_solicits != 0 &&
4124 (dev->flags&IFF_LOOPBACK) == 0;
4125 read_unlock_bh(&ifp->idev->lock);
4127 /* While DAD is in progress, the MLD report's source address is the
4128 * unspecified address; resend it with the proper link-local address now.
4130 if (send_mld)
4131 ipv6_mc_dad_complete(ifp->idev);
4133 /* send unsolicited NA if enabled */
4134 if (send_na &&
4135 (ifp->idev->cnf.ndisc_notify ||
4136 dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
4137 ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
4138 /*router=*/ !!ifp->idev->cnf.forwarding,
4139 /*solicited=*/ false, /*override=*/ true,
4140 /*inc_opt=*/ true);
4143 if (send_rs) {
4145 * If a host has already performed a random delay
4146 * [...] as part of DAD [...] there is no need
4147 * to delay again before sending the first RS
4149 if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4150 return;
4151 ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4153 write_lock_bh(&ifp->idev->lock);
4154 spin_lock(&ifp->lock);
4155 ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4156 ifp->idev->cnf.rtr_solicit_interval);
4157 ifp->idev->rs_probes = 1;
4158 ifp->idev->if_flags |= IF_RS_SENT;
4159 addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4160 spin_unlock(&ifp->lock);
4161 write_unlock_bh(&ifp->idev->lock);
4164 if (bump_id)
4165 rt_genid_bump_ipv6(dev_net(dev));
4167 /* Make sure that a new temporary address will be created
4168 * before this temporary address becomes deprecated.
4170 if (ifp->flags & IFA_F_TEMPORARY)
4171 addrconf_verify_rtnl();
4174 static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
4176 struct inet6_ifaddr *ifp;
4178 read_lock_bh(&idev->lock);
4179 list_for_each_entry(ifp, &idev->addr_list, if_list) {
4180 spin_lock(&ifp->lock);
4181 if ((ifp->flags & IFA_F_TENTATIVE &&
4182 ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
4183 if (restart)
4184 ifp->state = INET6_IFADDR_STATE_PREDAD;
4185 addrconf_dad_kick(ifp);
4187 spin_unlock(&ifp->lock);
4189 read_unlock_bh(&idev->lock);
4192 #ifdef CONFIG_PROC_FS
4193 struct if6_iter_state {
4194 struct seq_net_private p;
4195 int bucket;
4196 int offset;
4199 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4201 struct if6_iter_state *state = seq->private;
4202 struct net *net = seq_file_net(seq);
4203 struct inet6_ifaddr *ifa = NULL;
4204 int p = 0;
4206 /* initial bucket if pos is 0 */
4207 if (pos == 0) {
4208 state->bucket = 0;
4209 state->offset = 0;
4212 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4213 hlist_for_each_entry_rcu(ifa, &inet6_addr_lst[state->bucket],
4214 addr_lst) {
4215 if (!net_eq(dev_net(ifa->idev->dev), net))
4216 continue;
4217 /* sync with offset */
4218 if (p < state->offset) {
4219 p++;
4220 continue;
4222 return ifa;
4225 /* prepare for next bucket */
4226 state->offset = 0;
4227 p = 0;
4229 return NULL;
4232 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4233 struct inet6_ifaddr *ifa)
4235 struct if6_iter_state *state = seq->private;
4236 struct net *net = seq_file_net(seq);
4238 hlist_for_each_entry_continue_rcu(ifa, addr_lst) {
4239 if (!net_eq(dev_net(ifa->idev->dev), net))
4240 continue;
4241 state->offset++;
4242 return ifa;
4245 state->offset = 0;
4246 while (++state->bucket < IN6_ADDR_HSIZE) {
4247 hlist_for_each_entry_rcu(ifa,
4248 &inet6_addr_lst[state->bucket], addr_lst) {
4249 if (!net_eq(dev_net(ifa->idev->dev), net))
4250 continue;
4251 return ifa;
4255 return NULL;
4258 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4259 __acquires(rcu)
4261 rcu_read_lock();
4262 return if6_get_first(seq, *pos);
4265 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4267 struct inet6_ifaddr *ifa;
4269 ifa = if6_get_next(seq, v);
4270 ++*pos;
4271 return ifa;
4274 static void if6_seq_stop(struct seq_file *seq, void *v)
4275 __releases(rcu)
4277 rcu_read_unlock();
4280 static int if6_seq_show(struct seq_file *seq, void *v)
4282 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4283 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4284 &ifp->addr,
4285 ifp->idev->dev->ifindex,
4286 ifp->prefix_len,
4287 ifp->scope,
4288 (u8) ifp->flags,
4289 ifp->idev->dev->name);
4290 return 0;
4293 static const struct seq_operations if6_seq_ops = {
4294 .start = if6_seq_start,
4295 .next = if6_seq_next,
4296 .show = if6_seq_show,
4297 .stop = if6_seq_stop,
4300 static int __net_init if6_proc_net_init(struct net *net)
4302 if (!proc_create_net("if_inet6", 0444, net->proc_net, &if6_seq_ops,
4303 sizeof(struct if6_iter_state)))
4304 return -ENOMEM;
4305 return 0;
4308 static void __net_exit if6_proc_net_exit(struct net *net)
4310 remove_proc_entry("if_inet6", net->proc_net);
4313 static struct pernet_operations if6_proc_net_ops = {
4314 .init = if6_proc_net_init,
4315 .exit = if6_proc_net_exit,
4318 int __init if6_proc_init(void)
4320 return register_pernet_subsys(&if6_proc_net_ops);
4323 void if6_proc_exit(void)
4325 unregister_pernet_subsys(&if6_proc_net_ops);
4327 #endif /* CONFIG_PROC_FS */
4329 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4330 /* Check if address is a home address configured on any interface. */
4331 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4333 unsigned int hash = inet6_addr_hash(net, addr);
4334 struct inet6_ifaddr *ifp = NULL;
4335 int ret = 0;
4337 rcu_read_lock();
4338 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
4339 if (!net_eq(dev_net(ifp->idev->dev), net))
4340 continue;
4341 if (ipv6_addr_equal(&ifp->addr, addr) &&
4342 (ifp->flags & IFA_F_HOMEADDRESS)) {
4343 ret = 1;
4344 break;
4347 rcu_read_unlock();
4348 return ret;
4350 #endif
4353 * Periodic address status verification
4356 static void addrconf_verify_rtnl(void)
4358 unsigned long now, next, next_sec, next_sched;
4359 struct inet6_ifaddr *ifp;
4360 int i;
4362 ASSERT_RTNL();
4364 rcu_read_lock_bh();
4365 now = jiffies;
4366 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4368 cancel_delayed_work(&addr_chk_work);
4370 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4371 restart:
4372 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
4373 unsigned long age;
4375 /* An IFA_F_PERMANENT address can still have a finite preferred
4376 * lifetime: preferred_lft may be set to a value that is neither
4377 * zero nor infinity while valid_lft is infinity.
4379 if ((ifp->flags & IFA_F_PERMANENT) &&
4380 (ifp->prefered_lft == INFINITY_LIFE_TIME))
4381 continue;
4383 spin_lock(&ifp->lock);
4384 /* We try to batch several events at once. */
4385 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4387 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4388 age >= ifp->valid_lft) {
4389 spin_unlock(&ifp->lock);
4390 in6_ifa_hold(ifp);
4391 ipv6_del_addr(ifp);
4392 goto restart;
4393 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4394 spin_unlock(&ifp->lock);
4395 continue;
4396 } else if (age >= ifp->prefered_lft) {
4397 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4398 int deprecate = 0;
4400 if (!(ifp->flags&IFA_F_DEPRECATED)) {
4401 deprecate = 1;
4402 ifp->flags |= IFA_F_DEPRECATED;
4405 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4406 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4407 next = ifp->tstamp + ifp->valid_lft * HZ;
4409 spin_unlock(&ifp->lock);
4411 if (deprecate) {
4412 in6_ifa_hold(ifp);
4414 ipv6_ifa_notify(0, ifp);
4415 in6_ifa_put(ifp);
4416 goto restart;
4418 } else if ((ifp->flags&IFA_F_TEMPORARY) &&
4419 !(ifp->flags&IFA_F_TENTATIVE)) {
4420 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4421 ifp->idev->cnf.dad_transmits *
4422 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
4424 if (age >= ifp->prefered_lft - regen_advance) {
4425 struct inet6_ifaddr *ifpub = ifp->ifpub;
4426 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4427 next = ifp->tstamp + ifp->prefered_lft * HZ;
4428 if (!ifp->regen_count && ifpub) {
4429 ifp->regen_count++;
4430 in6_ifa_hold(ifp);
4431 in6_ifa_hold(ifpub);
4432 spin_unlock(&ifp->lock);
4434 spin_lock(&ifpub->lock);
4435 ifpub->regen_count = 0;
4436 spin_unlock(&ifpub->lock);
4437 rcu_read_unlock_bh();
4438 ipv6_create_tempaddr(ifpub, ifp, true);
4439 in6_ifa_put(ifpub);
4440 in6_ifa_put(ifp);
4441 rcu_read_lock_bh();
4442 goto restart;
4444 } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4445 next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4446 spin_unlock(&ifp->lock);
4447 } else {
4448 /* ifp->prefered_lft <= ifp->valid_lft */
4449 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4450 next = ifp->tstamp + ifp->prefered_lft * HZ;
4451 spin_unlock(&ifp->lock);
4456 next_sec = round_jiffies_up(next);
4457 next_sched = next;
4459 /* If rounded timeout is accurate enough, accept it. */
4460 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4461 next_sched = next_sec;
4463 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4464 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4465 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4467 pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4468 now, next, next_sec, next_sched);
4469 mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
4470 rcu_read_unlock_bh();
4473 static void addrconf_verify_work(struct work_struct *w)
4475 rtnl_lock();
4476 addrconf_verify_rtnl();
4477 rtnl_unlock();
4480 static void addrconf_verify(void)
4482 mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
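/* Annotation (not part of the original file): as a rough worked
 * example of the checks above, an address added with valid_lft=7200
 * and prefered_lft=3600 is marked IFA_F_DEPRECATED once about 3600s
 * have elapsed since ifp->tstamp and is deleted once about 7200s have
 * elapsed; the delayed work is rescheduled for whichever pending
 * deadline comes first, but never sooner than ADDRCONF_TIMER_FUZZ_MAX
 * from now.
 */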
4485 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4486 struct in6_addr **peer_pfx)
4488 struct in6_addr *pfx = NULL;
4490 *peer_pfx = NULL;
4492 if (addr)
4493 pfx = nla_data(addr);
4495 if (local) {
4496 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4497 *peer_pfx = pfx;
4498 pfx = nla_data(local);
4501 return pfx;
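/* Annotation (not part of the original file): when both IFA_LOCAL and
 * IFA_ADDRESS are supplied and differ, IFA_LOCAL is treated as the
 * address to configure and IFA_ADDRESS is returned through *peer_pfx
 * as the peer, which is roughly how a point-to-point request such as
 * "ip -6 addr add 2001:db8::1 peer 2001:db8::2 dev ppp0" is encoded
 * by iproute2 (command shown for illustration only).
 */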
4504 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4505 [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
4506 [IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
4507 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
4508 [IFA_FLAGS] = { .len = sizeof(u32) },
4509 [IFA_RT_PRIORITY] = { .len = sizeof(u32) },
4512 static int
4513 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4514 struct netlink_ext_ack *extack)
4516 struct net *net = sock_net(skb->sk);
4517 struct ifaddrmsg *ifm;
4518 struct nlattr *tb[IFA_MAX+1];
4519 struct in6_addr *pfx, *peer_pfx;
4520 u32 ifa_flags;
4521 int err;
4523 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4524 extack);
4525 if (err < 0)
4526 return err;
4528 ifm = nlmsg_data(nlh);
4529 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4530 if (!pfx)
4531 return -EINVAL;
4533 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4535 /* We ignore other flags so far. */
4536 ifa_flags &= IFA_F_MANAGETEMPADDR;
4538 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4539 ifm->ifa_prefixlen);
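/* Annotation (not part of the original file): a deletion request such
 * as "ip -6 addr del 2001:db8::1/64 dev eth0" (illustrative) arrives
 * here as an RTM_DELADDR message carrying the address, the prefix
 * length in ifa_prefixlen and the interface in ifa_index; only the
 * IFA_F_MANAGETEMPADDR flag is honoured on deletion.
 */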
4542 static int modify_prefix_route(struct inet6_ifaddr *ifp,
4543 unsigned long expires, u32 flags,
4544 bool modify_peer)
4546 struct fib6_info *f6i;
4547 u32 prio;
4549 f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4550 ifp->prefix_len,
4551 ifp->idev->dev,
4552 0, RTF_GATEWAY | RTF_DEFAULT);
4553 if (!f6i)
4554 return -ENOENT;
4556 prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
4557 if (f6i->fib6_metric != prio) {
4558 /* delete old one */
4559 ip6_del_rt(dev_net(ifp->idev->dev), f6i);
4561 /* add new one */
4562 addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4563 ifp->prefix_len,
4564 ifp->rt_priority, ifp->idev->dev,
4565 expires, flags, GFP_KERNEL);
4566 } else {
4567 if (!expires)
4568 fib6_clean_expires(f6i);
4569 else
4570 fib6_set_expires(f6i, expires);
4572 fib6_info_release(f6i);
4575 return 0;
4578 static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
4580 u32 flags;
4581 clock_t expires;
4582 unsigned long timeout;
4583 bool was_managetempaddr;
4584 bool had_prefixroute;
4585 bool new_peer = false;
4587 ASSERT_RTNL();
4589 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
4590 return -EINVAL;
4592 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR &&
4593 (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4594 return -EINVAL;
4596 if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED)
4597 cfg->ifa_flags &= ~IFA_F_OPTIMISTIC;
4599 timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
4600 if (addrconf_finite_timeout(timeout)) {
4601 expires = jiffies_to_clock_t(timeout * HZ);
4602 cfg->valid_lft = timeout;
4603 flags = RTF_EXPIRES;
4604 } else {
4605 expires = 0;
4606 flags = 0;
4607 cfg->ifa_flags |= IFA_F_PERMANENT;
4610 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
4611 if (addrconf_finite_timeout(timeout)) {
4612 if (timeout == 0)
4613 cfg->ifa_flags |= IFA_F_DEPRECATED;
4614 cfg->preferred_lft = timeout;
4617 if (cfg->peer_pfx &&
4618 memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
4619 if (!ipv6_addr_any(&ifp->peer_addr))
4620 cleanup_prefix_route(ifp, expires, true, true);
4621 new_peer = true;
4624 spin_lock_bh(&ifp->lock);
4625 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4626 had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4627 !(ifp->flags & IFA_F_NOPREFIXROUTE);
4628 ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4629 IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4630 IFA_F_NOPREFIXROUTE);
4631 ifp->flags |= cfg->ifa_flags;
4632 ifp->tstamp = jiffies;
4633 ifp->valid_lft = cfg->valid_lft;
4634 ifp->prefered_lft = cfg->preferred_lft;
4636 if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
4637 ifp->rt_priority = cfg->rt_priority;
4639 if (new_peer)
4640 ifp->peer_addr = *cfg->peer_pfx;
4642 spin_unlock_bh(&ifp->lock);
4643 if (!(ifp->flags&IFA_F_TENTATIVE))
4644 ipv6_ifa_notify(0, ifp);
4646 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
4647 int rc = -ENOENT;
4649 if (had_prefixroute)
4650 rc = modify_prefix_route(ifp, expires, flags, false);
4652 /* prefix route could have been deleted; if so restore it */
4653 if (rc == -ENOENT) {
4654 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
4655 ifp->rt_priority, ifp->idev->dev,
4656 expires, flags, GFP_KERNEL);
4659 if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
4660 rc = modify_prefix_route(ifp, expires, flags, true);
4662 if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
4663 addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
4664 ifp->rt_priority, ifp->idev->dev,
4665 expires, flags, GFP_KERNEL);
4667 } else if (had_prefixroute) {
4668 enum cleanup_prefix_rt_t action;
4669 unsigned long rt_expires;
4671 write_lock_bh(&ifp->idev->lock);
4672 action = check_cleanup_prefix_route(ifp, &rt_expires);
4673 write_unlock_bh(&ifp->idev->lock);
4675 if (action != CLEANUP_PREFIX_RT_NOP) {
4676 cleanup_prefix_route(ifp, rt_expires,
4677 action == CLEANUP_PREFIX_RT_DEL, false);
4681 if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4682 if (was_managetempaddr &&
4683 !(ifp->flags & IFA_F_MANAGETEMPADDR)) {
4684 cfg->valid_lft = 0;
4685 cfg->preferred_lft = 0;
4687 manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
4688 cfg->preferred_lft, !was_managetempaddr,
4689 jiffies);
4692 addrconf_verify_rtnl();
4694 return 0;
4697 static int
4698 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4699 struct netlink_ext_ack *extack)
4701 struct net *net = sock_net(skb->sk);
4702 struct ifaddrmsg *ifm;
4703 struct nlattr *tb[IFA_MAX+1];
4704 struct in6_addr *peer_pfx;
4705 struct inet6_ifaddr *ifa;
4706 struct net_device *dev;
4707 struct inet6_dev *idev;
4708 struct ifa6_config cfg;
4709 int err;
4711 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4712 extack);
4713 if (err < 0)
4714 return err;
4716 memset(&cfg, 0, sizeof(cfg));
4718 ifm = nlmsg_data(nlh);
4719 cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4720 if (!cfg.pfx)
4721 return -EINVAL;
4723 cfg.peer_pfx = peer_pfx;
4724 cfg.plen = ifm->ifa_prefixlen;
4725 if (tb[IFA_RT_PRIORITY])
4726 cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
4728 cfg.valid_lft = INFINITY_LIFE_TIME;
4729 cfg.preferred_lft = INFINITY_LIFE_TIME;
4731 if (tb[IFA_CACHEINFO]) {
4732 struct ifa_cacheinfo *ci;
4734 ci = nla_data(tb[IFA_CACHEINFO]);
4735 cfg.valid_lft = ci->ifa_valid;
4736 cfg.preferred_lft = ci->ifa_prefered;
4739 dev = __dev_get_by_index(net, ifm->ifa_index);
4740 if (!dev)
4741 return -ENODEV;
4743 if (tb[IFA_FLAGS])
4744 cfg.ifa_flags = nla_get_u32(tb[IFA_FLAGS]);
4745 else
4746 cfg.ifa_flags = ifm->ifa_flags;
4748 /* We ignore other flags so far. */
4749 cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS |
4750 IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE |
4751 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
4753 idev = ipv6_find_idev(dev);
4754 if (!idev)
4755 return -ENOBUFS;
4757 if (!ipv6_allow_optimistic_dad(net, idev))
4758 cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
4760 if (cfg.ifa_flags & IFA_F_NODAD &&
4761 cfg.ifa_flags & IFA_F_OPTIMISTIC) {
4762 NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive");
4763 return -EINVAL;
4766 ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1);
4767 if (!ifa) {
4768 /*
4769 * It would be best to check for !NLM_F_CREATE here but
4770 * userspace already relies on not having to provide this.
4771 */
4772 return inet6_addr_add(net, ifm->ifa_index, &cfg, extack);
4775 if (nlh->nlmsg_flags & NLM_F_EXCL ||
4776 !(nlh->nlmsg_flags & NLM_F_REPLACE))
4777 err = -EEXIST;
4778 else
4779 err = inet6_addr_modify(ifa, &cfg);
4781 in6_ifa_put(ifa);
4783 return err;
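/* Annotation (not part of the original file): for an existing address,
 * the netlink flags decide between failure and modification much like
 * iproute2's commands map onto them: "ip addr add" typically sends
 * NLM_F_CREATE|NLM_F_EXCL and gets -EEXIST here, while
 * "ip addr replace" and "ip addr change" set NLM_F_REPLACE and fall
 * through to inet6_addr_modify() (mapping described for illustration).
 */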
4786 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4787 u8 scope, int ifindex)
4789 struct ifaddrmsg *ifm;
4791 ifm = nlmsg_data(nlh);
4792 ifm->ifa_family = AF_INET6;
4793 ifm->ifa_prefixlen = prefixlen;
4794 ifm->ifa_flags = flags;
4795 ifm->ifa_scope = scope;
4796 ifm->ifa_index = ifindex;
4799 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4800 unsigned long tstamp, u32 preferred, u32 valid)
4802 struct ifa_cacheinfo ci;
4804 ci.cstamp = cstamp_delta(cstamp);
4805 ci.tstamp = cstamp_delta(tstamp);
4806 ci.ifa_prefered = preferred;
4807 ci.ifa_valid = valid;
4809 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4812 static inline int rt_scope(int ifa_scope)
4814 if (ifa_scope & IFA_HOST)
4815 return RT_SCOPE_HOST;
4816 else if (ifa_scope & IFA_LINK)
4817 return RT_SCOPE_LINK;
4818 else if (ifa_scope & IFA_SITE)
4819 return RT_SCOPE_SITE;
4820 else
4821 return RT_SCOPE_UNIVERSE;
4824 static inline int inet6_ifaddr_msgsize(void)
4826 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4827 + nla_total_size(16) /* IFA_LOCAL */
4828 + nla_total_size(16) /* IFA_ADDRESS */
4829 + nla_total_size(sizeof(struct ifa_cacheinfo))
4830 + nla_total_size(4) /* IFA_FLAGS */
4831 + nla_total_size(4) /* IFA_RT_PRIORITY */;
4834 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
4835 u32 portid, u32 seq, int event, unsigned int flags)
4837 struct nlmsghdr *nlh;
4838 u32 preferred, valid;
4840 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4841 if (!nlh)
4842 return -EMSGSIZE;
4844 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
4845 ifa->idev->dev->ifindex);
4847 if (!((ifa->flags&IFA_F_PERMANENT) &&
4848 (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
4849 preferred = ifa->prefered_lft;
4850 valid = ifa->valid_lft;
4851 if (preferred != INFINITY_LIFE_TIME) {
4852 long tval = (jiffies - ifa->tstamp)/HZ;
4853 if (preferred > tval)
4854 preferred -= tval;
4855 else
4856 preferred = 0;
4857 if (valid != INFINITY_LIFE_TIME) {
4858 if (valid > tval)
4859 valid -= tval;
4860 else
4861 valid = 0;
4864 } else {
4865 preferred = INFINITY_LIFE_TIME;
4866 valid = INFINITY_LIFE_TIME;
4869 if (!ipv6_addr_any(&ifa->peer_addr)) {
4870 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
4871 nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
4872 goto error;
4873 } else
4874 if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
4875 goto error;
4877 if (ifa->rt_priority &&
4878 nla_put_u32(skb, IFA_RT_PRIORITY, ifa->rt_priority))
4879 goto error;
4881 if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
4882 goto error;
4884 if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
4885 goto error;
4887 nlmsg_end(skb, nlh);
4888 return 0;
4890 error:
4891 nlmsg_cancel(skb, nlh);
4892 return -EMSGSIZE;
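/* Annotation (not part of the original file): the cacheinfo reported
 * above is the remaining lifetime, not the configured one: e.g. an
 * address configured with preferred_lft 600 and valid_lft 1200 that
 * has been up for 100 seconds is dumped with roughly 500/1100, while
 * permanent addresses with an infinite preferred lifetime report
 * INFINITY_LIFE_TIME for both values.
 */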
4895 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
4896 u32 portid, u32 seq, int event, u16 flags)
4898 struct nlmsghdr *nlh;
4899 u8 scope = RT_SCOPE_UNIVERSE;
4900 int ifindex = ifmca->idev->dev->ifindex;
4902 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
4903 scope = RT_SCOPE_SITE;
4905 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4906 if (!nlh)
4907 return -EMSGSIZE;
4909 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4910 if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
4911 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
4912 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4913 nlmsg_cancel(skb, nlh);
4914 return -EMSGSIZE;
4917 nlmsg_end(skb, nlh);
4918 return 0;
4921 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
4922 u32 portid, u32 seq, int event, unsigned int flags)
4924 struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
4925 int ifindex = dev ? dev->ifindex : 1;
4926 struct nlmsghdr *nlh;
4927 u8 scope = RT_SCOPE_UNIVERSE;
4929 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
4930 scope = RT_SCOPE_SITE;
4932 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4933 if (!nlh)
4934 return -EMSGSIZE;
4936 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4937 if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
4938 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
4939 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4940 nlmsg_cancel(skb, nlh);
4941 return -EMSGSIZE;
4944 nlmsg_end(skb, nlh);
4945 return 0;
4948 enum addr_type_t {
4949 UNICAST_ADDR,
4950 MULTICAST_ADDR,
4951 ANYCAST_ADDR,
4954 /* called with rcu_read_lock() */
4955 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
4956 struct netlink_callback *cb, enum addr_type_t type,
4957 int s_ip_idx, int *p_ip_idx)
4959 struct ifmcaddr6 *ifmca;
4960 struct ifacaddr6 *ifaca;
4961 int err = 1;
4962 int ip_idx = *p_ip_idx;
4964 read_lock_bh(&idev->lock);
4965 switch (type) {
4966 case UNICAST_ADDR: {
4967 struct inet6_ifaddr *ifa;
4969 /* unicast address incl. temp addr */
4970 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4971 if (ip_idx < s_ip_idx)
4972 goto next;
4973 err = inet6_fill_ifaddr(skb, ifa,
4974 NETLINK_CB(cb->skb).portid,
4975 cb->nlh->nlmsg_seq,
4976 RTM_NEWADDR,
4977 NLM_F_MULTI);
4978 if (err < 0)
4979 break;
4980 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
4981 next:
4982 ip_idx++;
4984 break;
4986 case MULTICAST_ADDR:
4987 /* multicast address */
4988 for (ifmca = idev->mc_list; ifmca;
4989 ifmca = ifmca->next, ip_idx++) {
4990 if (ip_idx < s_ip_idx)
4991 continue;
4992 err = inet6_fill_ifmcaddr(skb, ifmca,
4993 NETLINK_CB(cb->skb).portid,
4994 cb->nlh->nlmsg_seq,
4995 RTM_GETMULTICAST,
4996 NLM_F_MULTI);
4997 if (err < 0)
4998 break;
5000 break;
5001 case ANYCAST_ADDR:
5002 /* anycast address */
5003 for (ifaca = idev->ac_list; ifaca;
5004 ifaca = ifaca->aca_next, ip_idx++) {
5005 if (ip_idx < s_ip_idx)
5006 continue;
5007 err = inet6_fill_ifacaddr(skb, ifaca,
5008 NETLINK_CB(cb->skb).portid,
5009 cb->nlh->nlmsg_seq,
5010 RTM_GETANYCAST,
5011 NLM_F_MULTI);
5012 if (err < 0)
5013 break;
5015 break;
5016 default:
5017 break;
5019 read_unlock_bh(&idev->lock);
5020 *p_ip_idx = ip_idx;
5021 return err;
5024 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
5025 enum addr_type_t type)
5027 struct net *net = sock_net(skb->sk);
5028 int h, s_h;
5029 int idx, ip_idx;
5030 int s_idx, s_ip_idx;
5031 struct net_device *dev;
5032 struct inet6_dev *idev;
5033 struct hlist_head *head;
5035 s_h = cb->args[0];
5036 s_idx = idx = cb->args[1];
5037 s_ip_idx = ip_idx = cb->args[2];
5039 rcu_read_lock();
5040 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
5041 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5042 idx = 0;
5043 head = &net->dev_index_head[h];
5044 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5045 if (idx < s_idx)
5046 goto cont;
5047 if (h > s_h || idx > s_idx)
5048 s_ip_idx = 0;
5049 ip_idx = 0;
5050 idev = __in6_dev_get(dev);
5051 if (!idev)
5052 goto cont;
5054 if (in6_dump_addrs(idev, skb, cb, type,
5055 s_ip_idx, &ip_idx) < 0)
5056 goto done;
5057 cont:
5058 idx++;
5061 done:
5062 rcu_read_unlock();
5063 cb->args[0] = h;
5064 cb->args[1] = idx;
5065 cb->args[2] = ip_idx;
5067 return skb->len;
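/* Annotation (not part of the original file): dumps resume across
 * multiple netlink messages via cb->args[]: args[0] is the device
 * hash bucket, args[1] the device index within that bucket and
 * args[2] the per-device address index, so a partially filled skb
 * can be continued exactly where the previous pass stopped.
 */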
5070 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
5072 enum addr_type_t type = UNICAST_ADDR;
5074 return inet6_dump_addr(skb, cb, type);
5077 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
5079 enum addr_type_t type = MULTICAST_ADDR;
5081 return inet6_dump_addr(skb, cb, type);
5085 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
5087 enum addr_type_t type = ANYCAST_ADDR;
5089 return inet6_dump_addr(skb, cb, type);
5092 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5093 struct netlink_ext_ack *extack)
5095 struct net *net = sock_net(in_skb->sk);
5096 struct ifaddrmsg *ifm;
5097 struct nlattr *tb[IFA_MAX+1];
5098 struct in6_addr *addr = NULL, *peer;
5099 struct net_device *dev = NULL;
5100 struct inet6_ifaddr *ifa;
5101 struct sk_buff *skb;
5102 int err;
5104 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
5105 extack);
5106 if (err < 0)
5107 return err;
5109 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
5110 if (!addr)
5111 return -EINVAL;
5113 ifm = nlmsg_data(nlh);
5114 if (ifm->ifa_index)
5115 dev = dev_get_by_index(net, ifm->ifa_index);
5117 ifa = ipv6_get_ifaddr(net, addr, dev, 1);
5118 if (!ifa) {
5119 err = -EADDRNOTAVAIL;
5120 goto errout;
5123 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
5124 if (!skb) {
5125 err = -ENOBUFS;
5126 goto errout_ifa;
5129 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid,
5130 nlh->nlmsg_seq, RTM_NEWADDR, 0);
5131 if (err < 0) {
5132 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5133 WARN_ON(err == -EMSGSIZE);
5134 kfree_skb(skb);
5135 goto errout_ifa;
5137 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
5138 errout_ifa:
5139 in6_ifa_put(ifa);
5140 errout:
5141 if (dev)
5142 dev_put(dev);
5143 return err;
5146 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
5148 struct sk_buff *skb;
5149 struct net *net = dev_net(ifa->idev->dev);
5150 int err = -ENOBUFS;
5152 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
5153 if (!skb)
5154 goto errout;
5156 err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
5157 if (err < 0) {
5158 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5159 WARN_ON(err == -EMSGSIZE);
5160 kfree_skb(skb);
5161 goto errout;
5163 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
5164 return;
5165 errout:
5166 if (err < 0)
5167 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
5170 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
5171 __s32 *array, int bytes)
5173 BUG_ON(bytes < (DEVCONF_MAX * 4));
5175 memset(array, 0, bytes);
5176 array[DEVCONF_FORWARDING] = cnf->forwarding;
5177 array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
5178 array[DEVCONF_MTU6] = cnf->mtu6;
5179 array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
5180 array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
5181 array[DEVCONF_AUTOCONF] = cnf->autoconf;
5182 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
5183 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
5184 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
5185 jiffies_to_msecs(cnf->rtr_solicit_interval);
5186 array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
5187 jiffies_to_msecs(cnf->rtr_solicit_max_interval);
5188 array[DEVCONF_RTR_SOLICIT_DELAY] =
5189 jiffies_to_msecs(cnf->rtr_solicit_delay);
5190 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
5191 array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
5192 jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
5193 array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
5194 jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
5195 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
5196 array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
5197 array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
5198 array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
5199 array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
5200 array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
5201 array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
5202 array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
5203 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
5204 #ifdef CONFIG_IPV6_ROUTER_PREF
5205 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
5206 array[DEVCONF_RTR_PROBE_INTERVAL] =
5207 jiffies_to_msecs(cnf->rtr_probe_interval);
5208 #ifdef CONFIG_IPV6_ROUTE_INFO
5209 array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
5210 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
5211 #endif
5212 #endif
5213 array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
5214 array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
5215 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5216 array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
5217 array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
5218 #endif
5219 #ifdef CONFIG_IPV6_MROUTE
5220 array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
5221 #endif
5222 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
5223 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
5224 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
5225 array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
5226 array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
5227 array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
5228 array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
5229 array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
5230 /* we omit DEVCONF_STABLE_SECRET for now */
5231 array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
5232 array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
5233 array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
5234 array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
5235 array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
5236 #ifdef CONFIG_IPV6_SEG6_HMAC
5237 array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
5238 #endif
5239 array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5240 array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5241 array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5242 array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
5245 static inline size_t inet6_ifla6_size(void)
5247 return nla_total_size(4) /* IFLA_INET6_FLAGS */
5248 + nla_total_size(sizeof(struct ifla_cacheinfo))
5249 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5250 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5251 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5252 + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */
5253 + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */
5254 + 0;
5257 static inline size_t inet6_if_nlmsg_size(void)
5259 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5260 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5261 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5262 + nla_total_size(4) /* IFLA_MTU */
5263 + nla_total_size(4) /* IFLA_LINK */
5264 + nla_total_size(1) /* IFLA_OPERSTATE */
5265 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5268 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5269 int bytes)
5271 int i;
5272 int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5273 BUG_ON(pad < 0);
5275 /* Use put_unaligned() because stats may not be aligned for u64. */
5276 put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5277 for (i = 1; i < ICMP6_MIB_MAX; i++)
5278 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5280 memset(&stats[ICMP6_MIB_MAX], 0, pad);
5283 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5284 int bytes, size_t syncpoff)
5286 int i, c;
5287 u64 buff[IPSTATS_MIB_MAX];
5288 int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5290 BUG_ON(pad < 0);
5292 memset(buff, 0, sizeof(buff));
5293 buff[0] = IPSTATS_MIB_MAX;
5295 for_each_possible_cpu(c) {
5296 for (i = 1; i < IPSTATS_MIB_MAX; i++)
5297 buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5300 memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5301 memset(&stats[IPSTATS_MIB_MAX], 0, pad);
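/* Annotation (not part of the original file): both fill helpers above
 * emit the same layout: slot 0 holds the number of counters
 * (IPSTATS_MIB_MAX or ICMP6_MIB_MAX) and the remaining slots hold the
 * counters themselves, summed across all possible CPUs for the
 * per-cpu 64-bit MIBs; any space beyond that in the attribute is
 * zero-padded.
 */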
5304 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5305 int bytes)
5307 switch (attrtype) {
5308 case IFLA_INET6_STATS:
5309 __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5310 offsetof(struct ipstats_mib, syncp));
5311 break;
5312 case IFLA_INET6_ICMP6STATS:
5313 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5314 break;
5318 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5319 u32 ext_filter_mask)
5321 struct nlattr *nla;
5322 struct ifla_cacheinfo ci;
5324 if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5325 goto nla_put_failure;
5326 ci.max_reasm_len = IPV6_MAXPLEN;
5327 ci.tstamp = cstamp_delta(idev->tstamp);
5328 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5329 ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5330 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5331 goto nla_put_failure;
5332 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5333 if (!nla)
5334 goto nla_put_failure;
5335 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5337 /* XXX - MC not implemented */
5339 if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5340 return 0;
5342 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5343 if (!nla)
5344 goto nla_put_failure;
5345 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5347 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5348 if (!nla)
5349 goto nla_put_failure;
5350 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5352 nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5353 if (!nla)
5354 goto nla_put_failure;
5356 if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
5357 goto nla_put_failure;
5359 read_lock_bh(&idev->lock);
5360 memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5361 read_unlock_bh(&idev->lock);
5363 return 0;
5365 nla_put_failure:
5366 return -EMSGSIZE;
5369 static size_t inet6_get_link_af_size(const struct net_device *dev,
5370 u32 ext_filter_mask)
5372 if (!__in6_dev_get(dev))
5373 return 0;
5375 return inet6_ifla6_size();
5378 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5379 u32 ext_filter_mask)
5381 struct inet6_dev *idev = __in6_dev_get(dev);
5383 if (!idev)
5384 return -ENODATA;
5386 if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5387 return -EMSGSIZE;
5389 return 0;
5392 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
5394 struct inet6_ifaddr *ifp;
5395 struct net_device *dev = idev->dev;
5396 bool clear_token, update_rs = false;
5397 struct in6_addr ll_addr;
5399 ASSERT_RTNL();
5401 if (!token)
5402 return -EINVAL;
5403 if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
5404 return -EINVAL;
5405 if (!ipv6_accept_ra(idev))
5406 return -EINVAL;
5407 if (idev->cnf.rtr_solicits == 0)
5408 return -EINVAL;
5410 write_lock_bh(&idev->lock);
5412 BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5413 memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5415 write_unlock_bh(&idev->lock);
5417 clear_token = ipv6_addr_any(token);
5418 if (clear_token)
5419 goto update_lft;
5421 if (!idev->dead && (idev->if_flags & IF_READY) &&
5422 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5423 IFA_F_OPTIMISTIC)) {
5424 /* If we're not ready, then normal ifup will take care
4425 * of this. Otherwise, we need to request our rs here.
4426 */
5427 ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5428 update_rs = true;
5431 update_lft:
5432 write_lock_bh(&idev->lock);
5434 if (update_rs) {
5435 idev->if_flags |= IF_RS_SENT;
5436 idev->rs_interval = rfc3315_s14_backoff_init(
5437 idev->cnf.rtr_solicit_interval);
5438 idev->rs_probes = 1;
5439 addrconf_mod_rs_timer(idev, idev->rs_interval);
5442 /* Kinda nasty: zero the lifetimes of existing tokenized addresses so they age out and are re-formed from the next RA using the new token. */
5443 list_for_each_entry(ifp, &idev->addr_list, if_list) {
5444 spin_lock(&ifp->lock);
5445 if (ifp->tokenized) {
5446 ifp->valid_lft = 0;
5447 ifp->prefered_lft = 0;
5449 spin_unlock(&ifp->lock);
5452 write_unlock_bh(&idev->lock);
5453 inet6_ifinfo_notify(RTM_NEWLINK, idev);
5454 addrconf_verify_rtnl();
5455 return 0;
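/* Annotation (not part of the original file): only the lower 64 bits
 * of the supplied token are stored, tokenized addresses already on the
 * interface have their lifetimes zeroed so they age out, and, when the
 * interface is ready, a fresh router solicitation is sent so new
 * addresses are formed from the next RA. From userspace this is driven
 * by something like "ip token set ::1234:5678 dev eth0" (illustrative
 * command).
 */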
5458 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5459 [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
5460 [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
5463 static int inet6_validate_link_af(const struct net_device *dev,
5464 const struct nlattr *nla)
5466 struct nlattr *tb[IFLA_INET6_MAX + 1];
5468 if (dev && !__in6_dev_get(dev))
5469 return -EAFNOSUPPORT;
5471 return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy,
5472 NULL);
5475 static int check_addr_gen_mode(int mode)
5477 if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5478 mode != IN6_ADDR_GEN_MODE_NONE &&
5479 mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5480 mode != IN6_ADDR_GEN_MODE_RANDOM)
5481 return -EINVAL;
5482 return 1;
5485 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5486 int mode)
5488 if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5489 !idev->cnf.stable_secret.initialized &&
5490 !net->ipv6.devconf_dflt->stable_secret.initialized)
5491 return -EINVAL;
5492 return 1;
5495 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
5497 int err = -EINVAL;
5498 struct inet6_dev *idev = __in6_dev_get(dev);
5499 struct nlattr *tb[IFLA_INET6_MAX + 1];
5501 if (!idev)
5502 return -EAFNOSUPPORT;
5504 if (nla_parse_nested(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
5505 BUG();
5507 if (tb[IFLA_INET6_TOKEN]) {
5508 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
5509 if (err)
5510 return err;
5513 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5514 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5516 if (check_addr_gen_mode(mode) < 0 ||
5517 check_stable_privacy(idev, dev_net(dev), mode) < 0)
5518 return -EINVAL;
5520 idev->cnf.addr_gen_mode = mode;
5521 err = 0;
5524 return err;
5527 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5528 u32 portid, u32 seq, int event, unsigned int flags)
5530 struct net_device *dev = idev->dev;
5531 struct ifinfomsg *hdr;
5532 struct nlmsghdr *nlh;
5533 void *protoinfo;
5535 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5536 if (!nlh)
5537 return -EMSGSIZE;
5539 hdr = nlmsg_data(nlh);
5540 hdr->ifi_family = AF_INET6;
5541 hdr->__ifi_pad = 0;
5542 hdr->ifi_type = dev->type;
5543 hdr->ifi_index = dev->ifindex;
5544 hdr->ifi_flags = dev_get_flags(dev);
5545 hdr->ifi_change = 0;
5547 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5548 (dev->addr_len &&
5549 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5550 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5551 (dev->ifindex != dev_get_iflink(dev) &&
5552 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5553 nla_put_u8(skb, IFLA_OPERSTATE,
5554 netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5555 goto nla_put_failure;
5556 protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
5557 if (!protoinfo)
5558 goto nla_put_failure;
5560 if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5561 goto nla_put_failure;
5563 nla_nest_end(skb, protoinfo);
5564 nlmsg_end(skb, nlh);
5565 return 0;
5567 nla_put_failure:
5568 nlmsg_cancel(skb, nlh);
5569 return -EMSGSIZE;
5572 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5574 struct net *net = sock_net(skb->sk);
5575 int h, s_h;
5576 int idx = 0, s_idx;
5577 struct net_device *dev;
5578 struct inet6_dev *idev;
5579 struct hlist_head *head;
5581 s_h = cb->args[0];
5582 s_idx = cb->args[1];
5584 rcu_read_lock();
5585 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5586 idx = 0;
5587 head = &net->dev_index_head[h];
5588 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5589 if (idx < s_idx)
5590 goto cont;
5591 idev = __in6_dev_get(dev);
5592 if (!idev)
5593 goto cont;
5594 if (inet6_fill_ifinfo(skb, idev,
5595 NETLINK_CB(cb->skb).portid,
5596 cb->nlh->nlmsg_seq,
5597 RTM_NEWLINK, NLM_F_MULTI) < 0)
5598 goto out;
5599 cont:
5600 idx++;
5603 out:
5604 rcu_read_unlock();
5605 cb->args[1] = idx;
5606 cb->args[0] = h;
5608 return skb->len;
5611 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
5613 struct sk_buff *skb;
5614 struct net *net = dev_net(idev->dev);
5615 int err = -ENOBUFS;
5617 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
5618 if (!skb)
5619 goto errout;
5621 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
5622 if (err < 0) {
5623 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
5624 WARN_ON(err == -EMSGSIZE);
5625 kfree_skb(skb);
5626 goto errout;
5628 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
5629 return;
5630 errout:
5631 if (err < 0)
5632 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
5635 static inline size_t inet6_prefix_nlmsg_size(void)
5637 return NLMSG_ALIGN(sizeof(struct prefixmsg))
5638 + nla_total_size(sizeof(struct in6_addr))
5639 + nla_total_size(sizeof(struct prefix_cacheinfo));
5642 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
5643 struct prefix_info *pinfo, u32 portid, u32 seq,
5644 int event, unsigned int flags)
5646 struct prefixmsg *pmsg;
5647 struct nlmsghdr *nlh;
5648 struct prefix_cacheinfo ci;
5650 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
5651 if (!nlh)
5652 return -EMSGSIZE;
5654 pmsg = nlmsg_data(nlh);
5655 pmsg->prefix_family = AF_INET6;
5656 pmsg->prefix_pad1 = 0;
5657 pmsg->prefix_pad2 = 0;
5658 pmsg->prefix_ifindex = idev->dev->ifindex;
5659 pmsg->prefix_len = pinfo->prefix_len;
5660 pmsg->prefix_type = pinfo->type;
5661 pmsg->prefix_pad3 = 0;
5662 pmsg->prefix_flags = 0;
5663 if (pinfo->onlink)
5664 pmsg->prefix_flags |= IF_PREFIX_ONLINK;
5665 if (pinfo->autoconf)
5666 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
5668 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
5669 goto nla_put_failure;
5670 ci.preferred_time = ntohl(pinfo->prefered);
5671 ci.valid_time = ntohl(pinfo->valid);
5672 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
5673 goto nla_put_failure;
5674 nlmsg_end(skb, nlh);
5675 return 0;
5677 nla_put_failure:
5678 nlmsg_cancel(skb, nlh);
5679 return -EMSGSIZE;
5682 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
5683 struct prefix_info *pinfo)
5685 struct sk_buff *skb;
5686 struct net *net = dev_net(idev->dev);
5687 int err = -ENOBUFS;
5689 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
5690 if (!skb)
5691 goto errout;
5693 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
5694 if (err < 0) {
5695 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
5696 WARN_ON(err == -EMSGSIZE);
5697 kfree_skb(skb);
5698 goto errout;
5700 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
5701 return;
5702 errout:
5703 if (err < 0)
5704 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
5707 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5709 struct net *net = dev_net(ifp->idev->dev);
5711 if (event)
5712 ASSERT_RTNL();
5714 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
5716 switch (event) {
5717 case RTM_NEWADDR:
4718 /*
4719 * If the address was optimistic we inserted the route at the
4720 * start of our DAD process, so we don't need to do it again.
4721 * If the device was taken down in the middle of the DAD
4722 * cycle there is a race where we could get here without a
4723 * host route, so nothing to insert. That will be fixed when
4724 * the device is brought up.
4725 */
5726 if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
5727 ip6_ins_rt(net, ifp->rt);
5728 } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
5729 pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
5730 &ifp->addr, ifp->idev->dev->name);
5733 if (ifp->idev->cnf.forwarding)
5734 addrconf_join_anycast(ifp);
5735 if (!ipv6_addr_any(&ifp->peer_addr))
5736 addrconf_prefix_route(&ifp->peer_addr, 128,
5737 ifp->rt_priority, ifp->idev->dev,
5738 0, 0, GFP_ATOMIC);
5739 break;
5740 case RTM_DELADDR:
5741 if (ifp->idev->cnf.forwarding)
5742 addrconf_leave_anycast(ifp);
5743 addrconf_leave_solict(ifp->idev, &ifp->addr);
5744 if (!ipv6_addr_any(&ifp->peer_addr)) {
5745 struct fib6_info *rt;
5747 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
5748 ifp->idev->dev, 0, 0);
5749 if (rt)
5750 ip6_del_rt(net, rt);
5752 if (ifp->rt) {
5753 ip6_del_rt(net, ifp->rt);
5754 ifp->rt = NULL;
5756 rt_genid_bump_ipv6(net);
5757 break;
5759 atomic_inc(&net->ipv6.dev_addr_genid);
5762 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5764 rcu_read_lock_bh();
5765 if (likely(ifp->idev->dead == 0))
5766 __ipv6_ifa_notify(event, ifp);
5767 rcu_read_unlock_bh();
5770 #ifdef CONFIG_SYSCTL
5772 static
5773 int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
5774 void __user *buffer, size_t *lenp, loff_t *ppos)
5776 int *valp = ctl->data;
5777 int val = *valp;
5778 loff_t pos = *ppos;
5779 struct ctl_table lctl;
5780 int ret;
5782 /*
5783 * ctl->data points to idev->cnf.forwarding, we should
5784 * not modify it until we get the rtnl lock.
5785 */
5786 lctl = *ctl;
5787 lctl.data = &val;
5789 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5791 if (write)
5792 ret = addrconf_fixup_forwarding(ctl, valp, val);
5793 if (ret)
5794 *ppos = pos;
5795 return ret;
5798 static
5799 int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
5800 void __user *buffer, size_t *lenp, loff_t *ppos)
5802 struct inet6_dev *idev = ctl->extra1;
5803 int min_mtu = IPV6_MIN_MTU;
5804 struct ctl_table lctl;
5806 lctl = *ctl;
5807 lctl.extra1 = &min_mtu;
5808 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
5810 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
5813 static void dev_disable_change(struct inet6_dev *idev)
5815 struct netdev_notifier_info info;
5817 if (!idev || !idev->dev)
5818 return;
5820 netdev_notifier_info_init(&info, idev->dev);
5821 if (idev->cnf.disable_ipv6)
5822 addrconf_notify(NULL, NETDEV_DOWN, &info);
5823 else
5824 addrconf_notify(NULL, NETDEV_UP, &info);
5827 static void addrconf_disable_change(struct net *net, __s32 newf)
5829 struct net_device *dev;
5830 struct inet6_dev *idev;
5832 for_each_netdev(net, dev) {
5833 idev = __in6_dev_get(dev);
5834 if (idev) {
5835 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
5836 idev->cnf.disable_ipv6 = newf;
5837 if (changed)
5838 dev_disable_change(idev);
5843 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
5845 struct net *net;
5846 int old;
5848 if (!rtnl_trylock())
5849 return restart_syscall();
5851 net = (struct net *)table->extra2;
5852 old = *p;
5853 *p = newf;
5855 if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
5856 rtnl_unlock();
5857 return 0;
5860 if (p == &net->ipv6.devconf_all->disable_ipv6) {
5861 net->ipv6.devconf_dflt->disable_ipv6 = newf;
5862 addrconf_disable_change(net, newf);
5863 } else if ((!newf) ^ (!old))
5864 dev_disable_change((struct inet6_dev *)table->extra1);
5866 rtnl_unlock();
5867 return 0;
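/* Annotation (not part of the original file): writing to
 * net.ipv6.conf.all.disable_ipv6 (e.g. "sysctl -w
 * net.ipv6.conf.all.disable_ipv6=1", shown for illustration) also
 * updates the "default" setting and toggles IPv6 on every device in
 * the namespace; a per-interface entry toggles only that device, and
 * the "default" entry merely changes what new interfaces inherit.
 */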
5870 static
5871 int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
5872 void __user *buffer, size_t *lenp, loff_t *ppos)
5874 int *valp = ctl->data;
5875 int val = *valp;
5876 loff_t pos = *ppos;
5877 struct ctl_table lctl;
5878 int ret;
5880 /*
5881 * ctl->data points to idev->cnf.disable_ipv6, we should
5882 * not modify it until we get the rtnl lock.
5883 */
5884 lctl = *ctl;
5885 lctl.data = &val;
5887 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5889 if (write)
5890 ret = addrconf_disable_ipv6(ctl, valp, val);
5891 if (ret)
5892 *ppos = pos;
5893 return ret;
5896 static
5897 int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
5898 void __user *buffer, size_t *lenp, loff_t *ppos)
5900 int *valp = ctl->data;
5901 int ret;
5902 int old, new;
5904 old = *valp;
5905 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5906 new = *valp;
5908 if (write && old != new) {
5909 struct net *net = ctl->extra2;
5911 if (!rtnl_trylock())
5912 return restart_syscall();
5914 if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
5915 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5916 NETCONFA_PROXY_NEIGH,
5917 NETCONFA_IFINDEX_DEFAULT,
5918 net->ipv6.devconf_dflt);
5919 else if (valp == &net->ipv6.devconf_all->proxy_ndp)
5920 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5921 NETCONFA_PROXY_NEIGH,
5922 NETCONFA_IFINDEX_ALL,
5923 net->ipv6.devconf_all);
5924 else {
5925 struct inet6_dev *idev = ctl->extra1;
5927 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5928 NETCONFA_PROXY_NEIGH,
5929 idev->dev->ifindex,
5930 &idev->cnf);
5932 rtnl_unlock();
5935 return ret;
5938 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
5939 void __user *buffer, size_t *lenp,
5940 loff_t *ppos)
5942 int ret = 0;
5943 u32 new_val;
5944 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
5945 struct net *net = (struct net *)ctl->extra2;
5946 struct ctl_table tmp = {
5947 .data = &new_val,
5948 .maxlen = sizeof(new_val),
5949 .mode = ctl->mode,
5952 if (!rtnl_trylock())
5953 return restart_syscall();
5955 new_val = *((u32 *)ctl->data);
5957 ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
5958 if (ret != 0)
5959 goto out;
5961 if (write) {
5962 if (check_addr_gen_mode(new_val) < 0) {
5963 ret = -EINVAL;
5964 goto out;
5967 if (idev) {
5968 if (check_stable_privacy(idev, net, new_val) < 0) {
5969 ret = -EINVAL;
5970 goto out;
5973 if (idev->cnf.addr_gen_mode != new_val) {
5974 idev->cnf.addr_gen_mode = new_val;
5975 addrconf_dev_config(idev->dev);
5977 } else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
5978 struct net_device *dev;
5980 net->ipv6.devconf_dflt->addr_gen_mode = new_val;
5981 for_each_netdev(net, dev) {
5982 idev = __in6_dev_get(dev);
5983 if (idev &&
5984 idev->cnf.addr_gen_mode != new_val) {
5985 idev->cnf.addr_gen_mode = new_val;
5986 addrconf_dev_config(idev->dev);
5991 *((u32 *)ctl->data) = new_val;
5994 out:
5995 rtnl_unlock();
5997 return ret;
6000 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
6001 void __user *buffer, size_t *lenp,
6002 loff_t *ppos)
6004 int err;
6005 struct in6_addr addr;
6006 char str[IPV6_MAX_STRLEN];
6007 struct ctl_table lctl = *ctl;
6008 struct net *net = ctl->extra2;
6009 struct ipv6_stable_secret *secret = ctl->data;
6011 if (&net->ipv6.devconf_all->stable_secret == ctl->data)
6012 return -EIO;
6014 lctl.maxlen = IPV6_MAX_STRLEN;
6015 lctl.data = str;
6017 if (!rtnl_trylock())
6018 return restart_syscall();
6020 if (!write && !secret->initialized) {
6021 err = -EIO;
6022 goto out;
6025 err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
6026 if (err >= sizeof(str)) {
6027 err = -EIO;
6028 goto out;
6031 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
6032 if (err || !write)
6033 goto out;
6035 if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
6036 err = -EIO;
6037 goto out;
6040 secret->initialized = true;
6041 secret->secret = addr;
6043 if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
6044 struct net_device *dev;
6046 for_each_netdev(net, dev) {
6047 struct inet6_dev *idev = __in6_dev_get(dev);
6049 if (idev) {
6050 idev->cnf.addr_gen_mode =
6051 IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6054 } else {
6055 struct inet6_dev *idev = ctl->extra1;
6057 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6060 out:
6061 rtnl_unlock();
6063 return err;
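/* Annotation (not part of the original file): the secret is read and
 * written as an IPv6-formatted string, e.g.
 * "echo 2001:db8::1 > /proc/sys/net/ipv6/conf/default/stable_secret"
 * (illustrative); a successful write flips addr_gen_mode to
 * IN6_ADDR_GEN_MODE_STABLE_PRIVACY for the affected interface, or for
 * every interface when the "default" entry is written, while the
 * "all" entry is not usable for this knob.
 */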
6066 static
6067 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
6068 int write,
6069 void __user *buffer,
6070 size_t *lenp,
6071 loff_t *ppos)
6073 int *valp = ctl->data;
6074 int val = *valp;
6075 loff_t pos = *ppos;
6076 struct ctl_table lctl;
6077 int ret;
6079 /* ctl->data points to idev->cnf.ignore_routes_with_linkdown;
6080 * we should not modify it until we get the rtnl lock.
6081 */
6082 lctl = *ctl;
6083 lctl.data = &val;
6085 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6087 if (write)
6088 ret = addrconf_fixup_linkdown(ctl, valp, val);
6089 if (ret)
6090 *ppos = pos;
6091 return ret;
6094 static
6095 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
6097 if (rt) {
6098 if (action)
6099 rt->dst.flags |= DST_NOPOLICY;
6100 else
6101 rt->dst.flags &= ~DST_NOPOLICY;
6105 static
6106 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
6108 struct inet6_ifaddr *ifa;
6110 read_lock_bh(&idev->lock);
6111 list_for_each_entry(ifa, &idev->addr_list, if_list) {
6112 spin_lock(&ifa->lock);
6113 if (ifa->rt) {
6114 struct fib6_info *rt = ifa->rt;
6115 int cpu;
6117 rcu_read_lock();
6118 ifa->rt->dst_nopolicy = val ? true : false;
6119 if (rt->rt6i_pcpu) {
6120 for_each_possible_cpu(cpu) {
6121 struct rt6_info **rtp;
6123 rtp = per_cpu_ptr(rt->rt6i_pcpu, cpu);
6124 addrconf_set_nopolicy(*rtp, val);
6127 rcu_read_unlock();
6129 spin_unlock(&ifa->lock);
6131 read_unlock_bh(&idev->lock);
6134 static
6135 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
6137 struct inet6_dev *idev;
6138 struct net *net;
6140 if (!rtnl_trylock())
6141 return restart_syscall();
6143 *valp = val;
6145 net = (struct net *)ctl->extra2;
6146 if (valp == &net->ipv6.devconf_dflt->disable_policy) {
6147 rtnl_unlock();
6148 return 0;
6151 if (valp == &net->ipv6.devconf_all->disable_policy) {
6152 struct net_device *dev;
6154 for_each_netdev(net, dev) {
6155 idev = __in6_dev_get(dev);
6156 if (idev)
6157 addrconf_disable_policy_idev(idev, val);
6159 } else {
6160 idev = (struct inet6_dev *)ctl->extra1;
6161 addrconf_disable_policy_idev(idev, val);
6164 rtnl_unlock();
6165 return 0;
6168 static
6169 int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
6170 void __user *buffer, size_t *lenp,
6171 loff_t *ppos)
6173 int *valp = ctl->data;
6174 int val = *valp;
6175 loff_t pos = *ppos;
6176 struct ctl_table lctl;
6177 int ret;
6179 lctl = *ctl;
6180 lctl.data = &val;
6181 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6183 if (write && (*valp != val))
6184 ret = addrconf_disable_policy(ctl, valp, val);
6186 if (ret)
6187 *ppos = pos;
6189 return ret;
6192 static int minus_one = -1;
6193 static const int zero = 0;
6194 static const int one = 1;
6195 static const int two_five_five = 255;
6197 static const struct ctl_table addrconf_sysctl[] = {
6199 .procname = "forwarding",
6200 .data = &ipv6_devconf.forwarding,
6201 .maxlen = sizeof(int),
6202 .mode = 0644,
6203 .proc_handler = addrconf_sysctl_forward,
6206 .procname = "hop_limit",
6207 .data = &ipv6_devconf.hop_limit,
6208 .maxlen = sizeof(int),
6209 .mode = 0644,
6210 .proc_handler = proc_dointvec_minmax,
6211 .extra1 = (void *)&one,
6212 .extra2 = (void *)&two_five_five,
6215 .procname = "mtu",
6216 .data = &ipv6_devconf.mtu6,
6217 .maxlen = sizeof(int),
6218 .mode = 0644,
6219 .proc_handler = addrconf_sysctl_mtu,
6222 .procname = "accept_ra",
6223 .data = &ipv6_devconf.accept_ra,
6224 .maxlen = sizeof(int),
6225 .mode = 0644,
6226 .proc_handler = proc_dointvec,
6229 .procname = "accept_redirects",
6230 .data = &ipv6_devconf.accept_redirects,
6231 .maxlen = sizeof(int),
6232 .mode = 0644,
6233 .proc_handler = proc_dointvec,
6236 .procname = "autoconf",
6237 .data = &ipv6_devconf.autoconf,
6238 .maxlen = sizeof(int),
6239 .mode = 0644,
6240 .proc_handler = proc_dointvec,
6243 .procname = "dad_transmits",
6244 .data = &ipv6_devconf.dad_transmits,
6245 .maxlen = sizeof(int),
6246 .mode = 0644,
6247 .proc_handler = proc_dointvec,
6250 .procname = "router_solicitations",
6251 .data = &ipv6_devconf.rtr_solicits,
6252 .maxlen = sizeof(int),
6253 .mode = 0644,
6254 .proc_handler = proc_dointvec_minmax,
6255 .extra1 = &minus_one,
6258 .procname = "router_solicitation_interval",
6259 .data = &ipv6_devconf.rtr_solicit_interval,
6260 .maxlen = sizeof(int),
6261 .mode = 0644,
6262 .proc_handler = proc_dointvec_jiffies,
6265 .procname = "router_solicitation_max_interval",
6266 .data = &ipv6_devconf.rtr_solicit_max_interval,
6267 .maxlen = sizeof(int),
6268 .mode = 0644,
6269 .proc_handler = proc_dointvec_jiffies,
6272 .procname = "router_solicitation_delay",
6273 .data = &ipv6_devconf.rtr_solicit_delay,
6274 .maxlen = sizeof(int),
6275 .mode = 0644,
6276 .proc_handler = proc_dointvec_jiffies,
6279 .procname = "force_mld_version",
6280 .data = &ipv6_devconf.force_mld_version,
6281 .maxlen = sizeof(int),
6282 .mode = 0644,
6283 .proc_handler = proc_dointvec,
6286 .procname = "mldv1_unsolicited_report_interval",
6287 .data =
6288 &ipv6_devconf.mldv1_unsolicited_report_interval,
6289 .maxlen = sizeof(int),
6290 .mode = 0644,
6291 .proc_handler = proc_dointvec_ms_jiffies,
6294 .procname = "mldv2_unsolicited_report_interval",
6295 .data =
6296 &ipv6_devconf.mldv2_unsolicited_report_interval,
6297 .maxlen = sizeof(int),
6298 .mode = 0644,
6299 .proc_handler = proc_dointvec_ms_jiffies,
6302 .procname = "use_tempaddr",
6303 .data = &ipv6_devconf.use_tempaddr,
6304 .maxlen = sizeof(int),
6305 .mode = 0644,
6306 .proc_handler = proc_dointvec,
6309 .procname = "temp_valid_lft",
6310 .data = &ipv6_devconf.temp_valid_lft,
6311 .maxlen = sizeof(int),
6312 .mode = 0644,
6313 .proc_handler = proc_dointvec,
6316 .procname = "temp_prefered_lft",
6317 .data = &ipv6_devconf.temp_prefered_lft,
6318 .maxlen = sizeof(int),
6319 .mode = 0644,
6320 .proc_handler = proc_dointvec,
6323 .procname = "regen_max_retry",
6324 .data = &ipv6_devconf.regen_max_retry,
6325 .maxlen = sizeof(int),
6326 .mode = 0644,
6327 .proc_handler = proc_dointvec,
6330 .procname = "max_desync_factor",
6331 .data = &ipv6_devconf.max_desync_factor,
6332 .maxlen = sizeof(int),
6333 .mode = 0644,
6334 .proc_handler = proc_dointvec,
6337 .procname = "max_addresses",
6338 .data = &ipv6_devconf.max_addresses,
6339 .maxlen = sizeof(int),
6340 .mode = 0644,
6341 .proc_handler = proc_dointvec,
6344 .procname = "accept_ra_defrtr",
6345 .data = &ipv6_devconf.accept_ra_defrtr,
6346 .maxlen = sizeof(int),
6347 .mode = 0644,
6348 .proc_handler = proc_dointvec,
6351 .procname = "accept_ra_min_hop_limit",
6352 .data = &ipv6_devconf.accept_ra_min_hop_limit,
6353 .maxlen = sizeof(int),
6354 .mode = 0644,
6355 .proc_handler = proc_dointvec,
6358 .procname = "accept_ra_pinfo",
6359 .data = &ipv6_devconf.accept_ra_pinfo,
6360 .maxlen = sizeof(int),
6361 .mode = 0644,
6362 .proc_handler = proc_dointvec,
6364 #ifdef CONFIG_IPV6_ROUTER_PREF
6366 .procname = "accept_ra_rtr_pref",
6367 .data = &ipv6_devconf.accept_ra_rtr_pref,
6368 .maxlen = sizeof(int),
6369 .mode = 0644,
6370 .proc_handler = proc_dointvec,
6373 .procname = "router_probe_interval",
6374 .data = &ipv6_devconf.rtr_probe_interval,
6375 .maxlen = sizeof(int),
6376 .mode = 0644,
6377 .proc_handler = proc_dointvec_jiffies,
6379 #ifdef CONFIG_IPV6_ROUTE_INFO
6381 .procname = "accept_ra_rt_info_min_plen",
6382 .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
6383 .maxlen = sizeof(int),
6384 .mode = 0644,
6385 .proc_handler = proc_dointvec,
6388 .procname = "accept_ra_rt_info_max_plen",
6389 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
6390 .maxlen = sizeof(int),
6391 .mode = 0644,
6392 .proc_handler = proc_dointvec,
6394 #endif
6395 #endif
6397 .procname = "proxy_ndp",
6398 .data = &ipv6_devconf.proxy_ndp,
6399 .maxlen = sizeof(int),
6400 .mode = 0644,
6401 .proc_handler = addrconf_sysctl_proxy_ndp,
6404 .procname = "accept_source_route",
6405 .data = &ipv6_devconf.accept_source_route,
6406 .maxlen = sizeof(int),
6407 .mode = 0644,
6408 .proc_handler = proc_dointvec,
6410 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
6412 .procname = "optimistic_dad",
6413 .data = &ipv6_devconf.optimistic_dad,
6414 .maxlen = sizeof(int),
6415 .mode = 0644,
6416 .proc_handler = proc_dointvec,
6419 .procname = "use_optimistic",
6420 .data = &ipv6_devconf.use_optimistic,
6421 .maxlen = sizeof(int),
6422 .mode = 0644,
6423 .proc_handler = proc_dointvec,
6425 #endif
6426 #ifdef CONFIG_IPV6_MROUTE
6428 .procname = "mc_forwarding",
6429 .data = &ipv6_devconf.mc_forwarding,
6430 .maxlen = sizeof(int),
6431 .mode = 0444,
6432 .proc_handler = proc_dointvec,
6434 #endif
6436 .procname = "disable_ipv6",
6437 .data = &ipv6_devconf.disable_ipv6,
6438 .maxlen = sizeof(int),
6439 .mode = 0644,
6440 .proc_handler = addrconf_sysctl_disable,
6443 .procname = "accept_dad",
6444 .data = &ipv6_devconf.accept_dad,
6445 .maxlen = sizeof(int),
6446 .mode = 0644,
6447 .proc_handler = proc_dointvec,
6450 .procname = "force_tllao",
6451 .data = &ipv6_devconf.force_tllao,
6452 .maxlen = sizeof(int),
6453 .mode = 0644,
6454 .proc_handler = proc_dointvec
6457 .procname = "ndisc_notify",
6458 .data = &ipv6_devconf.ndisc_notify,
6459 .maxlen = sizeof(int),
6460 .mode = 0644,
6461 .proc_handler = proc_dointvec
6464 .procname = "suppress_frag_ndisc",
6465 .data = &ipv6_devconf.suppress_frag_ndisc,
6466 .maxlen = sizeof(int),
6467 .mode = 0644,
6468 .proc_handler = proc_dointvec
6471 .procname = "accept_ra_from_local",
6472 .data = &ipv6_devconf.accept_ra_from_local,
6473 .maxlen = sizeof(int),
6474 .mode = 0644,
6475 .proc_handler = proc_dointvec,
6478 .procname = "accept_ra_mtu",
6479 .data = &ipv6_devconf.accept_ra_mtu,
6480 .maxlen = sizeof(int),
6481 .mode = 0644,
6482 .proc_handler = proc_dointvec,
6485 .procname = "stable_secret",
6486 .data = &ipv6_devconf.stable_secret,
6487 .maxlen = IPV6_MAX_STRLEN,
6488 .mode = 0600,
6489 .proc_handler = addrconf_sysctl_stable_secret,
6492 .procname = "use_oif_addrs_only",
6493 .data = &ipv6_devconf.use_oif_addrs_only,
6494 .maxlen = sizeof(int),
6495 .mode = 0644,
6496 .proc_handler = proc_dointvec,
6499 .procname = "ignore_routes_with_linkdown",
6500 .data = &ipv6_devconf.ignore_routes_with_linkdown,
6501 .maxlen = sizeof(int),
6502 .mode = 0644,
6503 .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
6506 .procname = "drop_unicast_in_l2_multicast",
6507 .data = &ipv6_devconf.drop_unicast_in_l2_multicast,
6508 .maxlen = sizeof(int),
6509 .mode = 0644,
6510 .proc_handler = proc_dointvec,
6513 .procname = "drop_unsolicited_na",
6514 .data = &ipv6_devconf.drop_unsolicited_na,
6515 .maxlen = sizeof(int),
6516 .mode = 0644,
6517 .proc_handler = proc_dointvec,
6520 .procname = "keep_addr_on_down",
6521 .data = &ipv6_devconf.keep_addr_on_down,
6522 .maxlen = sizeof(int),
6523 .mode = 0644,
6524 .proc_handler = proc_dointvec,
6528 .procname = "seg6_enabled",
6529 .data = &ipv6_devconf.seg6_enabled,
6530 .maxlen = sizeof(int),
6531 .mode = 0644,
6532 .proc_handler = proc_dointvec,
6534 #ifdef CONFIG_IPV6_SEG6_HMAC
6536 .procname = "seg6_require_hmac",
6537 .data = &ipv6_devconf.seg6_require_hmac,
6538 .maxlen = sizeof(int),
6539 .mode = 0644,
6540 .proc_handler = proc_dointvec,
6542 #endif
6544 .procname = "enhanced_dad",
6545 .data = &ipv6_devconf.enhanced_dad,
6546 .maxlen = sizeof(int),
6547 .mode = 0644,
6548 .proc_handler = proc_dointvec,
6551 .procname = "addr_gen_mode",
6552 .data = &ipv6_devconf.addr_gen_mode,
6553 .maxlen = sizeof(int),
6554 .mode = 0644,
6555 .proc_handler = addrconf_sysctl_addr_gen_mode,
6556 },
6557 {
6558 .procname = "disable_policy",
6559 .data = &ipv6_devconf.disable_policy,
6560 .maxlen = sizeof(int),
6561 .mode = 0644,
6562 .proc_handler = addrconf_sysctl_disable_policy,
6563 },
6564 {
6565 .procname = "ndisc_tclass",
6566 .data = &ipv6_devconf.ndisc_tclass,
6567 .maxlen = sizeof(int),
6568 .mode = 0644,
6569 .proc_handler = proc_dointvec_minmax,
6570 .extra1 = (void *)&zero,
6571 .extra2 = (void *)&two_five_five,
6572 },
6573 {
6574 /* sentinel */
6575 }
6576 };
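/*
 * Editorial note: illustrative sketch, not part of the original addrconf.c.
 * The ndisc_tclass entry above shows the standard pattern for a bounded
 * integer sysctl: proc_dointvec_minmax() with .extra1/.extra2 pointing at
 * the permitted minimum and maximum. A hypothetical entry built the same
 * way (all names below are invented) would look like this:
 */
#if 0
static int example_min;			/* lower bound: 0 */
static int example_max = 255;		/* upper bound: 255 */
static int example_knob;		/* value exposed through the sysctl */

static struct ctl_table example_table[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &example_min,	/* writes below 0 are rejected */
		.extra2		= &example_max,	/* writes above 255 are rejected */
	},
	{ }	/* sentinel */
};
#endif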
6578 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
6579 struct inet6_dev *idev, struct ipv6_devconf *p)
6580 {
6581 int i, ifindex;
6582 struct ctl_table *table;
6583 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
6585 table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
6586 if (!table)
6587 goto out;
6589 for (i = 0; table[i].data; i++) {
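/* Editorial note: table[] is a copy of the addrconf_sysctl template, whose
 * .data members point into the global ipv6_devconf. Adding the byte offset
 * between this instance's devconf (p) and the template rebases every .data
 * pointer onto the per-device or per-namespace structure being registered.
 */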
6590 table[i].data += (char *)p - (char *)&ipv6_devconf;
6591 /* If one of these is already set, then it is not safe to
6592 * overwrite either of them: this makes proc_dointvec_minmax
6593 * usable.
6594 */
6595 if (!table[i].extra1 && !table[i].extra2) {
6596 table[i].extra1 = idev; /* embedded; no ref */
6597 table[i].extra2 = net;
6598 }
6599 }
6601 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
6603 p->sysctl_header = register_net_sysctl(net, path, table);
6604 if (!p->sysctl_header)
6605 goto free;
6607 if (!strcmp(dev_name, "all"))
6608 ifindex = NETCONFA_IFINDEX_ALL;
6609 else if (!strcmp(dev_name, "default"))
6610 ifindex = NETCONFA_IFINDEX_DEFAULT;
6611 else
6612 ifindex = idev->dev->ifindex;
6613 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
6614 ifindex, p);
6615 return 0;
6617 free:
6618 kfree(table);
6619 out:
6620 return -ENOBUFS;
6621 }
6623 static void __addrconf_sysctl_unregister(struct net *net,
6624 struct ipv6_devconf *p, int ifindex)
6625 {
6626 struct ctl_table *table;
6628 if (!p->sysctl_header)
6629 return;
6631 table = p->sysctl_header->ctl_table_arg;
6632 unregister_net_sysctl_table(p->sysctl_header);
6633 p->sysctl_header = NULL;
6634 kfree(table);
6636 inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
6637 }
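/*
 * Editorial sketch, not part of the original file: the minimal shape of the
 * clone/register/unregister pattern used by the two functions above, with
 * invented names (struct my_conf, my_template, my_template_defaults). The
 * registered table is a kmemdup() copy whose .data pointers were rebased
 * onto the private config struct; on teardown that copy is recovered through
 * the header's ctl_table_arg so it can be kfree()d.
 */
#if 0
struct my_conf {
	int some_knob;
	struct ctl_table_header *sysctl_header;
};

static int my_sysctl_register(struct net *net, const char *path,
			      struct my_conf *p)
{
	struct ctl_table *t;
	int i;

	t = kmemdup(my_template, sizeof(my_template), GFP_KERNEL);
	if (!t)
		return -ENOBUFS;

	for (i = 0; t[i].data; i++)	/* walk entries up to the sentinel */
		t[i].data += (char *)p - (char *)&my_template_defaults;

	p->sysctl_header = register_net_sysctl(net, path, t);
	if (!p->sysctl_header) {
		kfree(t);
		return -ENOBUFS;
	}
	return 0;
}

static void my_sysctl_unregister(struct my_conf *p)
{
	struct ctl_table *t = p->sysctl_header->ctl_table_arg;

	unregister_net_sysctl_table(p->sysctl_header);
	p->sysctl_header = NULL;
	kfree(t);	/* frees the kmemdup()ed copy, not the template */
}
#endif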
6639 static int addrconf_sysctl_register(struct inet6_dev *idev)
6640 {
6641 int err;
6643 if (!sysctl_dev_name_is_allowed(idev->dev->name))
6644 return -EINVAL;
6646 err = neigh_sysctl_register(idev->dev, idev->nd_parms,
6647 &ndisc_ifinfo_sysctl_change);
6648 if (err)
6649 return err;
6650 err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
6651 idev, &idev->cnf);
6652 if (err)
6653 neigh_sysctl_unregister(idev->nd_parms);
6655 return err;
6656 }
6658 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
6659 {
6660 __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
6661 idev->dev->ifindex);
6662 neigh_sysctl_unregister(idev->nd_parms);
6663 }
6666 #endif
6668 static int __net_init addrconf_init_net(struct net *net)
6669 {
6670 int err = -ENOMEM;
6671 struct ipv6_devconf *all, *dflt;
6673 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
6674 if (!all)
6675 goto err_alloc_all;
6677 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
6678 if (!dflt)
6679 goto err_alloc_dflt;
6681 /* these will be inherited by all namespaces */
6682 dflt->autoconf = ipv6_defaults.autoconf;
6683 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
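/* Editorial note: unlike the two fields above, the stable secret is not
 * inherited here; both copies are explicitly marked uninitialized below, so
 * each namespace has to configure its own secret through the stable_secret
 * sysctl before stable-privacy address generation can rely on it.
 */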
6685 dflt->stable_secret.initialized = false;
6686 all->stable_secret.initialized = false;
6688 net->ipv6.devconf_all = all;
6689 net->ipv6.devconf_dflt = dflt;
6691 #ifdef CONFIG_SYSCTL
6692 err = __addrconf_sysctl_register(net, "all", NULL, all);
6693 if (err < 0)
6694 goto err_reg_all;
6696 err = __addrconf_sysctl_register(net, "default", NULL, dflt);
6697 if (err < 0)
6698 goto err_reg_dflt;
6699 #endif
6700 return 0;
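/* Editorial note: the labels below fall through in reverse order of setup,
 * so a failure at any stage releases everything allocated before it (a
 * failed "default" registration also unregisters the "all" table, and both
 * devconf copies are then freed).
 */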
6702 #ifdef CONFIG_SYSCTL
6703 err_reg_dflt:
6704 __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
6705 err_reg_all:
6706 kfree(dflt);
6707 #endif
6708 err_alloc_dflt:
6709 kfree(all);
6710 err_alloc_all:
6711 return err;
6712 }
6714 static void __net_exit addrconf_exit_net(struct net *net)
6715 {
6716 #ifdef CONFIG_SYSCTL
6717 __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
6718 NETCONFA_IFINDEX_DEFAULT);
6719 __addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
6720 NETCONFA_IFINDEX_ALL);
6721 #endif
6722 kfree(net->ipv6.devconf_dflt);
6723 kfree(net->ipv6.devconf_all);
6724 }
6726 static struct pernet_operations addrconf_ops = {
6727 .init = addrconf_init_net,
6728 .exit = addrconf_exit_net,
6729 };
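/*
 * Editorial sketch, not from the original source: the general shape of a
 * pernet_operations user. register_pernet_subsys() runs .init for every
 * existing and future network namespace and .exit on namespace teardown;
 * the names my_pernet_init/my_pernet_exit/my_ops below are invented for
 * illustration only.
 */
#if 0
static int __net_init my_pernet_init(struct net *net)
{
	/* allocate and publish per-namespace state, as addrconf_init_net does */
	return 0;
}

static void __net_exit my_pernet_exit(struct net *net)
{
	/* undo everything my_pernet_init set up for this namespace */
}

static struct pernet_operations my_ops = {
	.init = my_pernet_init,
	.exit = my_pernet_exit,
};

static int __init my_module_init(void)
{
	return register_pernet_subsys(&my_ops);
}

static void __exit my_module_exit(void)
{
	unregister_pernet_subsys(&my_ops);
}
#endif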
6731 static struct rtnl_af_ops inet6_ops __read_mostly = {
6732 .family = AF_INET6,
6733 .fill_link_af = inet6_fill_link_af,
6734 .get_link_af_size = inet6_get_link_af_size,
6735 .validate_link_af = inet6_validate_link_af,
6736 .set_link_af = inet6_set_link_af,
6737 };
6739 /*
6740 * Init / cleanup code
6741 */
6743 int __init addrconf_init(void)
6744 {
6745 struct inet6_dev *idev;
6746 int i, err;
6748 err = ipv6_addr_label_init();
6749 if (err < 0) {
6750 pr_crit("%s: cannot initialize default policy table: %d\n",
6751 __func__, err);
6752 goto out;
6753 }
6755 err = register_pernet_subsys(&addrconf_ops);
6756 if (err < 0)
6757 goto out_addrlabel;
6759 addrconf_wq = create_workqueue("ipv6_addrconf");
6760 if (!addrconf_wq) {
6761 err = -ENOMEM;
6762 goto out_nowq;
6763 }
6765 /* The addrconf netdev notifier requires that loopback_dev
6766 * has its ipv6 private information allocated and set up
6767 * before it can bring up and give link-local addresses
6768 * to other devices which are up.
6770 * Unfortunately, loopback_dev is not necessarily the first
6771 * entry in the global dev_base list of net devices. In fact,
6772 * it is likely to be the very last entry on that list.
6773 * So this causes the notifier registration below to try and
6774 * give link-local addresses to all devices besides loopback_dev
6775 * first, then loopback_dev, which causes all the non-loopback_dev
6776 * devices to fail to get a link-local address.
6778 * So, as a temporary fix, allocate the ipv6 structure for
6779 * loopback_dev first by hand.
6780 * Longer term, all of the dependencies ipv6 has upon the loopback
6781 * device and it being up should be removed.
6782 */
6783 rtnl_lock();
6784 idev = ipv6_add_dev(init_net.loopback_dev);
6785 rtnl_unlock();
6786 if (IS_ERR(idev)) {
6787 err = PTR_ERR(idev);
6788 goto errlo;
6789 }
6791 ip6_route_init_special_entries();
6793 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6794 INIT_HLIST_HEAD(&inet6_addr_lst[i]);
6796 register_netdevice_notifier(&ipv6_dev_notf);
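/* Editorial note: addrconf_verify() below schedules the periodic address
 * checking work (addr_chk_work) on addrconf_wq, which is what later ages
 * out addresses according to their valid/preferred lifetimes.
 */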
6798 addrconf_verify();
6800 rtnl_af_register(&inet6_ops);
6802 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
6803 NULL, inet6_dump_ifinfo, 0);
6804 if (err < 0)
6805 goto errout;
6807 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
6808 inet6_rtm_newaddr, NULL, 0);
6809 if (err < 0)
6810 goto errout;
6811 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR,
6812 inet6_rtm_deladdr, NULL, 0);
6813 if (err < 0)
6814 goto errout;
6815 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
6816 inet6_rtm_getaddr, inet6_dump_ifaddr,
6817 RTNL_FLAG_DOIT_UNLOCKED);
6818 if (err < 0)
6819 goto errout;
6820 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
6821 NULL, inet6_dump_ifmcaddr, 0);
6822 if (err < 0)
6823 goto errout;
6824 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
6825 NULL, inet6_dump_ifacaddr, 0);
6826 if (err < 0)
6827 goto errout;
6828 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
6829 inet6_netconf_get_devconf,
6830 inet6_netconf_dump_devconf,
6831 RTNL_FLAG_DOIT_UNLOCKED);
6832 if (err < 0)
6833 goto errout;
6834 err = ipv6_addr_label_rtnl_register();
6835 if (err < 0)
6836 goto errout;
6838 return 0;
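/* Editorial note: a single rtnl_unregister_all(PF_INET6) below is enough to
 * drop every rtnetlink handler registered above, so all of the
 * rtnl_register_module() calls can share this one unwind path.
 */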
6839 errout:
6840 rtnl_unregister_all(PF_INET6);
6841 rtnl_af_unregister(&inet6_ops);
6842 unregister_netdevice_notifier(&ipv6_dev_notf);
6843 errlo:
6844 destroy_workqueue(addrconf_wq);
6845 out_nowq:
6846 unregister_pernet_subsys(&addrconf_ops);
6847 out_addrlabel:
6848 ipv6_addr_label_cleanup();
6849 out:
6850 return err;
6851 }
6853 void addrconf_cleanup(void)
6854 {
6855 struct net_device *dev;
6856 int i;
6858 unregister_netdevice_notifier(&ipv6_dev_notf);
6859 unregister_pernet_subsys(&addrconf_ops);
6860 ipv6_addr_label_cleanup();
6862 rtnl_af_unregister(&inet6_ops);
6864 rtnl_lock();
6866 /* clean dev list */
6867 for_each_netdev(&init_net, dev) {
6868 if (__in6_dev_get(dev) == NULL)
6869 continue;
6870 addrconf_ifdown(dev, 1);
6871 }
6872 addrconf_ifdown(init_net.loopback_dev, 2);
6874 /*
6875 * Check hash table.
6876 */
6877 spin_lock_bh(&addrconf_hash_lock);
6878 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6879 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
6880 spin_unlock_bh(&addrconf_hash_lock);
6881 cancel_delayed_work(&addr_chk_work);
6882 rtnl_unlock();
6884 destroy_workqueue(addrconf_wq);