net/ipv6/addrconf.c
1 /*
2 * IPv6 Address [auto]configuration
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 /*
16 * Changes:
17 *
18 * Janos Farkas : delete timer on ifdown
19 * <chexum@bankinf.banki.hu>
20 * Andi Kleen : kill double kfree on module
21 * unload.
22 * Maciej W. Rozycki : FDDI support
23 * sekiya@USAGI : Don't send too many RS
24 * packets.
25 * yoshfuji@USAGI : Fixed interval between DAD
26 * packets.
27 * YOSHIFUJI Hideaki @USAGI : improved accuracy of
28 * address validation timer.
29 * YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
30 * support.
31 * Yuji SEKIYA @USAGI : Don't assign a same IPv6
32 * address on a same interface.
33 * YOSHIFUJI Hideaki @USAGI : ARCnet support
34 * YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
35 * seq_file.
36 * YOSHIFUJI Hideaki @USAGI : improved source address
37 * selection; consider scope,
38 * status etc.
39 */
40
41 #define pr_fmt(fmt) "IPv6: " fmt
42
43 #include <linux/errno.h>
44 #include <linux/types.h>
45 #include <linux/kernel.h>
46 #include <linux/sched/signal.h>
47 #include <linux/socket.h>
48 #include <linux/sockios.h>
49 #include <linux/net.h>
50 #include <linux/inet.h>
51 #include <linux/in6.h>
52 #include <linux/netdevice.h>
53 #include <linux/if_addr.h>
54 #include <linux/if_arp.h>
55 #include <linux/if_arcnet.h>
56 #include <linux/if_infiniband.h>
57 #include <linux/route.h>
58 #include <linux/inetdevice.h>
59 #include <linux/init.h>
60 #include <linux/slab.h>
61 #ifdef CONFIG_SYSCTL
62 #include <linux/sysctl.h>
63 #endif
64 #include <linux/capability.h>
65 #include <linux/delay.h>
66 #include <linux/notifier.h>
67 #include <linux/string.h>
68 #include <linux/hash.h>
69
70 #include <net/net_namespace.h>
71 #include <net/sock.h>
72 #include <net/snmp.h>
73
74 #include <net/6lowpan.h>
75 #include <net/firewire.h>
76 #include <net/ipv6.h>
77 #include <net/protocol.h>
78 #include <net/ndisc.h>
79 #include <net/ip6_route.h>
80 #include <net/addrconf.h>
81 #include <net/tcp.h>
82 #include <net/ip.h>
83 #include <net/netlink.h>
84 #include <net/pkt_sched.h>
85 #include <net/l3mdev.h>
86 #include <linux/if_tunnel.h>
87 #include <linux/rtnetlink.h>
88 #include <linux/netconf.h>
89 #include <linux/random.h>
90 #include <linux/uaccess.h>
91 #include <asm/unaligned.h>
92
93 #include <linux/proc_fs.h>
94 #include <linux/seq_file.h>
95 #include <linux/export.h>
96
97 /* Set to 3 to get tracing... */
98 #define ACONF_DEBUG 2
99
100 #if ACONF_DEBUG >= 3
101 #define ADBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
102 #else
103 #define ADBG(fmt, ...) do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
104 #endif
105
106 #define INFINITY_LIFE_TIME 0xFFFFFFFF
107
108 #define IPV6_MAX_STRLEN \
109 sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
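/* The sizeof() above evaluates to 46 bytes: 45 visible characters for
 * the longest textual IPv6 address without a zone id (the IPv4-mapped
 * form spelled out here) plus the terminating NUL.
 */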
110
111 static inline u32 cstamp_delta(unsigned long cstamp)
112 {
113 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
114 }
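/* cstamp_delta() converts a jiffies timestamp into hundredths of a
 * second since boot (the "* 100 / HZ" above), which is the unit used
 * when address create/update times are reported to userspace over
 * netlink in struct ifa_cacheinfo.
 */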
115
116 static inline s32 rfc3315_s14_backoff_init(s32 irt)
117 {
118 /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
119 u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
120 do_div(tmp, 1000000);
121 return (s32)tmp;
122 }
123
124 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
125 {
126 /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
127 u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
128 do_div(tmp, 1000000);
129 if ((s32)tmp > mrt) {
130 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
131 tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
132 do_div(tmp, 1000000);
133 }
134 return (s32)tmp;
135 }
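/* Worked example of the two helpers above: with an initial
 * retransmission time irt of 1*HZ, the first timeout is drawn from
 * [0.9*HZ, 1.1*HZ]; each subsequent call roughly doubles it
 * (factor 1.9 .. 2.1) until the result would exceed mrt, at which
 * point it is re-randomized to mrt * (0.9 .. 1.1), as described in
 * RFC 3315, section 14.
 */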
136
137 #ifdef CONFIG_SYSCTL
138 static int addrconf_sysctl_register(struct inet6_dev *idev);
139 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
140 #else
141 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
142 {
143 return 0;
144 }
145
146 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
147 {
148 }
149 #endif
150
151 static void ipv6_regen_rndid(struct inet6_dev *idev);
152 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
153
154 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
155 static int ipv6_count_addresses(const struct inet6_dev *idev);
156 static int ipv6_generate_stable_address(struct in6_addr *addr,
157 u8 dad_count,
158 const struct inet6_dev *idev);
159
160 #define IN6_ADDR_HSIZE_SHIFT 8
161 #define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
162 /*
163 * Configured unicast address hash table
164 */
165 static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
166 static DEFINE_SPINLOCK(addrconf_hash_lock);
167
168 static void addrconf_verify(void);
169 static void addrconf_verify_rtnl(void);
170 static void addrconf_verify_work(struct work_struct *);
171
172 static struct workqueue_struct *addrconf_wq;
173 static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
174
175 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
176 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
177
178 static void addrconf_type_change(struct net_device *dev,
179 unsigned long event);
180 static int addrconf_ifdown(struct net_device *dev, int how);
181
182 static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
183 int plen,
184 const struct net_device *dev,
185 u32 flags, u32 noflags);
186
187 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
188 static void addrconf_dad_work(struct work_struct *w);
189 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
190 static void addrconf_dad_run(struct inet6_dev *idev);
191 static void addrconf_rs_timer(struct timer_list *t);
192 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
193 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
194
195 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
196 struct prefix_info *pinfo);
197
198 static struct ipv6_devconf ipv6_devconf __read_mostly = {
199 .forwarding = 0,
200 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
201 .mtu6 = IPV6_MIN_MTU,
202 .accept_ra = 1,
203 .accept_redirects = 1,
204 .autoconf = 1,
205 .force_mld_version = 0,
206 .mldv1_unsolicited_report_interval = 10 * HZ,
207 .mldv2_unsolicited_report_interval = HZ,
208 .dad_transmits = 1,
209 .rtr_solicits = MAX_RTR_SOLICITATIONS,
210 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
211 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
212 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
213 .use_tempaddr = 0,
214 .temp_valid_lft = TEMP_VALID_LIFETIME,
215 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
216 .regen_max_retry = REGEN_MAX_RETRY,
217 .max_desync_factor = MAX_DESYNC_FACTOR,
218 .max_addresses = IPV6_MAX_ADDRESSES,
219 .accept_ra_defrtr = 1,
220 .accept_ra_from_local = 0,
221 .accept_ra_min_hop_limit= 1,
222 .accept_ra_pinfo = 1,
223 #ifdef CONFIG_IPV6_ROUTER_PREF
224 .accept_ra_rtr_pref = 1,
225 .rtr_probe_interval = 60 * HZ,
226 #ifdef CONFIG_IPV6_ROUTE_INFO
227 .accept_ra_rt_info_min_plen = 0,
228 .accept_ra_rt_info_max_plen = 0,
229 #endif
230 #endif
231 .proxy_ndp = 0,
232 .accept_source_route = 0, /* we do not accept RH0 by default. */
233 .disable_ipv6 = 0,
234 .accept_dad = 0,
235 .suppress_frag_ndisc = 1,
236 .accept_ra_mtu = 1,
237 .stable_secret = {
238 .initialized = false,
239 },
240 .use_oif_addrs_only = 0,
241 .ignore_routes_with_linkdown = 0,
242 .keep_addr_on_down = 0,
243 .seg6_enabled = 0,
244 #ifdef CONFIG_IPV6_SEG6_HMAC
245 .seg6_require_hmac = 0,
246 #endif
247 .enhanced_dad = 1,
248 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
249 .disable_policy = 0,
250 };
251
252 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
253 .forwarding = 0,
254 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
255 .mtu6 = IPV6_MIN_MTU,
256 .accept_ra = 1,
257 .accept_redirects = 1,
258 .autoconf = 1,
259 .force_mld_version = 0,
260 .mldv1_unsolicited_report_interval = 10 * HZ,
261 .mldv2_unsolicited_report_interval = HZ,
262 .dad_transmits = 1,
263 .rtr_solicits = MAX_RTR_SOLICITATIONS,
264 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
265 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
266 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
267 .use_tempaddr = 0,
268 .temp_valid_lft = TEMP_VALID_LIFETIME,
269 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
270 .regen_max_retry = REGEN_MAX_RETRY,
271 .max_desync_factor = MAX_DESYNC_FACTOR,
272 .max_addresses = IPV6_MAX_ADDRESSES,
273 .accept_ra_defrtr = 1,
274 .accept_ra_from_local = 0,
275 .accept_ra_min_hop_limit= 1,
276 .accept_ra_pinfo = 1,
277 #ifdef CONFIG_IPV6_ROUTER_PREF
278 .accept_ra_rtr_pref = 1,
279 .rtr_probe_interval = 60 * HZ,
280 #ifdef CONFIG_IPV6_ROUTE_INFO
281 .accept_ra_rt_info_min_plen = 0,
282 .accept_ra_rt_info_max_plen = 0,
283 #endif
284 #endif
285 .proxy_ndp = 0,
286 .accept_source_route = 0, /* we do not accept RH0 by default. */
287 .disable_ipv6 = 0,
288 .accept_dad = 1,
289 .suppress_frag_ndisc = 1,
290 .accept_ra_mtu = 1,
291 .stable_secret = {
292 .initialized = false,
293 },
294 .use_oif_addrs_only = 0,
295 .ignore_routes_with_linkdown = 0,
296 .keep_addr_on_down = 0,
297 .seg6_enabled = 0,
298 #ifdef CONFIG_IPV6_SEG6_HMAC
299 .seg6_require_hmac = 0,
300 #endif
301 .enhanced_dad = 1,
302 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
303 .disable_policy = 0,
304 };
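/* Note on the two templates above: ipv6_devconf backs the "all"
 * settings (/proc/sys/net/ipv6/conf/all/), while ipv6_devconf_dflt
 * backs "default" and is copied into each new inet6_dev (see the
 * memcpy() of devconf_dflt in ipv6_add_dev()).  Per-namespace copies
 * of both are made at network-namespace init time.
 */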
305
306 /* Check if link is ready: is it up and is a valid qdisc available */
307 static inline bool addrconf_link_ready(const struct net_device *dev)
308 {
309 return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
310 }
311
312 static void addrconf_del_rs_timer(struct inet6_dev *idev)
313 {
314 if (del_timer(&idev->rs_timer))
315 __in6_dev_put(idev);
316 }
317
318 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
319 {
320 if (cancel_delayed_work(&ifp->dad_work))
321 __in6_ifa_put(ifp);
322 }
323
324 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
325 unsigned long when)
326 {
327 if (!timer_pending(&idev->rs_timer))
328 in6_dev_hold(idev);
329 mod_timer(&idev->rs_timer, jiffies + when);
330 }
331
332 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
333 unsigned long delay)
334 {
335 in6_ifa_hold(ifp);
336 if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
337 in6_ifa_put(ifp);
338 }
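/* Reference-counting convention for the helpers above: a queued DAD
 * work item owns exactly one reference on the ifp.  The hold is taken
 * unconditionally before mod_delayed_work(); if the work was already
 * pending (mod_delayed_work() returns true) the extra reference is
 * dropped again, so requeueing never leaks or double-drops a ref.
 */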
339
340 static int snmp6_alloc_dev(struct inet6_dev *idev)
341 {
342 int i;
343
344 idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
345 if (!idev->stats.ipv6)
346 goto err_ip;
347
348 for_each_possible_cpu(i) {
349 struct ipstats_mib *addrconf_stats;
350 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
351 u64_stats_init(&addrconf_stats->syncp);
352 }
353
354
355 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
356 GFP_KERNEL);
357 if (!idev->stats.icmpv6dev)
358 goto err_icmp;
359 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
360 GFP_KERNEL);
361 if (!idev->stats.icmpv6msgdev)
362 goto err_icmpmsg;
363
364 return 0;
365
366 err_icmpmsg:
367 kfree(idev->stats.icmpv6dev);
368 err_icmp:
369 free_percpu(idev->stats.ipv6);
370 err_ip:
371 return -ENOMEM;
372 }
373
374 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
375 {
376 struct inet6_dev *ndev;
377 int err = -ENOMEM;
378
379 ASSERT_RTNL();
380
381 if (dev->mtu < IPV6_MIN_MTU)
382 return ERR_PTR(-EINVAL);
383
384 ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
385 if (!ndev)
386 return ERR_PTR(err);
387
388 rwlock_init(&ndev->lock);
389 ndev->dev = dev;
390 INIT_LIST_HEAD(&ndev->addr_list);
391 timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
392 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
393
394 if (ndev->cnf.stable_secret.initialized)
395 ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
396 else
397 ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode;
398
399 ndev->cnf.mtu6 = dev->mtu;
400 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
401 if (!ndev->nd_parms) {
402 kfree(ndev);
403 return ERR_PTR(err);
404 }
405 if (ndev->cnf.forwarding)
406 dev_disable_lro(dev);
407 /* We refer to the device */
408 dev_hold(dev);
409
410 if (snmp6_alloc_dev(ndev) < 0) {
411 ADBG(KERN_WARNING
412 "%s: cannot allocate memory for statistics; dev=%s.\n",
413 __func__, dev->name);
414 neigh_parms_release(&nd_tbl, ndev->nd_parms);
415 dev_put(dev);
416 kfree(ndev);
417 return ERR_PTR(err);
418 }
419
420 if (snmp6_register_dev(ndev) < 0) {
421 ADBG(KERN_WARNING
422 "%s: cannot create /proc/net/dev_snmp6/%s\n",
423 __func__, dev->name);
424 goto err_release;
425 }
426
427 /* One reference from device. */
428 refcount_set(&ndev->refcnt, 1);
429
430 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
431 ndev->cnf.accept_dad = -1;
432
433 #if IS_ENABLED(CONFIG_IPV6_SIT)
434 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
435 pr_info("%s: Disabled Multicast RS\n", dev->name);
436 ndev->cnf.rtr_solicits = 0;
437 }
438 #endif
439
440 INIT_LIST_HEAD(&ndev->tempaddr_list);
441 ndev->desync_factor = U32_MAX;
442 if ((dev->flags&IFF_LOOPBACK) ||
443 dev->type == ARPHRD_TUNNEL ||
444 dev->type == ARPHRD_TUNNEL6 ||
445 dev->type == ARPHRD_SIT ||
446 dev->type == ARPHRD_NONE) {
447 ndev->cnf.use_tempaddr = -1;
448 } else
449 ipv6_regen_rndid(ndev);
450
451 ndev->token = in6addr_any;
452
453 if (netif_running(dev) && addrconf_link_ready(dev))
454 ndev->if_flags |= IF_READY;
455
456 ipv6_mc_init_dev(ndev);
457 ndev->tstamp = jiffies;
458 err = addrconf_sysctl_register(ndev);
459 if (err) {
460 ipv6_mc_destroy_dev(ndev);
461 snmp6_unregister_dev(ndev);
462 goto err_release;
463 }
464 /* protected by rtnl_lock */
465 rcu_assign_pointer(dev->ip6_ptr, ndev);
466
467 /* Join interface-local all-node multicast group */
468 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
469
470 /* Join all-node multicast group */
471 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
472
473 /* Join all-router multicast group if forwarding is set */
474 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
475 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
476
477 return ndev;
478
479 err_release:
480 neigh_parms_release(&nd_tbl, ndev->nd_parms);
481 ndev->dead = 1;
482 in6_dev_finish_destroy(ndev);
483 return ERR_PTR(err);
484 }
485
486 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
487 {
488 struct inet6_dev *idev;
489
490 ASSERT_RTNL();
491
492 idev = __in6_dev_get(dev);
493 if (!idev) {
494 idev = ipv6_add_dev(dev);
495 if (IS_ERR(idev))
496 return NULL;
497 }
498
499 if (dev->flags&IFF_UP)
500 ipv6_mc_up(idev);
501 return idev;
502 }
503
504 static int inet6_netconf_msgsize_devconf(int type)
505 {
506 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
507 + nla_total_size(4); /* NETCONFA_IFINDEX */
508 bool all = false;
509
510 if (type == NETCONFA_ALL)
511 all = true;
512
513 if (all || type == NETCONFA_FORWARDING)
514 size += nla_total_size(4);
515 #ifdef CONFIG_IPV6_MROUTE
516 if (all || type == NETCONFA_MC_FORWARDING)
517 size += nla_total_size(4);
518 #endif
519 if (all || type == NETCONFA_PROXY_NEIGH)
520 size += nla_total_size(4);
521
522 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
523 size += nla_total_size(4);
524
525 return size;
526 }
527
528 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
529 struct ipv6_devconf *devconf, u32 portid,
530 u32 seq, int event, unsigned int flags,
531 int type)
532 {
533 struct nlmsghdr *nlh;
534 struct netconfmsg *ncm;
535 bool all = false;
536
537 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
538 flags);
539 if (!nlh)
540 return -EMSGSIZE;
541
542 if (type == NETCONFA_ALL)
543 all = true;
544
545 ncm = nlmsg_data(nlh);
546 ncm->ncm_family = AF_INET6;
547
548 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
549 goto nla_put_failure;
550
551 if (!devconf)
552 goto out;
553
554 if ((all || type == NETCONFA_FORWARDING) &&
555 nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
556 goto nla_put_failure;
557 #ifdef CONFIG_IPV6_MROUTE
558 if ((all || type == NETCONFA_MC_FORWARDING) &&
559 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
560 devconf->mc_forwarding) < 0)
561 goto nla_put_failure;
562 #endif
563 if ((all || type == NETCONFA_PROXY_NEIGH) &&
564 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
565 goto nla_put_failure;
566
567 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
568 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
569 devconf->ignore_routes_with_linkdown) < 0)
570 goto nla_put_failure;
571
572 out:
573 nlmsg_end(skb, nlh);
574 return 0;
575
576 nla_put_failure:
577 nlmsg_cancel(skb, nlh);
578 return -EMSGSIZE;
579 }
580
581 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
582 int ifindex, struct ipv6_devconf *devconf)
583 {
584 struct sk_buff *skb;
585 int err = -ENOBUFS;
586
587 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
588 if (!skb)
589 goto errout;
590
591 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
592 event, 0, type);
593 if (err < 0) {
594 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
595 WARN_ON(err == -EMSGSIZE);
596 kfree_skb(skb);
597 goto errout;
598 }
599 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
600 return;
601 errout:
602 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
603 }
604
605 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
606 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
607 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
608 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
609 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
610 };
611
612 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
613 struct nlmsghdr *nlh,
614 struct netlink_ext_ack *extack)
615 {
616 struct net *net = sock_net(in_skb->sk);
617 struct nlattr *tb[NETCONFA_MAX+1];
618 struct inet6_dev *in6_dev = NULL;
619 struct net_device *dev = NULL;
620 struct netconfmsg *ncm;
621 struct sk_buff *skb;
622 struct ipv6_devconf *devconf;
623 int ifindex;
624 int err;
625
626 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
627 devconf_ipv6_policy, extack);
628 if (err < 0)
629 return err;
630
631 if (!tb[NETCONFA_IFINDEX])
632 return -EINVAL;
633
634 err = -EINVAL;
635 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
636 switch (ifindex) {
637 case NETCONFA_IFINDEX_ALL:
638 devconf = net->ipv6.devconf_all;
639 break;
640 case NETCONFA_IFINDEX_DEFAULT:
641 devconf = net->ipv6.devconf_dflt;
642 break;
643 default:
644 dev = dev_get_by_index(net, ifindex);
645 if (!dev)
646 return -EINVAL;
647 in6_dev = in6_dev_get(dev);
648 if (!in6_dev)
649 goto errout;
650 devconf = &in6_dev->cnf;
651 break;
652 }
653
654 err = -ENOBUFS;
655 skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
656 if (!skb)
657 goto errout;
658
659 err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
660 NETLINK_CB(in_skb).portid,
661 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
662 NETCONFA_ALL);
663 if (err < 0) {
664 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
665 WARN_ON(err == -EMSGSIZE);
666 kfree_skb(skb);
667 goto errout;
668 }
669 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
670 errout:
671 if (in6_dev)
672 in6_dev_put(in6_dev);
673 if (dev)
674 dev_put(dev);
675 return err;
676 }
677
678 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
679 struct netlink_callback *cb)
680 {
681 struct net *net = sock_net(skb->sk);
682 int h, s_h;
683 int idx, s_idx;
684 struct net_device *dev;
685 struct inet6_dev *idev;
686 struct hlist_head *head;
687
688 s_h = cb->args[0];
689 s_idx = idx = cb->args[1];
690
691 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
692 idx = 0;
693 head = &net->dev_index_head[h];
694 rcu_read_lock();
695 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
696 net->dev_base_seq;
697 hlist_for_each_entry_rcu(dev, head, index_hlist) {
698 if (idx < s_idx)
699 goto cont;
700 idev = __in6_dev_get(dev);
701 if (!idev)
702 goto cont;
703
704 if (inet6_netconf_fill_devconf(skb, dev->ifindex,
705 &idev->cnf,
706 NETLINK_CB(cb->skb).portid,
707 cb->nlh->nlmsg_seq,
708 RTM_NEWNETCONF,
709 NLM_F_MULTI,
710 NETCONFA_ALL) < 0) {
711 rcu_read_unlock();
712 goto done;
713 }
714 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
715 cont:
716 idx++;
717 }
718 rcu_read_unlock();
719 }
720 if (h == NETDEV_HASHENTRIES) {
721 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
722 net->ipv6.devconf_all,
723 NETLINK_CB(cb->skb).portid,
724 cb->nlh->nlmsg_seq,
725 RTM_NEWNETCONF, NLM_F_MULTI,
726 NETCONFA_ALL) < 0)
727 goto done;
728 else
729 h++;
730 }
731 if (h == NETDEV_HASHENTRIES + 1) {
732 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
733 net->ipv6.devconf_dflt,
734 NETLINK_CB(cb->skb).portid,
735 cb->nlh->nlmsg_seq,
736 RTM_NEWNETCONF, NLM_F_MULTI,
737 NETCONFA_ALL) < 0)
738 goto done;
739 else
740 h++;
741 }
742 done:
743 cb->args[0] = h;
744 cb->args[1] = idx;
745
746 return skb->len;
747 }
748
749 #ifdef CONFIG_SYSCTL
750 static void dev_forward_change(struct inet6_dev *idev)
751 {
752 struct net_device *dev;
753 struct inet6_ifaddr *ifa;
754
755 if (!idev)
756 return;
757 dev = idev->dev;
758 if (idev->cnf.forwarding)
759 dev_disable_lro(dev);
760 if (dev->flags & IFF_MULTICAST) {
761 if (idev->cnf.forwarding) {
762 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
763 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
764 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
765 } else {
766 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
767 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
768 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
769 }
770 }
771
772 list_for_each_entry(ifa, &idev->addr_list, if_list) {
773 if (ifa->flags&IFA_F_TENTATIVE)
774 continue;
775 if (idev->cnf.forwarding)
776 addrconf_join_anycast(ifa);
777 else
778 addrconf_leave_anycast(ifa);
779 }
780 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
781 NETCONFA_FORWARDING,
782 dev->ifindex, &idev->cnf);
783 }
784
785
786 static void addrconf_forward_change(struct net *net, __s32 newf)
787 {
788 struct net_device *dev;
789 struct inet6_dev *idev;
790
791 for_each_netdev(net, dev) {
792 idev = __in6_dev_get(dev);
793 if (idev) {
794 int changed = (!idev->cnf.forwarding) ^ (!newf);
795 idev->cnf.forwarding = newf;
796 if (changed)
797 dev_forward_change(idev);
798 }
799 }
800 }
801
802 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
803 {
804 struct net *net;
805 int old;
806
807 if (!rtnl_trylock())
808 return restart_syscall();
809
810 net = (struct net *)table->extra2;
811 old = *p;
812 *p = newf;
813
814 if (p == &net->ipv6.devconf_dflt->forwarding) {
815 if ((!newf) ^ (!old))
816 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
817 NETCONFA_FORWARDING,
818 NETCONFA_IFINDEX_DEFAULT,
819 net->ipv6.devconf_dflt);
820 rtnl_unlock();
821 return 0;
822 }
823
824 if (p == &net->ipv6.devconf_all->forwarding) {
825 int old_dflt = net->ipv6.devconf_dflt->forwarding;
826
827 net->ipv6.devconf_dflt->forwarding = newf;
828 if ((!newf) ^ (!old_dflt))
829 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
830 NETCONFA_FORWARDING,
831 NETCONFA_IFINDEX_DEFAULT,
832 net->ipv6.devconf_dflt);
833
834 addrconf_forward_change(net, newf);
835 if ((!newf) ^ (!old))
836 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
837 NETCONFA_FORWARDING,
838 NETCONFA_IFINDEX_ALL,
839 net->ipv6.devconf_all);
840 } else if ((!newf) ^ (!old))
841 dev_forward_change((struct inet6_dev *)table->extra1);
842 rtnl_unlock();
843
844 if (newf)
845 rt6_purge_dflt_routers(net);
846 return 1;
847 }
848
849 static void addrconf_linkdown_change(struct net *net, __s32 newf)
850 {
851 struct net_device *dev;
852 struct inet6_dev *idev;
853
854 for_each_netdev(net, dev) {
855 idev = __in6_dev_get(dev);
856 if (idev) {
857 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
858
859 idev->cnf.ignore_routes_with_linkdown = newf;
860 if (changed)
861 inet6_netconf_notify_devconf(dev_net(dev),
862 RTM_NEWNETCONF,
863 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
864 dev->ifindex,
865 &idev->cnf);
866 }
867 }
868 }
869
870 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
871 {
872 struct net *net;
873 int old;
874
875 if (!rtnl_trylock())
876 return restart_syscall();
877
878 net = (struct net *)table->extra2;
879 old = *p;
880 *p = newf;
881
882 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
883 if ((!newf) ^ (!old))
884 inet6_netconf_notify_devconf(net,
885 RTM_NEWNETCONF,
886 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
887 NETCONFA_IFINDEX_DEFAULT,
888 net->ipv6.devconf_dflt);
889 rtnl_unlock();
890 return 0;
891 }
892
893 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
894 net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
895 addrconf_linkdown_change(net, newf);
896 if ((!newf) ^ (!old))
897 inet6_netconf_notify_devconf(net,
898 RTM_NEWNETCONF,
899 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
900 NETCONFA_IFINDEX_ALL,
901 net->ipv6.devconf_all);
902 }
903 rtnl_unlock();
904
905 return 1;
906 }
907
908 #endif
909
910 /* Nobody refers to this ifaddr, destroy it */
911 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
912 {
913 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
914
915 #ifdef NET_REFCNT_DEBUG
916 pr_debug("%s\n", __func__);
917 #endif
918
919 in6_dev_put(ifp->idev);
920
921 if (cancel_delayed_work(&ifp->dad_work))
922 pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
923 ifp);
924
925 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
926 pr_warn("Freeing alive inet6 address %p\n", ifp);
927 return;
928 }
929 ip6_rt_put(ifp->rt);
930
931 kfree_rcu(ifp, rcu);
932 }
933
934 static void
935 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
936 {
937 struct list_head *p;
938 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
939
940 /*
941 * Each device address list is sorted in order of scope -
942 * global before linklocal.
943 */
944 list_for_each(p, &idev->addr_list) {
945 struct inet6_ifaddr *ifa
946 = list_entry(p, struct inet6_ifaddr, if_list);
947 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
948 break;
949 }
950
951 list_add_tail_rcu(&ifp->if_list, p);
952 }
953
954 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
955 {
956 u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
957
958 return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
959 }
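/* The hash above mixes the address with net_hash_mix(net), so the
 * same address configured in different network namespaces does not
 * pile up in a single bucket of the global inet6_addr_lst[] table.
 */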
960
961 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
962 struct net_device *dev, unsigned int hash)
963 {
964 struct inet6_ifaddr *ifp;
965
966 hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
967 if (!net_eq(dev_net(ifp->idev->dev), net))
968 continue;
969 if (ipv6_addr_equal(&ifp->addr, addr)) {
970 if (!dev || ifp->idev->dev == dev)
971 return true;
972 }
973 }
974 return false;
975 }
976
977 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
978 {
979 unsigned int hash = inet6_addr_hash(dev_net(dev), &ifa->addr);
980 int err = 0;
981
982 spin_lock(&addrconf_hash_lock);
983
984 /* Ignore adding duplicate addresses on an interface */
985 if (ipv6_chk_same_addr(dev_net(dev), &ifa->addr, dev, hash)) {
986 ADBG("ipv6_add_addr: already assigned\n");
987 err = -EEXIST;
988 } else {
989 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
990 }
991
992 spin_unlock(&addrconf_hash_lock);
993
994 return err;
995 }
996
997 /* On success it returns ifp with increased reference count */
998
999 static struct inet6_ifaddr *
1000 ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
1001 const struct in6_addr *peer_addr, int pfxlen,
1002 int scope, u32 flags, u32 valid_lft, u32 prefered_lft,
1003 bool can_block, struct netlink_ext_ack *extack)
1004 {
1005 gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
1006 struct net *net = dev_net(idev->dev);
1007 struct inet6_ifaddr *ifa = NULL;
1008 struct rt6_info *rt = NULL;
1009 int err = 0;
1010 int addr_type = ipv6_addr_type(addr);
1011
1012 if (addr_type == IPV6_ADDR_ANY ||
1013 addr_type & IPV6_ADDR_MULTICAST ||
1014 (!(idev->dev->flags & IFF_LOOPBACK) &&
1015 addr_type & IPV6_ADDR_LOOPBACK))
1016 return ERR_PTR(-EADDRNOTAVAIL);
1017
1018 if (idev->dead) {
1019 err = -ENODEV; /*XXX*/
1020 goto out;
1021 }
1022
1023 if (idev->cnf.disable_ipv6) {
1024 err = -EACCES;
1025 goto out;
1026 }
1027
1028 /* validator notifier needs to be blocking;
1029 * do not call in atomic context
1030 */
1031 if (can_block) {
1032 struct in6_validator_info i6vi = {
1033 .i6vi_addr = *addr,
1034 .i6vi_dev = idev,
1035 .extack = extack,
1036 };
1037
1038 err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
1039 err = notifier_to_errno(err);
1040 if (err < 0)
1041 goto out;
1042 }
1043
1044 ifa = kzalloc(sizeof(*ifa), gfp_flags);
1045 if (!ifa) {
1046 ADBG("ipv6_add_addr: malloc failed\n");
1047 err = -ENOBUFS;
1048 goto out;
1049 }
1050
1051 rt = addrconf_dst_alloc(idev, addr, false);
1052 if (IS_ERR(rt)) {
1053 err = PTR_ERR(rt);
1054 rt = NULL;
1055 goto out;
1056 }
1057
1058 if (net->ipv6.devconf_all->disable_policy ||
1059 idev->cnf.disable_policy)
1060 rt->dst.flags |= DST_NOPOLICY;
1061
1062 neigh_parms_data_state_setall(idev->nd_parms);
1063
1064 ifa->addr = *addr;
1065 if (peer_addr)
1066 ifa->peer_addr = *peer_addr;
1067
1068 spin_lock_init(&ifa->lock);
1069 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1070 INIT_HLIST_NODE(&ifa->addr_lst);
1071 ifa->scope = scope;
1072 ifa->prefix_len = pfxlen;
1073 ifa->flags = flags;
1074 /* No need to add the TENTATIVE flag for addresses with NODAD */
1075 if (!(flags & IFA_F_NODAD))
1076 ifa->flags |= IFA_F_TENTATIVE;
1077 ifa->valid_lft = valid_lft;
1078 ifa->prefered_lft = prefered_lft;
1079 ifa->cstamp = ifa->tstamp = jiffies;
1080 ifa->tokenized = false;
1081
1082 ifa->rt = rt;
1083
1084 ifa->idev = idev;
1085 in6_dev_hold(idev);
1086
1087 /* For caller */
1088 refcount_set(&ifa->refcnt, 1);
1089
1090 rcu_read_lock_bh();
1091
1092 err = ipv6_add_addr_hash(idev->dev, ifa);
1093 if (err < 0) {
1094 rcu_read_unlock_bh();
1095 goto out;
1096 }
1097
1098 write_lock(&idev->lock);
1099
1100 /* Add to inet6_dev unicast addr list. */
1101 ipv6_link_dev_addr(idev, ifa);
1102
1103 if (ifa->flags&IFA_F_TEMPORARY) {
1104 list_add(&ifa->tmp_list, &idev->tempaddr_list);
1105 in6_ifa_hold(ifa);
1106 }
1107
1108 in6_ifa_hold(ifa);
1109 write_unlock(&idev->lock);
1110
1111 rcu_read_unlock_bh();
1112
1113 inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1114 out:
1115 if (unlikely(err < 0)) {
1116 if (rt)
1117 ip6_rt_put(rt);
1118 if (ifa) {
1119 if (ifa->idev)
1120 in6_dev_put(ifa->idev);
1121 kfree(ifa);
1122 }
1123 ifa = ERR_PTR(err);
1124 }
1125
1126 return ifa;
1127 }
1128
1129 enum cleanup_prefix_rt_t {
1130 CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */
1131 CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */
1132 CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1133 };
1134
1135 /*
1136 * Check, whether the prefix for ifp would still need a prefix route
1137 * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1138 * constants.
1139 *
1140 * 1) we don't purge prefix if address was not permanent.
1141 * prefix is managed by its own lifetime.
1142 * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1143 * 3) if there are no addresses, delete prefix.
1144 * 4) if there are still other permanent address(es),
1145 * corresponding prefix is still permanent.
1146 * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1147 * don't purge the prefix, assume user space is managing it.
1148 * 6) otherwise, update prefix lifetime to the
1149 * longest valid lifetime among the corresponding
1150 * addresses on the device.
1151 * Note: subsequent RA will update lifetime.
1152 **/
1153 static enum cleanup_prefix_rt_t
1154 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1155 {
1156 struct inet6_ifaddr *ifa;
1157 struct inet6_dev *idev = ifp->idev;
1158 unsigned long lifetime;
1159 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1160
1161 *expires = jiffies;
1162
1163 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1164 if (ifa == ifp)
1165 continue;
1166 if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1167 ifp->prefix_len))
1168 continue;
1169 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1170 return CLEANUP_PREFIX_RT_NOP;
1171
1172 action = CLEANUP_PREFIX_RT_EXPIRE;
1173
1174 spin_lock(&ifa->lock);
1175
1176 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1177 /*
1178 * Note: Because this address is
1179 * not permanent, lifetime <
1180 * LONG_MAX / HZ here.
1181 */
1182 if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1183 *expires = ifa->tstamp + lifetime * HZ;
1184 spin_unlock(&ifa->lock);
1185 }
1186
1187 return action;
1188 }
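/* Example of the policy above: if the address being deleted shares its
 * prefix only with a temporary (non-permanent, non-NOPREFIXROUTE)
 * address, the prefix route is not deleted outright; instead *expires
 * is pushed out to that address's remaining valid lifetime and the
 * caller lets the route expire (CLEANUP_PREFIX_RT_EXPIRE).
 */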
1189
1190 static void
1191 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
1192 {
1193 struct rt6_info *rt;
1194
1195 rt = addrconf_get_prefix_route(&ifp->addr,
1196 ifp->prefix_len,
1197 ifp->idev->dev,
1198 0, RTF_GATEWAY | RTF_DEFAULT);
1199 if (rt) {
1200 if (del_rt)
1201 ip6_del_rt(rt);
1202 else {
1203 if (!(rt->rt6i_flags & RTF_EXPIRES))
1204 rt6_set_expires(rt, expires);
1205 ip6_rt_put(rt);
1206 }
1207 }
1208 }
1209
1210
1211 /* This function wants to get referenced ifp and releases it before return */
1212
1213 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1214 {
1215 int state;
1216 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1217 unsigned long expires;
1218
1219 ASSERT_RTNL();
1220
1221 spin_lock_bh(&ifp->lock);
1222 state = ifp->state;
1223 ifp->state = INET6_IFADDR_STATE_DEAD;
1224 spin_unlock_bh(&ifp->lock);
1225
1226 if (state == INET6_IFADDR_STATE_DEAD)
1227 goto out;
1228
1229 spin_lock_bh(&addrconf_hash_lock);
1230 hlist_del_init_rcu(&ifp->addr_lst);
1231 spin_unlock_bh(&addrconf_hash_lock);
1232
1233 write_lock_bh(&ifp->idev->lock);
1234
1235 if (ifp->flags&IFA_F_TEMPORARY) {
1236 list_del(&ifp->tmp_list);
1237 if (ifp->ifpub) {
1238 in6_ifa_put(ifp->ifpub);
1239 ifp->ifpub = NULL;
1240 }
1241 __in6_ifa_put(ifp);
1242 }
1243
1244 if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1245 action = check_cleanup_prefix_route(ifp, &expires);
1246
1247 list_del_rcu(&ifp->if_list);
1248 __in6_ifa_put(ifp);
1249
1250 write_unlock_bh(&ifp->idev->lock);
1251
1252 addrconf_del_dad_work(ifp);
1253
1254 ipv6_ifa_notify(RTM_DELADDR, ifp);
1255
1256 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1257
1258 if (action != CLEANUP_PREFIX_RT_NOP) {
1259 cleanup_prefix_route(ifp, expires,
1260 action == CLEANUP_PREFIX_RT_DEL);
1261 }
1262
1263 /* clean up prefsrc entries */
1264 rt6_remove_prefsrc(ifp);
1265 out:
1266 in6_ifa_put(ifp);
1267 }
1268
1269 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp,
1270 struct inet6_ifaddr *ift,
1271 bool block)
1272 {
1273 struct inet6_dev *idev = ifp->idev;
1274 struct in6_addr addr, *tmpaddr;
1275 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
1276 unsigned long regen_advance;
1277 int tmp_plen;
1278 int ret = 0;
1279 u32 addr_flags;
1280 unsigned long now = jiffies;
1281 long max_desync_factor;
1282 s32 cnf_temp_preferred_lft;
1283
1284 write_lock_bh(&idev->lock);
1285 if (ift) {
1286 spin_lock_bh(&ift->lock);
1287 memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
1288 spin_unlock_bh(&ift->lock);
1289 tmpaddr = &addr;
1290 } else {
1291 tmpaddr = NULL;
1292 }
1293 retry:
1294 in6_dev_hold(idev);
1295 if (idev->cnf.use_tempaddr <= 0) {
1296 write_unlock_bh(&idev->lock);
1297 pr_info("%s: use_tempaddr is disabled\n", __func__);
1298 in6_dev_put(idev);
1299 ret = -1;
1300 goto out;
1301 }
1302 spin_lock_bh(&ifp->lock);
1303 if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1304 idev->cnf.use_tempaddr = -1; /*XXX*/
1305 spin_unlock_bh(&ifp->lock);
1306 write_unlock_bh(&idev->lock);
1307 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1308 __func__);
1309 in6_dev_put(idev);
1310 ret = -1;
1311 goto out;
1312 }
1313 in6_ifa_hold(ifp);
1314 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1315 ipv6_try_regen_rndid(idev, tmpaddr);
1316 memcpy(&addr.s6_addr[8], idev->rndid, 8);
1317 age = (now - ifp->tstamp) / HZ;
1318
1319 regen_advance = idev->cnf.regen_max_retry *
1320 idev->cnf.dad_transmits *
1321 NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
1322
1323 /* recalculate max_desync_factor each time and update
1324 * idev->desync_factor if it's larger
1325 */
1326 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1327 max_desync_factor = min_t(__u32,
1328 idev->cnf.max_desync_factor,
1329 cnf_temp_preferred_lft - regen_advance);
1330
1331 if (unlikely(idev->desync_factor > max_desync_factor)) {
1332 if (max_desync_factor > 0) {
1333 get_random_bytes(&idev->desync_factor,
1334 sizeof(idev->desync_factor));
1335 idev->desync_factor %= max_desync_factor;
1336 } else {
1337 idev->desync_factor = 0;
1338 }
1339 }
1340
1341 tmp_valid_lft = min_t(__u32,
1342 ifp->valid_lft,
1343 idev->cnf.temp_valid_lft + age);
1344 tmp_prefered_lft = cnf_temp_preferred_lft + age -
1345 idev->desync_factor;
1346 tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft);
1347 tmp_plen = ifp->prefix_len;
1348 tmp_tstamp = ifp->tstamp;
1349 spin_unlock_bh(&ifp->lock);
1350
1351 write_unlock_bh(&idev->lock);
1352
1353 /* A temporary address is created only if this calculated Preferred
1354 * Lifetime is greater than REGEN_ADVANCE time units. In particular,
1355 * an implementation must not create a temporary address with a zero
1356 * Preferred Lifetime.
1357 * Use age calculation as in addrconf_verify to avoid unnecessary
1358 * temporary addresses being generated.
1359 */
1360 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1361 if (tmp_prefered_lft <= regen_advance + age) {
1362 in6_ifa_put(ifp);
1363 in6_dev_put(idev);
1364 ret = -1;
1365 goto out;
1366 }
1367
1368 addr_flags = IFA_F_TEMPORARY;
1369 /* set in addrconf_prefix_rcv() */
1370 if (ifp->flags & IFA_F_OPTIMISTIC)
1371 addr_flags |= IFA_F_OPTIMISTIC;
1372
1373 ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
1374 ipv6_addr_scope(&addr), addr_flags,
1375 tmp_valid_lft, tmp_prefered_lft, block, NULL);
1376 if (IS_ERR(ift)) {
1377 in6_ifa_put(ifp);
1378 in6_dev_put(idev);
1379 pr_info("%s: retry temporary address regeneration\n", __func__);
1380 tmpaddr = &addr;
1381 write_lock_bh(&idev->lock);
1382 goto retry;
1383 }
1384
1385 spin_lock_bh(&ift->lock);
1386 ift->ifpub = ifp;
1387 ift->cstamp = now;
1388 ift->tstamp = tmp_tstamp;
1389 spin_unlock_bh(&ift->lock);
1390
1391 addrconf_dad_start(ift);
1392 in6_ifa_put(ift);
1393 in6_dev_put(idev);
1394 out:
1395 return ret;
1396 }
1397
1398 /*
1399 * Choose an appropriate source address (RFC3484)
1400 */
1401 enum {
1402 IPV6_SADDR_RULE_INIT = 0,
1403 IPV6_SADDR_RULE_LOCAL,
1404 IPV6_SADDR_RULE_SCOPE,
1405 IPV6_SADDR_RULE_PREFERRED,
1406 #ifdef CONFIG_IPV6_MIP6
1407 IPV6_SADDR_RULE_HOA,
1408 #endif
1409 IPV6_SADDR_RULE_OIF,
1410 IPV6_SADDR_RULE_LABEL,
1411 IPV6_SADDR_RULE_PRIVACY,
1412 IPV6_SADDR_RULE_ORCHID,
1413 IPV6_SADDR_RULE_PREFIX,
1414 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1415 IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1416 #endif
1417 IPV6_SADDR_RULE_MAX
1418 };
1419
1420 struct ipv6_saddr_score {
1421 int rule;
1422 int addr_type;
1423 struct inet6_ifaddr *ifa;
1424 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1425 int scopedist;
1426 int matchlen;
1427 };
1428
1429 struct ipv6_saddr_dst {
1430 const struct in6_addr *addr;
1431 int ifindex;
1432 int scope;
1433 int label;
1434 unsigned int prefs;
1435 };
1436
1437 static inline int ipv6_saddr_preferred(int type)
1438 {
1439 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1440 return 1;
1441 return 0;
1442 }
1443
1444 static bool ipv6_use_optimistic_addr(struct net *net,
1445 struct inet6_dev *idev)
1446 {
1447 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1448 if (!idev)
1449 return false;
1450 if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1451 return false;
1452 if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
1453 return false;
1454
1455 return true;
1456 #else
1457 return false;
1458 #endif
1459 }
1460
1461 static int ipv6_get_saddr_eval(struct net *net,
1462 struct ipv6_saddr_score *score,
1463 struct ipv6_saddr_dst *dst,
1464 int i)
1465 {
1466 int ret;
1467
1468 if (i <= score->rule) {
1469 switch (i) {
1470 case IPV6_SADDR_RULE_SCOPE:
1471 ret = score->scopedist;
1472 break;
1473 case IPV6_SADDR_RULE_PREFIX:
1474 ret = score->matchlen;
1475 break;
1476 default:
1477 ret = !!test_bit(i, score->scorebits);
1478 }
1479 goto out;
1480 }
1481
1482 switch (i) {
1483 case IPV6_SADDR_RULE_INIT:
1484 /* Rule 0: remember if hiscore is not ready yet */
1485 ret = !!score->ifa;
1486 break;
1487 case IPV6_SADDR_RULE_LOCAL:
1488 /* Rule 1: Prefer same address */
1489 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1490 break;
1491 case IPV6_SADDR_RULE_SCOPE:
1492 /* Rule 2: Prefer appropriate scope
1493 *
1494 * ret
1495 * ^
1496 * -1 | d 15
1497 * ---+--+-+---> scope
1498 * |
1499 * | d is scope of the destination.
1500 * B-d | \
1501 * | \ <- smaller scope is better if
1502 * B-15 | \ if scope is enough for destination.
1503 * | ret = B - scope (-1 <= scope >= d <= 15).
1504 * d-C-1 | /
1505 * |/ <- greater is better
1506 * -C / if scope is not enough for destination.
1507 * /| ret = scope - C (-1 <= d < scope <= 15).
1508 *
1509 * d - C - 1 < B -15 (for all -1 <= d <= 15).
1510 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1511 * Assume B = 0 and we get C > 29.
1512 */
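/* Concrete example with the constants used below (B = 0, C = 128):
 * for a link-local destination (scope 2), a link-local source scores
 * -2 and a global source (scope 14) scores -14, so the smaller but
 * still sufficient scope wins; a source whose scope is too small,
 * e.g. link-local for a global destination, scores 2 - 128 = -126
 * and loses to any source with sufficient scope.
 */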
1513 ret = __ipv6_addr_src_scope(score->addr_type);
1514 if (ret >= dst->scope)
1515 ret = -ret;
1516 else
1517 ret -= 128; /* 30 is enough */
1518 score->scopedist = ret;
1519 break;
1520 case IPV6_SADDR_RULE_PREFERRED:
1521 {
1522 /* Rule 3: Avoid deprecated and optimistic addresses */
1523 u8 avoid = IFA_F_DEPRECATED;
1524
1525 if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
1526 avoid |= IFA_F_OPTIMISTIC;
1527 ret = ipv6_saddr_preferred(score->addr_type) ||
1528 !(score->ifa->flags & avoid);
1529 break;
1530 }
1531 #ifdef CONFIG_IPV6_MIP6
1532 case IPV6_SADDR_RULE_HOA:
1533 {
1534 /* Rule 4: Prefer home address */
1535 int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1536 ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1537 break;
1538 }
1539 #endif
1540 case IPV6_SADDR_RULE_OIF:
1541 /* Rule 5: Prefer outgoing interface */
1542 ret = (!dst->ifindex ||
1543 dst->ifindex == score->ifa->idev->dev->ifindex);
1544 break;
1545 case IPV6_SADDR_RULE_LABEL:
1546 /* Rule 6: Prefer matching label */
1547 ret = ipv6_addr_label(net,
1548 &score->ifa->addr, score->addr_type,
1549 score->ifa->idev->dev->ifindex) == dst->label;
1550 break;
1551 case IPV6_SADDR_RULE_PRIVACY:
1552 {
1553 /* Rule 7: Prefer public address
1554 * Note: prefer temporary address if use_tempaddr >= 2
1555 */
1556 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1557 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1558 score->ifa->idev->cnf.use_tempaddr >= 2;
1559 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1560 break;
1561 }
1562 case IPV6_SADDR_RULE_ORCHID:
1563 /* Rule 8-: Prefer ORCHID vs ORCHID or
1564 * non-ORCHID vs non-ORCHID
1565 */
1566 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1567 ipv6_addr_orchid(dst->addr));
1568 break;
1569 case IPV6_SADDR_RULE_PREFIX:
1570 /* Rule 8: Use longest matching prefix */
1571 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1572 if (ret > score->ifa->prefix_len)
1573 ret = score->ifa->prefix_len;
1574 score->matchlen = ret;
1575 break;
1576 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1577 case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1578 /* Optimistic addresses still have lower precedence than other
1579 * preferred addresses.
1580 */
1581 ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1582 break;
1583 #endif
1584 default:
1585 ret = 0;
1586 }
1587
1588 if (ret)
1589 __set_bit(i, score->scorebits);
1590 score->rule = i;
1591 out:
1592 return ret;
1593 }
1594
1595 static int __ipv6_dev_get_saddr(struct net *net,
1596 struct ipv6_saddr_dst *dst,
1597 struct inet6_dev *idev,
1598 struct ipv6_saddr_score *scores,
1599 int hiscore_idx)
1600 {
1601 struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1602
1603 list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
1604 int i;
1605
1606 /*
1607 * - Tentative Address (RFC2462 section 5.4)
1608 * - A tentative address is not considered
1609 * "assigned to an interface" in the traditional
1610 * sense, unless it is also flagged as optimistic.
1611 * - Candidate Source Address (section 4)
1612 * - In any case, anycast addresses, multicast
1613 * addresses, and the unspecified address MUST
1614 * NOT be included in a candidate set.
1615 */
1616 if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1617 (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1618 continue;
1619
1620 score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1621
1622 if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1623 score->addr_type & IPV6_ADDR_MULTICAST)) {
1624 net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1625 idev->dev->name);
1626 continue;
1627 }
1628
1629 score->rule = -1;
1630 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1631
1632 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1633 int minihiscore, miniscore;
1634
1635 minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1636 miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1637
1638 if (minihiscore > miniscore) {
1639 if (i == IPV6_SADDR_RULE_SCOPE &&
1640 score->scopedist > 0) {
1641 /*
1642 * special case:
1643 * each remaining entry
1644 * has too small (not enough)
1645 * scope, because ifa entries
1646 * are sorted by their scope
1647 * values.
1648 */
1649 goto out;
1650 }
1651 break;
1652 } else if (minihiscore < miniscore) {
1653 swap(hiscore, score);
1654 hiscore_idx = 1 - hiscore_idx;
1655
1656 /* restore our iterator */
1657 score->ifa = hiscore->ifa;
1658
1659 break;
1660 }
1661 }
1662 }
1663 out:
1664 return hiscore_idx;
1665 }
1666
1667 static int ipv6_get_saddr_master(struct net *net,
1668 const struct net_device *dst_dev,
1669 const struct net_device *master,
1670 struct ipv6_saddr_dst *dst,
1671 struct ipv6_saddr_score *scores,
1672 int hiscore_idx)
1673 {
1674 struct inet6_dev *idev;
1675
1676 idev = __in6_dev_get(dst_dev);
1677 if (idev)
1678 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1679 scores, hiscore_idx);
1680
1681 idev = __in6_dev_get(master);
1682 if (idev)
1683 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1684 scores, hiscore_idx);
1685
1686 return hiscore_idx;
1687 }
1688
1689 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1690 const struct in6_addr *daddr, unsigned int prefs,
1691 struct in6_addr *saddr)
1692 {
1693 struct ipv6_saddr_score scores[2], *hiscore;
1694 struct ipv6_saddr_dst dst;
1695 struct inet6_dev *idev;
1696 struct net_device *dev;
1697 int dst_type;
1698 bool use_oif_addr = false;
1699 int hiscore_idx = 0;
1700 int ret = 0;
1701
1702 dst_type = __ipv6_addr_type(daddr);
1703 dst.addr = daddr;
1704 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1705 dst.scope = __ipv6_addr_src_scope(dst_type);
1706 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1707 dst.prefs = prefs;
1708
1709 scores[hiscore_idx].rule = -1;
1710 scores[hiscore_idx].ifa = NULL;
1711
1712 rcu_read_lock();
1713
1714 /* Candidate Source Address (section 4)
1715 * - multicast and link-local destination address,
1716 * the set of candidate source address MUST only
1717 * include addresses assigned to interfaces
1718 * belonging to the same link as the outgoing
1719 * interface.
1720 * (- For site-local destination addresses, the
1721 * set of candidate source addresses MUST only
1722 * include addresses assigned to interfaces
1723 * belonging to the same site as the outgoing
1724 * interface.)
1725 * - "It is RECOMMENDED that the candidate source addresses
1726 * be the set of unicast addresses assigned to the
1727 * interface that will be used to send to the destination
1728 * (the 'outgoing' interface)." (RFC 6724)
1729 */
1730 if (dst_dev) {
1731 idev = __in6_dev_get(dst_dev);
1732 if ((dst_type & IPV6_ADDR_MULTICAST) ||
1733 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1734 (idev && idev->cnf.use_oif_addrs_only)) {
1735 use_oif_addr = true;
1736 }
1737 }
1738
1739 if (use_oif_addr) {
1740 if (idev)
1741 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1742 } else {
1743 const struct net_device *master;
1744 int master_idx = 0;
1745
1746 /* if dst_dev exists and is enslaved to an L3 device, then
1747 * prefer addresses from dst_dev and then the master over
1748 * any other enslaved devices in the L3 domain.
1749 */
1750 master = l3mdev_master_dev_rcu(dst_dev);
1751 if (master) {
1752 master_idx = master->ifindex;
1753
1754 hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1755 master, &dst,
1756 scores, hiscore_idx);
1757
1758 if (scores[hiscore_idx].ifa)
1759 goto out;
1760 }
1761
1762 for_each_netdev_rcu(net, dev) {
1763 /* only consider addresses on devices in the
1764 * same L3 domain
1765 */
1766 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1767 continue;
1768 idev = __in6_dev_get(dev);
1769 if (!idev)
1770 continue;
1771 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1772 }
1773 }
1774
1775 out:
1776 hiscore = &scores[hiscore_idx];
1777 if (!hiscore->ifa)
1778 ret = -EADDRNOTAVAIL;
1779 else
1780 *saddr = hiscore->ifa->addr;
1781
1782 rcu_read_unlock();
1783 return ret;
1784 }
1785 EXPORT_SYMBOL(ipv6_dev_get_saddr);
1786
1787 int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1788 u32 banned_flags)
1789 {
1790 struct inet6_ifaddr *ifp;
1791 int err = -EADDRNOTAVAIL;
1792
1793 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1794 if (ifp->scope > IFA_LINK)
1795 break;
1796 if (ifp->scope == IFA_LINK &&
1797 !(ifp->flags & banned_flags)) {
1798 *addr = ifp->addr;
1799 err = 0;
1800 break;
1801 }
1802 }
1803 return err;
1804 }
1805
1806 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1807 u32 banned_flags)
1808 {
1809 struct inet6_dev *idev;
1810 int err = -EADDRNOTAVAIL;
1811
1812 rcu_read_lock();
1813 idev = __in6_dev_get(dev);
1814 if (idev) {
1815 read_lock_bh(&idev->lock);
1816 err = __ipv6_get_lladdr(idev, addr, banned_flags);
1817 read_unlock_bh(&idev->lock);
1818 }
1819 rcu_read_unlock();
1820 return err;
1821 }
1822
1823 static int ipv6_count_addresses(const struct inet6_dev *idev)
1824 {
1825 const struct inet6_ifaddr *ifp;
1826 int cnt = 0;
1827
1828 rcu_read_lock();
1829 list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1830 cnt++;
1831 rcu_read_unlock();
1832 return cnt;
1833 }
1834
1835 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1836 const struct net_device *dev, int strict)
1837 {
1838 return ipv6_chk_addr_and_flags(net, addr, dev, strict, IFA_F_TENTATIVE);
1839 }
1840 EXPORT_SYMBOL(ipv6_chk_addr);
1841
1842 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1843 const struct net_device *dev, int strict,
1844 u32 banned_flags)
1845 {
1846 unsigned int hash = inet6_addr_hash(net, addr);
1847 struct inet6_ifaddr *ifp;
1848 u32 ifp_flags;
1849
1850 rcu_read_lock();
1851 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1852 if (!net_eq(dev_net(ifp->idev->dev), net))
1853 continue;
1854 /* Decouple optimistic from tentative for evaluation here.
1855 * Ban optimistic addresses explicitly, when required.
1856 */
1857 ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1858 ? (ifp->flags&~IFA_F_TENTATIVE)
1859 : ifp->flags;
1860 if (ipv6_addr_equal(&ifp->addr, addr) &&
1861 !(ifp_flags&banned_flags) &&
1862 (!dev || ifp->idev->dev == dev ||
1863 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1864 rcu_read_unlock();
1865 return 1;
1866 }
1867 }
1868
1869 rcu_read_unlock();
1870 return 0;
1871 }
1872 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
1873
1874
1875 /* Compares an address/prefix_len with addresses on device @dev.
1876 * If one is found it returns true.
1877 */
1878 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1879 const unsigned int prefix_len, struct net_device *dev)
1880 {
1881 const struct inet6_ifaddr *ifa;
1882 const struct inet6_dev *idev;
1883 bool ret = false;
1884
1885 rcu_read_lock();
1886 idev = __in6_dev_get(dev);
1887 if (idev) {
1888 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1889 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1890 if (ret)
1891 break;
1892 }
1893 }
1894 rcu_read_unlock();
1895
1896 return ret;
1897 }
1898 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1899
1900 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1901 {
1902 const struct inet6_ifaddr *ifa;
1903 const struct inet6_dev *idev;
1904 int onlink;
1905
1906 onlink = 0;
1907 rcu_read_lock();
1908 idev = __in6_dev_get(dev);
1909 if (idev) {
1910 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1911 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1912 ifa->prefix_len);
1913 if (onlink)
1914 break;
1915 }
1916 }
1917 rcu_read_unlock();
1918 return onlink;
1919 }
1920 EXPORT_SYMBOL(ipv6_chk_prefix);
1921
1922 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
1923 struct net_device *dev, int strict)
1924 {
1925 unsigned int hash = inet6_addr_hash(net, addr);
1926 struct inet6_ifaddr *ifp, *result = NULL;
1927
1928 rcu_read_lock();
1929 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1930 if (!net_eq(dev_net(ifp->idev->dev), net))
1931 continue;
1932 if (ipv6_addr_equal(&ifp->addr, addr)) {
1933 if (!dev || ifp->idev->dev == dev ||
1934 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
1935 result = ifp;
1936 in6_ifa_hold(ifp);
1937 break;
1938 }
1939 }
1940 }
1941 rcu_read_unlock();
1942
1943 return result;
1944 }
1945
1946 /* Gets referenced address, destroys ifaddr */
1947
1948 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1949 {
1950 if (dad_failed)
1951 ifp->flags |= IFA_F_DADFAILED;
1952
1953 if (ifp->flags&IFA_F_TEMPORARY) {
1954 struct inet6_ifaddr *ifpub;
1955 spin_lock_bh(&ifp->lock);
1956 ifpub = ifp->ifpub;
1957 if (ifpub) {
1958 in6_ifa_hold(ifpub);
1959 spin_unlock_bh(&ifp->lock);
1960 ipv6_create_tempaddr(ifpub, ifp, true);
1961 in6_ifa_put(ifpub);
1962 } else {
1963 spin_unlock_bh(&ifp->lock);
1964 }
1965 ipv6_del_addr(ifp);
1966 } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
1967 spin_lock_bh(&ifp->lock);
1968 addrconf_del_dad_work(ifp);
1969 ifp->flags |= IFA_F_TENTATIVE;
1970 spin_unlock_bh(&ifp->lock);
1971 if (dad_failed)
1972 ipv6_ifa_notify(0, ifp);
1973 in6_ifa_put(ifp);
1974 } else {
1975 ipv6_del_addr(ifp);
1976 }
1977 }
1978
1979 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
1980 {
1981 int err = -ENOENT;
1982
1983 spin_lock_bh(&ifp->lock);
1984 if (ifp->state == INET6_IFADDR_STATE_DAD) {
1985 ifp->state = INET6_IFADDR_STATE_POSTDAD;
1986 err = 0;
1987 }
1988 spin_unlock_bh(&ifp->lock);
1989
1990 return err;
1991 }
1992
1993 void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
1994 {
1995 struct inet6_dev *idev = ifp->idev;
1996 struct net *net = dev_net(ifp->idev->dev);
1997
1998 if (addrconf_dad_end(ifp)) {
1999 in6_ifa_put(ifp);
2000 return;
2001 }
2002
2003 net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n",
2004 ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
2005
2006 spin_lock_bh(&ifp->lock);
2007
2008 if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2009 int scope = ifp->scope;
2010 u32 flags = ifp->flags;
2011 struct in6_addr new_addr;
2012 struct inet6_ifaddr *ifp2;
2013 u32 valid_lft, preferred_lft;
2014 int pfxlen = ifp->prefix_len;
2015 int retries = ifp->stable_privacy_retry + 1;
2016
2017 if (retries > net->ipv6.sysctl.idgen_retries) {
2018 net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2019 ifp->idev->dev->name);
2020 goto errdad;
2021 }
2022
2023 new_addr = ifp->addr;
2024 if (ipv6_generate_stable_address(&new_addr, retries,
2025 idev))
2026 goto errdad;
2027
2028 valid_lft = ifp->valid_lft;
2029 preferred_lft = ifp->prefered_lft;
2030
2031 spin_unlock_bh(&ifp->lock);
2032
2033 if (idev->cnf.max_addresses &&
2034 ipv6_count_addresses(idev) >=
2035 idev->cnf.max_addresses)
2036 goto lock_errdad;
2037
2038 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2039 ifp->idev->dev->name);
2040
2041 ifp2 = ipv6_add_addr(idev, &new_addr, NULL, pfxlen,
2042 scope, flags, valid_lft,
2043 preferred_lft, false, NULL);
2044 if (IS_ERR(ifp2))
2045 goto lock_errdad;
2046
2047 spin_lock_bh(&ifp2->lock);
2048 ifp2->stable_privacy_retry = retries;
2049 ifp2->state = INET6_IFADDR_STATE_PREDAD;
2050 spin_unlock_bh(&ifp2->lock);
2051
2052 addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2053 in6_ifa_put(ifp2);
2054 lock_errdad:
2055 spin_lock_bh(&ifp->lock);
2056 }
2057
2058 errdad:
2059 /* transition from _POSTDAD to _ERRDAD */
2060 ifp->state = INET6_IFADDR_STATE_ERRDAD;
2061 spin_unlock_bh(&ifp->lock);
2062
2063 addrconf_mod_dad_work(ifp, 0);
2064 in6_ifa_put(ifp);
2065 }
2066
2067 /* Join the solicited-node multicast group for this address.
2068 * Caller must hold RTNL. */
2069 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2070 {
2071 struct in6_addr maddr;
2072
2073 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2074 return;
2075
2076 addrconf_addr_solict_mult(addr, &maddr);
2077 ipv6_dev_mc_inc(dev, &maddr);
2078 }
2079
2080 /* caller must hold RTNL */
2081 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2082 {
2083 struct in6_addr maddr;
2084
2085 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2086 return;
2087
2088 addrconf_addr_solict_mult(addr, &maddr);
2089 __ipv6_dev_mc_dec(idev, &maddr);
2090 }
2091
2092 /* caller must hold RTNL */
2093 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2094 {
2095 struct in6_addr addr;
2096
2097 if (ifp->prefix_len >= 127) /* RFC 6164 */
2098 return;
2099 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2100 if (ipv6_addr_any(&addr))
2101 return;
2102 __ipv6_dev_ac_inc(ifp->idev, &addr);
2103 }
2104
2105 /* caller must hold RTNL */
2106 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2107 {
2108 struct in6_addr addr;
2109
2110 if (ifp->prefix_len >= 127) /* RFC 6164 */
2111 return;
2112 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2113 if (ipv6_addr_any(&addr))
2114 return;
2115 __ipv6_dev_ac_dec(ifp->idev, &addr);
2116 }
2117
2118 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2119 {
2120 switch (dev->addr_len) {
2121 case ETH_ALEN:
2122 memcpy(eui, dev->dev_addr, 3);
2123 eui[3] = 0xFF;
2124 eui[4] = 0xFE;
2125 memcpy(eui + 5, dev->dev_addr + 3, 3);
2126 break;
2127 case EUI64_ADDR_LEN:
2128 memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2129 eui[0] ^= 2;
2130 break;
2131 default:
2132 return -1;
2133 }
2134
2135 return 0;
2136 }
2137
2138 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2139 {
2140 union fwnet_hwaddr *ha;
2141
2142 if (dev->addr_len != FWNET_ALEN)
2143 return -1;
2144
2145 ha = (union fwnet_hwaddr *)dev->dev_addr;
2146
2147 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2148 eui[0] ^= 2;
2149 return 0;
2150 }
2151
2152 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2153 {
2154 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
2155 if (dev->addr_len != ARCNET_ALEN)
2156 return -1;
2157 memset(eui, 0, 7);
2158 eui[7] = *(u8 *)dev->dev_addr;
2159 return 0;
2160 }
2161
2162 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2163 {
2164 if (dev->addr_len != INFINIBAND_ALEN)
2165 return -1;
2166 memcpy(eui, dev->dev_addr + 12, 8);
2167 eui[0] |= 2;
2168 return 0;
2169 }
2170
2171 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2172 {
2173 if (addr == 0)
2174 return -1;
2175 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2176 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2177 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2178 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2179 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2180 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2181 eui[1] = 0;
2182 eui[2] = 0x5E;
2183 eui[3] = 0xFE;
2184 memcpy(eui + 4, &addr, 4);
2185 return 0;
2186 }
2187
2188 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2189 {
2190 if (dev->priv_flags & IFF_ISATAP)
2191 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2192 return -1;
2193 }
2194
2195 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2196 {
2197 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2198 }
2199
2200 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2201 {
2202 memcpy(eui, dev->perm_addr, 3);
2203 memcpy(eui + 5, dev->perm_addr + 3, 3);
2204 eui[3] = 0xFF;
2205 eui[4] = 0xFE;
2206 eui[0] ^= 2;
2207 return 0;
2208 }
2209
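/* Editor-added note: the function below derives a modified EUI-64 interface
 * identifier from the link-layer address, dispatching on the ARP hardware
 * type.  It returns 0 on success and -1 for device types without a usable
 * identifier.
 */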
2210 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2211 {
2212 switch (dev->type) {
2213 case ARPHRD_ETHER:
2214 case ARPHRD_FDDI:
2215 return addrconf_ifid_eui48(eui, dev);
2216 case ARPHRD_ARCNET:
2217 return addrconf_ifid_arcnet(eui, dev);
2218 case ARPHRD_INFINIBAND:
2219 return addrconf_ifid_infiniband(eui, dev);
2220 case ARPHRD_SIT:
2221 return addrconf_ifid_sit(eui, dev);
2222 case ARPHRD_IPGRE:
2223 case ARPHRD_TUNNEL:
2224 return addrconf_ifid_gre(eui, dev);
2225 case ARPHRD_6LOWPAN:
2226 return addrconf_ifid_6lowpan(eui, dev);
2227 case ARPHRD_IEEE1394:
2228 return addrconf_ifid_ieee1394(eui, dev);
2229 case ARPHRD_TUNNEL6:
2230 case ARPHRD_IP6GRE:
2231 return addrconf_ifid_ip6tnl(eui, dev);
2232 }
2233 return -1;
2234 }
2235
2236 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2237 {
2238 int err = -1;
2239 struct inet6_ifaddr *ifp;
2240
2241 read_lock_bh(&idev->lock);
2242 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2243 if (ifp->scope > IFA_LINK)
2244 break;
2245 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2246 memcpy(eui, ifp->addr.s6_addr+8, 8);
2247 err = 0;
2248 break;
2249 }
2250 }
2251 read_unlock_bh(&idev->lock);
2252 return err;
2253 }
2254
2255 /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
2256 static void ipv6_regen_rndid(struct inet6_dev *idev)
2257 {
2258 regen:
2259 get_random_bytes(idev->rndid, sizeof(idev->rndid));
2260 idev->rndid[0] &= ~0x02;
2261
2262 /*
2263 * <draft-ietf-ipngwg-temp-addresses-v2-00.txt>:
2264 * check that the generated interface identifier is not inappropriate:
2265 *
2266 * - Reserved subnet anycast (RFC 2526)
2267 * 11111101 11....11 1xxxxxxx
2268 * - ISATAP (RFC4214) 6.1
2269 * 00-00-5E-FE-xx-xx-xx-xx
2270 * - value 0
2271 * - XXX: already assigned to an address on the device
2272 */
2273 if (idev->rndid[0] == 0xfd &&
2274 (idev->rndid[1]&idev->rndid[2]&idev->rndid[3]&idev->rndid[4]&idev->rndid[5]&idev->rndid[6]) == 0xff &&
2275 (idev->rndid[7]&0x80))
2276 goto regen;
2277 if ((idev->rndid[0]|idev->rndid[1]) == 0) {
2278 if (idev->rndid[2] == 0x5e && idev->rndid[3] == 0xfe)
2279 goto regen;
2280 if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
2281 goto regen;
2282 }
2283 }
2284
2285 static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
2286 {
2287 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
2288 ipv6_regen_rndid(idev);
2289 }
2290
2291 /*
2292 * Add prefix route.
2293 */
2294
2295 static void
2296 addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
2297 unsigned long expires, u32 flags)
2298 {
2299 struct fib6_config cfg = {
2300 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2301 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2302 .fc_ifindex = dev->ifindex,
2303 .fc_expires = expires,
2304 .fc_dst_len = plen,
2305 .fc_flags = RTF_UP | flags,
2306 .fc_nlinfo.nl_net = dev_net(dev),
2307 .fc_protocol = RTPROT_KERNEL,
2308 };
2309
2310 cfg.fc_dst = *pfx;
2311
2312 /* Prevent useless cloning on PtP SIT.
2313 This is done here on the assumption that the whole
2314 class of non-broadcast devices does not need cloning.
2315 */
2316 #if IS_ENABLED(CONFIG_IPV6_SIT)
2317 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2318 cfg.fc_flags |= RTF_NONEXTHOP;
2319 #endif
2320
2321 ip6_route_add(&cfg, NULL);
2322 }
2323
2324
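/* Editor-added note: the lookup below walks the fib6 node matching @pfx/@plen
 * in the device's table and returns the first route on @dev whose flags
 * contain @flags and none of @noflags, with a reference taken via
 * dst_hold_safe().  It returns NULL when no such route exists.
 */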
2325 static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2326 int plen,
2327 const struct net_device *dev,
2328 u32 flags, u32 noflags)
2329 {
2330 struct fib6_node *fn;
2331 struct rt6_info *rt = NULL;
2332 struct fib6_table *table;
2333 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2334
2335 table = fib6_get_table(dev_net(dev), tb_id);
2336 if (!table)
2337 return NULL;
2338
2339 rcu_read_lock();
2340 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2341 if (!fn)
2342 goto out;
2343
2344 for_each_fib6_node_rt_rcu(fn) {
2345 if (rt->dst.dev->ifindex != dev->ifindex)
2346 continue;
2347 if ((rt->rt6i_flags & flags) != flags)
2348 continue;
2349 if ((rt->rt6i_flags & noflags) != 0)
2350 continue;
2351 if (!dst_hold_safe(&rt->dst))
2352 rt = NULL;
2353 break;
2354 }
2355 out:
2356 rcu_read_unlock();
2357 return rt;
2358 }
2359
2360
2361 /* Create "default" multicast route to the interface */
2362
2363 static void addrconf_add_mroute(struct net_device *dev)
2364 {
2365 struct fib6_config cfg = {
2366 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2367 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2368 .fc_ifindex = dev->ifindex,
2369 .fc_dst_len = 8,
2370 .fc_flags = RTF_UP,
2371 .fc_nlinfo.nl_net = dev_net(dev),
2372 };
2373
2374 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2375
2376 ip6_route_add(&cfg, NULL);
2377 }
2378
2379 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2380 {
2381 struct inet6_dev *idev;
2382
2383 ASSERT_RTNL();
2384
2385 idev = ipv6_find_idev(dev);
2386 if (!idev)
2387 return ERR_PTR(-ENOBUFS);
2388
2389 if (idev->cnf.disable_ipv6)
2390 return ERR_PTR(-EACCES);
2391
2392 /* Add default multicast route */
2393 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2394 addrconf_add_mroute(dev);
2395
2396 return idev;
2397 }
2398
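/* Editor-added summary of manage_tempaddrs() below: refresh the temporary
 * (RFC 4941) addresses derived from public address @ifp, clamping their
 * valid/preferred lifetimes to temp_valid_lft / temp_prefered_lft (minus age
 * and the desync factor), and create a fresh temporary address when a new
 * public address was created, or none currently exists and use_tempaddr is
 * enabled.
 */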
2399 static void manage_tempaddrs(struct inet6_dev *idev,
2400 struct inet6_ifaddr *ifp,
2401 __u32 valid_lft, __u32 prefered_lft,
2402 bool create, unsigned long now)
2403 {
2404 u32 flags;
2405 struct inet6_ifaddr *ift;
2406
2407 read_lock_bh(&idev->lock);
2408 /* update all temporary addresses in the list */
2409 list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2410 int age, max_valid, max_prefered;
2411
2412 if (ifp != ift->ifpub)
2413 continue;
2414
2415 /* RFC 4941 section 3.3:
2416 * If a received option will extend the lifetime of a public
2417 * address, the lifetimes of temporary addresses should
2418 * be extended, subject to the overall constraint that no
2419 * temporary addresses should ever remain "valid" or "preferred"
2420 * for a time longer than (TEMP_VALID_LIFETIME) or
2421 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2422 */
2423 age = (now - ift->cstamp) / HZ;
2424 max_valid = idev->cnf.temp_valid_lft - age;
2425 if (max_valid < 0)
2426 max_valid = 0;
2427
2428 max_prefered = idev->cnf.temp_prefered_lft -
2429 idev->desync_factor - age;
2430 if (max_prefered < 0)
2431 max_prefered = 0;
2432
2433 if (valid_lft > max_valid)
2434 valid_lft = max_valid;
2435
2436 if (prefered_lft > max_prefered)
2437 prefered_lft = max_prefered;
2438
2439 spin_lock(&ift->lock);
2440 flags = ift->flags;
2441 ift->valid_lft = valid_lft;
2442 ift->prefered_lft = prefered_lft;
2443 ift->tstamp = now;
2444 if (prefered_lft > 0)
2445 ift->flags &= ~IFA_F_DEPRECATED;
2446
2447 spin_unlock(&ift->lock);
2448 if (!(flags&IFA_F_TENTATIVE))
2449 ipv6_ifa_notify(0, ift);
2450 }
2451
2452 if ((create || list_empty(&idev->tempaddr_list)) &&
2453 idev->cnf.use_tempaddr > 0) {
2454 /* When a new public address is created as described
2455 * in [ADDRCONF], also create a new temporary address.
2456 * Also create a temporary address if it's enabled but
2457 * no temporary address currently exists.
2458 */
2459 read_unlock_bh(&idev->lock);
2460 ipv6_create_tempaddr(ifp, NULL, false);
2461 } else {
2462 read_unlock_bh(&idev->lock);
2463 }
2464 }
2465
2466 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2467 {
2468 return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2469 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2470 }
2471
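/* Editor-added summary of the function below: install or refresh an address
 * learned from a Router Advertisement prefix.  A new address is created
 * (subject to the max_addresses limit) and DAD is started if it does not
 * exist yet; otherwise its lifetimes are updated per RFC 4862 section
 * 5.5.3(e) and manage_tempaddrs() adjusts the associated temporary
 * addresses.
 */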
2472 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2473 const struct prefix_info *pinfo,
2474 struct inet6_dev *in6_dev,
2475 const struct in6_addr *addr, int addr_type,
2476 u32 addr_flags, bool sllao, bool tokenized,
2477 __u32 valid_lft, u32 prefered_lft)
2478 {
2479 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2480 int create = 0, update_lft = 0;
2481
2482 if (!ifp && valid_lft) {
2483 int max_addresses = in6_dev->cnf.max_addresses;
2484
2485 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2486 if ((net->ipv6.devconf_all->optimistic_dad ||
2487 in6_dev->cnf.optimistic_dad) &&
2488 !net->ipv6.devconf_all->forwarding && sllao)
2489 addr_flags |= IFA_F_OPTIMISTIC;
2490 #endif
2491
2492 /* Do not allow creating too many autoconfigured
2493 * addresses; that would be an easy way to crash the kernel.
2494 */
2495 if (!max_addresses ||
2496 ipv6_count_addresses(in6_dev) < max_addresses)
2497 ifp = ipv6_add_addr(in6_dev, addr, NULL,
2498 pinfo->prefix_len,
2499 addr_type&IPV6_ADDR_SCOPE_MASK,
2500 addr_flags, valid_lft,
2501 prefered_lft, false, NULL);
2502
2503 if (IS_ERR_OR_NULL(ifp))
2504 return -1;
2505
2506 update_lft = 0;
2507 create = 1;
2508 spin_lock_bh(&ifp->lock);
2509 ifp->flags |= IFA_F_MANAGETEMPADDR;
2510 ifp->cstamp = jiffies;
2511 ifp->tokenized = tokenized;
2512 spin_unlock_bh(&ifp->lock);
2513 addrconf_dad_start(ifp);
2514 }
2515
2516 if (ifp) {
2517 u32 flags;
2518 unsigned long now;
2519 u32 stored_lft;
2520
2521 /* update lifetime (RFC2462 5.5.3 e) */
2522 spin_lock_bh(&ifp->lock);
2523 now = jiffies;
2524 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2525 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2526 else
2527 stored_lft = 0;
2528 if (!update_lft && !create && stored_lft) {
2529 const u32 minimum_lft = min_t(u32,
2530 stored_lft, MIN_VALID_LIFETIME);
2531 valid_lft = max(valid_lft, minimum_lft);
2532
2533 /* RFC4862 Section 5.5.3e:
2534 * "Note that the preferred lifetime of the
2535 * corresponding address is always reset to
2536 * the Preferred Lifetime in the received
2537 * Prefix Information option, regardless of
2538 * whether the valid lifetime is also reset or
2539 * ignored."
2540 *
2541 * So we should always update prefered_lft here.
2542 */
2543 update_lft = 1;
2544 }
2545
2546 if (update_lft) {
2547 ifp->valid_lft = valid_lft;
2548 ifp->prefered_lft = prefered_lft;
2549 ifp->tstamp = now;
2550 flags = ifp->flags;
2551 ifp->flags &= ~IFA_F_DEPRECATED;
2552 spin_unlock_bh(&ifp->lock);
2553
2554 if (!(flags&IFA_F_TENTATIVE))
2555 ipv6_ifa_notify(0, ifp);
2556 } else
2557 spin_unlock_bh(&ifp->lock);
2558
2559 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2560 create, now);
2561
2562 in6_ifa_put(ifp);
2563 addrconf_verify();
2564 }
2565
2566 return 0;
2567 }
2568 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
2569
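/* Editor-added summary of addrconf_prefix_rcv() below: process a Prefix
 * Information option from a Router Advertisement.  The lifetimes are
 * validated, the on-link prefix route is added/updated/removed when the L
 * flag is set, and when the A flag is set and the prefix length is 64 an
 * address is formed from the prefix using the tokenized, stable-privacy or
 * EUI-64 interface identifier.
 */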
2570 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2571 {
2572 struct prefix_info *pinfo;
2573 __u32 valid_lft;
2574 __u32 prefered_lft;
2575 int addr_type, err;
2576 u32 addr_flags = 0;
2577 struct inet6_dev *in6_dev;
2578 struct net *net = dev_net(dev);
2579
2580 pinfo = (struct prefix_info *) opt;
2581
2582 if (len < sizeof(struct prefix_info)) {
2583 ADBG("addrconf: prefix option too short\n");
2584 return;
2585 }
2586
2587 /*
2588 * Validation checks ([ADDRCONF], page 19)
2589 */
2590
2591 addr_type = ipv6_addr_type(&pinfo->prefix);
2592
2593 if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2594 return;
2595
2596 valid_lft = ntohl(pinfo->valid);
2597 prefered_lft = ntohl(pinfo->prefered);
2598
2599 if (prefered_lft > valid_lft) {
2600 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2601 return;
2602 }
2603
2604 in6_dev = in6_dev_get(dev);
2605
2606 if (!in6_dev) {
2607 net_dbg_ratelimited("addrconf: device %s not configured\n",
2608 dev->name);
2609 return;
2610 }
2611
2612 /*
2613 * Two things going on here:
2614 * 1) Add routes for on-link prefixes
2615 * 2) Configure prefixes with the auto flag set
2616 */
2617
2618 if (pinfo->onlink) {
2619 struct rt6_info *rt;
2620 unsigned long rt_expires;
2621
2622 /* Avoid arithmetic overflow. Really, we could
2623 * store rt_expires in seconds, likely valid_lft,
2624 * but that would require a division in fib gc, which is
2625 * not good.
2626 */
2627 if (HZ > USER_HZ)
2628 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2629 else
2630 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2631
2632 if (addrconf_finite_timeout(rt_expires))
2633 rt_expires *= HZ;
2634
2635 rt = addrconf_get_prefix_route(&pinfo->prefix,
2636 pinfo->prefix_len,
2637 dev,
2638 RTF_ADDRCONF | RTF_PREFIX_RT,
2639 RTF_GATEWAY | RTF_DEFAULT);
2640
2641 if (rt) {
2642 /* Autoconf prefix route */
2643 if (valid_lft == 0) {
2644 ip6_del_rt(rt);
2645 rt = NULL;
2646 } else if (addrconf_finite_timeout(rt_expires)) {
2647 /* not infinity */
2648 rt6_set_expires(rt, jiffies + rt_expires);
2649 } else {
2650 rt6_clean_expires(rt);
2651 }
2652 } else if (valid_lft) {
2653 clock_t expires = 0;
2654 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2655 if (addrconf_finite_timeout(rt_expires)) {
2656 /* not infinity */
2657 flags |= RTF_EXPIRES;
2658 expires = jiffies_to_clock_t(rt_expires);
2659 }
2660 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2661 dev, expires, flags);
2662 }
2663 ip6_rt_put(rt);
2664 }
2665
2666 /* Try to figure out our local address for this prefix */
2667
2668 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2669 struct in6_addr addr;
2670 bool tokenized = false, dev_addr_generated = false;
2671
2672 if (pinfo->prefix_len == 64) {
2673 memcpy(&addr, &pinfo->prefix, 8);
2674
2675 if (!ipv6_addr_any(&in6_dev->token)) {
2676 read_lock_bh(&in6_dev->lock);
2677 memcpy(addr.s6_addr + 8,
2678 in6_dev->token.s6_addr + 8, 8);
2679 read_unlock_bh(&in6_dev->lock);
2680 tokenized = true;
2681 } else if (is_addr_mode_generate_stable(in6_dev) &&
2682 !ipv6_generate_stable_address(&addr, 0,
2683 in6_dev)) {
2684 addr_flags |= IFA_F_STABLE_PRIVACY;
2685 goto ok;
2686 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2687 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2688 goto put;
2689 } else {
2690 dev_addr_generated = true;
2691 }
2692 goto ok;
2693 }
2694 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2695 pinfo->prefix_len);
2696 goto put;
2697
2698 ok:
2699 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2700 &addr, addr_type,
2701 addr_flags, sllao,
2702 tokenized, valid_lft,
2703 prefered_lft);
2704 if (err)
2705 goto put;
2706
2707 /* Ignore the error case here because the preceding prefix address
2708 * add was successful and will be notified.
2709 */
2710 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2711 addr_type, addr_flags, sllao,
2712 tokenized, valid_lft,
2713 prefered_lft,
2714 dev_addr_generated);
2715 }
2716 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2717 put:
2718 in6_dev_put(in6_dev);
2719 }
2720
2721 /*
2722 * Set destination address.
2723 * Special case for SIT interfaces where we create a new "virtual"
2724 * device.
2725 */
2726 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2727 {
2728 struct in6_ifreq ireq;
2729 struct net_device *dev;
2730 int err = -EINVAL;
2731
2732 rtnl_lock();
2733
2734 err = -EFAULT;
2735 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2736 goto err_exit;
2737
2738 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2739
2740 err = -ENODEV;
2741 if (!dev)
2742 goto err_exit;
2743
2744 #if IS_ENABLED(CONFIG_IPV6_SIT)
2745 if (dev->type == ARPHRD_SIT) {
2746 const struct net_device_ops *ops = dev->netdev_ops;
2747 struct ifreq ifr;
2748 struct ip_tunnel_parm p;
2749
2750 err = -EADDRNOTAVAIL;
2751 if (!(ipv6_addr_type(&ireq.ifr6_addr) & IPV6_ADDR_COMPATv4))
2752 goto err_exit;
2753
2754 memset(&p, 0, sizeof(p));
2755 p.iph.daddr = ireq.ifr6_addr.s6_addr32[3];
2756 p.iph.saddr = 0;
2757 p.iph.version = 4;
2758 p.iph.ihl = 5;
2759 p.iph.protocol = IPPROTO_IPV6;
2760 p.iph.ttl = 64;
2761 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
2762
2763 if (ops->ndo_do_ioctl) {
2764 mm_segment_t oldfs = get_fs();
2765
2766 set_fs(KERNEL_DS);
2767 err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
2768 set_fs(oldfs);
2769 } else
2770 err = -EOPNOTSUPP;
2771
2772 if (err == 0) {
2773 err = -ENOBUFS;
2774 dev = __dev_get_by_name(net, p.name);
2775 if (!dev)
2776 goto err_exit;
2777 err = dev_open(dev);
2778 }
2779 }
2780 #endif
2781
2782 err_exit:
2783 rtnl_unlock();
2784 return err;
2785 }
2786
2787 static int ipv6_mc_config(struct sock *sk, bool join,
2788 const struct in6_addr *addr, int ifindex)
2789 {
2790 int ret;
2791
2792 ASSERT_RTNL();
2793
2794 lock_sock(sk);
2795 if (join)
2796 ret = ipv6_sock_mc_join(sk, ifindex, addr);
2797 else
2798 ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2799 release_sock(sk);
2800
2801 return ret;
2802 }
2803
2804 /*
2805 * Manual configuration of address on an interface
2806 */
2807 static int inet6_addr_add(struct net *net, int ifindex,
2808 const struct in6_addr *pfx,
2809 const struct in6_addr *peer_pfx,
2810 unsigned int plen, __u32 ifa_flags,
2811 __u32 prefered_lft, __u32 valid_lft,
2812 struct netlink_ext_ack *extack)
2813 {
2814 struct inet6_ifaddr *ifp;
2815 struct inet6_dev *idev;
2816 struct net_device *dev;
2817 unsigned long timeout;
2818 clock_t expires;
2819 int scope;
2820 u32 flags;
2821
2822 ASSERT_RTNL();
2823
2824 if (plen > 128)
2825 return -EINVAL;
2826
2827 /* check the lifetime */
2828 if (!valid_lft || prefered_lft > valid_lft)
2829 return -EINVAL;
2830
2831 if (ifa_flags & IFA_F_MANAGETEMPADDR && plen != 64)
2832 return -EINVAL;
2833
2834 dev = __dev_get_by_index(net, ifindex);
2835 if (!dev)
2836 return -ENODEV;
2837
2838 idev = addrconf_add_dev(dev);
2839 if (IS_ERR(idev))
2840 return PTR_ERR(idev);
2841
2842 if (ifa_flags & IFA_F_MCAUTOJOIN) {
2843 int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2844 true, pfx, ifindex);
2845
2846 if (ret < 0)
2847 return ret;
2848 }
2849
2850 scope = ipv6_addr_scope(pfx);
2851
2852 timeout = addrconf_timeout_fixup(valid_lft, HZ);
2853 if (addrconf_finite_timeout(timeout)) {
2854 expires = jiffies_to_clock_t(timeout * HZ);
2855 valid_lft = timeout;
2856 flags = RTF_EXPIRES;
2857 } else {
2858 expires = 0;
2859 flags = 0;
2860 ifa_flags |= IFA_F_PERMANENT;
2861 }
2862
2863 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
2864 if (addrconf_finite_timeout(timeout)) {
2865 if (timeout == 0)
2866 ifa_flags |= IFA_F_DEPRECATED;
2867 prefered_lft = timeout;
2868 }
2869
2870 ifp = ipv6_add_addr(idev, pfx, peer_pfx, plen, scope, ifa_flags,
2871 valid_lft, prefered_lft, true, extack);
2872
2873 if (!IS_ERR(ifp)) {
2874 if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) {
2875 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
2876 expires, flags);
2877 }
2878
2879 /*
2880 * Note that section 3.1 of RFC 4429 indicates
2881 * that the Optimistic flag should not be set for
2882 * manually configured addresses
2883 */
2884 addrconf_dad_start(ifp);
2885 if (ifa_flags & IFA_F_MANAGETEMPADDR)
2886 manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
2887 true, jiffies);
2888 in6_ifa_put(ifp);
2889 addrconf_verify_rtnl();
2890 return 0;
2891 } else if (ifa_flags & IFA_F_MCAUTOJOIN) {
2892 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2893 false, pfx, ifindex);
2894 }
2895
2896 return PTR_ERR(ifp);
2897 }
2898
2899 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
2900 const struct in6_addr *pfx, unsigned int plen)
2901 {
2902 struct inet6_ifaddr *ifp;
2903 struct inet6_dev *idev;
2904 struct net_device *dev;
2905
2906 if (plen > 128)
2907 return -EINVAL;
2908
2909 dev = __dev_get_by_index(net, ifindex);
2910 if (!dev)
2911 return -ENODEV;
2912
2913 idev = __in6_dev_get(dev);
2914 if (!idev)
2915 return -ENXIO;
2916
2917 read_lock_bh(&idev->lock);
2918 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2919 if (ifp->prefix_len == plen &&
2920 ipv6_addr_equal(pfx, &ifp->addr)) {
2921 in6_ifa_hold(ifp);
2922 read_unlock_bh(&idev->lock);
2923
2924 if (!(ifp->flags & IFA_F_TEMPORARY) &&
2925 (ifa_flags & IFA_F_MANAGETEMPADDR))
2926 manage_tempaddrs(idev, ifp, 0, 0, false,
2927 jiffies);
2928 ipv6_del_addr(ifp);
2929 addrconf_verify_rtnl();
2930 if (ipv6_addr_is_multicast(pfx)) {
2931 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2932 false, pfx, dev->ifindex);
2933 }
2934 return 0;
2935 }
2936 }
2937 read_unlock_bh(&idev->lock);
2938 return -EADDRNOTAVAIL;
2939 }
2940
2941
2942 int addrconf_add_ifaddr(struct net *net, void __user *arg)
2943 {
2944 struct in6_ifreq ireq;
2945 int err;
2946
2947 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2948 return -EPERM;
2949
2950 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2951 return -EFAULT;
2952
2953 rtnl_lock();
2954 err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, NULL,
2955 ireq.ifr6_prefixlen, IFA_F_PERMANENT,
2956 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME, NULL);
2957 rtnl_unlock();
2958 return err;
2959 }
2960
2961 int addrconf_del_ifaddr(struct net *net, void __user *arg)
2962 {
2963 struct in6_ifreq ireq;
2964 int err;
2965
2966 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2967 return -EPERM;
2968
2969 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2970 return -EFAULT;
2971
2972 rtnl_lock();
2973 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
2974 ireq.ifr6_prefixlen);
2975 rtnl_unlock();
2976 return err;
2977 }
2978
2979 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
2980 int plen, int scope)
2981 {
2982 struct inet6_ifaddr *ifp;
2983
2984 ifp = ipv6_add_addr(idev, addr, NULL, plen,
2985 scope, IFA_F_PERMANENT,
2986 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME,
2987 true, NULL);
2988 if (!IS_ERR(ifp)) {
2989 spin_lock_bh(&ifp->lock);
2990 ifp->flags &= ~IFA_F_TENTATIVE;
2991 spin_unlock_bh(&ifp->lock);
2992 rt_genid_bump_ipv6(dev_net(idev->dev));
2993 ipv6_ifa_notify(RTM_NEWADDR, ifp);
2994 in6_ifa_put(ifp);
2995 }
2996 }
2997
2998 #if IS_ENABLED(CONFIG_IPV6_SIT)
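/* Editor-added note on the function below: derive IPv6 addresses from the
 * tunnel's IPv4 addresses.  On a point-to-point SIT device the address is
 * formed under fe80::/64; otherwise it is an IPv4-compatible address with a
 * /96 prefix and a non-nexthop prefix route.  If the device has no local
 * IPv4 address configured, the IPv4 addresses of all other UP devices are
 * used instead.
 */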
2999 static void sit_add_v4_addrs(struct inet6_dev *idev)
3000 {
3001 struct in6_addr addr;
3002 struct net_device *dev;
3003 struct net *net = dev_net(idev->dev);
3004 int scope, plen;
3005 u32 pflags = 0;
3006
3007 ASSERT_RTNL();
3008
3009 memset(&addr, 0, sizeof(struct in6_addr));
3010 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
3011
3012 if (idev->dev->flags&IFF_POINTOPOINT) {
3013 addr.s6_addr32[0] = htonl(0xfe800000);
3014 scope = IFA_LINK;
3015 plen = 64;
3016 } else {
3017 scope = IPV6_ADDR_COMPATv4;
3018 plen = 96;
3019 pflags |= RTF_NONEXTHOP;
3020 }
3021
3022 if (addr.s6_addr32[3]) {
3023 add_addr(idev, &addr, plen, scope);
3024 addrconf_prefix_route(&addr, plen, idev->dev, 0, pflags);
3025 return;
3026 }
3027
3028 for_each_netdev(net, dev) {
3029 struct in_device *in_dev = __in_dev_get_rtnl(dev);
3030 if (in_dev && (dev->flags & IFF_UP)) {
3031 struct in_ifaddr *ifa;
3032
3033 int flag = scope;
3034
3035 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
3036
3037 addr.s6_addr32[3] = ifa->ifa_local;
3038
3039 if (ifa->ifa_scope == RT_SCOPE_LINK)
3040 continue;
3041 if (ifa->ifa_scope >= RT_SCOPE_HOST) {
3042 if (idev->dev->flags&IFF_POINTOPOINT)
3043 continue;
3044 flag |= IFA_HOST;
3045 }
3046
3047 add_addr(idev, &addr, plen, flag);
3048 addrconf_prefix_route(&addr, plen, idev->dev, 0,
3049 pflags);
3050 }
3051 }
3052 }
3053 }
3054 #endif
3055
3056 static void init_loopback(struct net_device *dev)
3057 {
3058 struct inet6_dev *idev;
3059
3060 /* ::1 */
3061
3062 ASSERT_RTNL();
3063
3064 idev = ipv6_find_idev(dev);
3065 if (!idev) {
3066 pr_debug("%s: add_dev failed\n", __func__);
3067 return;
3068 }
3069
3070 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
3071 }
3072
3073 void addrconf_add_linklocal(struct inet6_dev *idev,
3074 const struct in6_addr *addr, u32 flags)
3075 {
3076 struct inet6_ifaddr *ifp;
3077 u32 addr_flags = flags | IFA_F_PERMANENT;
3078
3079 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3080 if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
3081 idev->cnf.optimistic_dad) &&
3082 !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3083 addr_flags |= IFA_F_OPTIMISTIC;
3084 #endif
3085
3086 ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
3087 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME, true, NULL);
3088 if (!IS_ERR(ifp)) {
3089 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
3090 addrconf_dad_start(ifp);
3091 in6_ifa_put(ifp);
3092 }
3093 }
3094 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3095
3096 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3097 {
3098 if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3099 return true;
3100
3101 if (address.s6_addr32[2] == htonl(0x02005eff) &&
3102 ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3103 return true;
3104
3105 if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3106 ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3107 return true;
3108
3109 return false;
3110 }
3111
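/* Editor-added note: the function below generates an opaque, stable
 * interface identifier in the spirit of RFC 7217.  It hashes the stable
 * secret, the prefix, the permanent hardware address and a DAD counter with
 * SHA-1 and uses the first 64 bits of the digest as the IID, retrying with
 * an incremented counter (up to idgen_retries) if the result is a reserved
 * identifier.
 */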
3112 static int ipv6_generate_stable_address(struct in6_addr *address,
3113 u8 dad_count,
3114 const struct inet6_dev *idev)
3115 {
3116 static DEFINE_SPINLOCK(lock);
3117 static __u32 digest[SHA_DIGEST_WORDS];
3118 static __u32 workspace[SHA_WORKSPACE_WORDS];
3119
3120 static union {
3121 char __data[SHA_MESSAGE_BYTES];
3122 struct {
3123 struct in6_addr secret;
3124 __be32 prefix[2];
3125 unsigned char hwaddr[MAX_ADDR_LEN];
3126 u8 dad_count;
3127 } __packed;
3128 } data;
3129
3130 struct in6_addr secret;
3131 struct in6_addr temp;
3132 struct net *net = dev_net(idev->dev);
3133
3134 BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3135
3136 if (idev->cnf.stable_secret.initialized)
3137 secret = idev->cnf.stable_secret.secret;
3138 else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3139 secret = net->ipv6.devconf_dflt->stable_secret.secret;
3140 else
3141 return -1;
3142
3143 retry:
3144 spin_lock_bh(&lock);
3145
3146 sha_init(digest);
3147 memset(&data, 0, sizeof(data));
3148 memset(workspace, 0, sizeof(workspace));
3149 memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3150 data.prefix[0] = address->s6_addr32[0];
3151 data.prefix[1] = address->s6_addr32[1];
3152 data.secret = secret;
3153 data.dad_count = dad_count;
3154
3155 sha_transform(digest, data.__data, workspace);
3156
3157 temp = *address;
3158 temp.s6_addr32[2] = (__force __be32)digest[0];
3159 temp.s6_addr32[3] = (__force __be32)digest[1];
3160
3161 spin_unlock_bh(&lock);
3162
3163 if (ipv6_reserved_interfaceid(temp)) {
3164 dad_count++;
3165 if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3166 return -1;
3167 goto retry;
3168 }
3169
3170 *address = temp;
3171 return 0;
3172 }
3173
3174 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3175 {
3176 struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3177
3178 if (s->initialized)
3179 return;
3180 s = &idev->cnf.stable_secret;
3181 get_random_bytes(&s->secret, sizeof(s->secret));
3182 s->initialized = true;
3183 }
3184
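/* Editor-added summary of addrconf_addr_gen() below: generate the fe80::/64
 * link-local address according to the configured addr_gen_mode.  The
 * stable-privacy and random modes use ipv6_generate_stable_address(), EUI64
 * mode uses the link-layer derived identifier, and NONE adds no link-local
 * address at all.  When generation fails and @prefix_route is set, only the
 * fe80::/64 prefix route is installed.
 */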
3185 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3186 {
3187 struct in6_addr addr;
3188
3189 /* no link local addresses on L3 master devices */
3190 if (netif_is_l3_master(idev->dev))
3191 return;
3192
3193 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3194
3195 switch (idev->cnf.addr_gen_mode) {
3196 case IN6_ADDR_GEN_MODE_RANDOM:
3197 ipv6_gen_mode_random_init(idev);
3198 /* fallthrough */
3199 case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3200 if (!ipv6_generate_stable_address(&addr, 0, idev))
3201 addrconf_add_linklocal(idev, &addr,
3202 IFA_F_STABLE_PRIVACY);
3203 else if (prefix_route)
3204 addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
3205 break;
3206 case IN6_ADDR_GEN_MODE_EUI64:
3207 /* addrconf_add_linklocal also adds a prefix_route and we
3208 * only need to care about prefix routes if ipv6_generate_eui64
3209 * couldn't generate one.
3210 */
3211 if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3212 addrconf_add_linklocal(idev, &addr, 0);
3213 else if (prefix_route)
3214 addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
3215 break;
3216 case IN6_ADDR_GEN_MODE_NONE:
3217 default:
3218 /* will not add any link local address */
3219 break;
3220 }
3221 }
3222
3223 static void addrconf_dev_config(struct net_device *dev)
3224 {
3225 struct inet6_dev *idev;
3226
3227 ASSERT_RTNL();
3228
3229 if ((dev->type != ARPHRD_ETHER) &&
3230 (dev->type != ARPHRD_FDDI) &&
3231 (dev->type != ARPHRD_ARCNET) &&
3232 (dev->type != ARPHRD_INFINIBAND) &&
3233 (dev->type != ARPHRD_IEEE1394) &&
3234 (dev->type != ARPHRD_TUNNEL6) &&
3235 (dev->type != ARPHRD_6LOWPAN) &&
3236 (dev->type != ARPHRD_IP6GRE) &&
3237 (dev->type != ARPHRD_IPGRE) &&
3238 (dev->type != ARPHRD_TUNNEL) &&
3239 (dev->type != ARPHRD_NONE)) {
3240 /* Alas, we support only Ethernet autoconfiguration. */
3241 return;
3242 }
3243
3244 idev = addrconf_add_dev(dev);
3245 if (IS_ERR(idev))
3246 return;
3247
3248 /* this device type has no EUI support */
3249 if (dev->type == ARPHRD_NONE &&
3250 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3251 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3252
3253 addrconf_addr_gen(idev, false);
3254 }
3255
3256 #if IS_ENABLED(CONFIG_IPV6_SIT)
3257 static void addrconf_sit_config(struct net_device *dev)
3258 {
3259 struct inet6_dev *idev;
3260
3261 ASSERT_RTNL();
3262
3263 /*
3264 * Configure the tunnel with one of our IPv4
3265 * addresses... we should configure all of
3266 * our v4 addrs in the tunnel
3267 */
3268
3269 idev = ipv6_find_idev(dev);
3270 if (!idev) {
3271 pr_debug("%s: add_dev failed\n", __func__);
3272 return;
3273 }
3274
3275 if (dev->priv_flags & IFF_ISATAP) {
3276 addrconf_addr_gen(idev, false);
3277 return;
3278 }
3279
3280 sit_add_v4_addrs(idev);
3281
3282 if (dev->flags&IFF_POINTOPOINT)
3283 addrconf_add_mroute(dev);
3284 }
3285 #endif
3286
3287 #if IS_ENABLED(CONFIG_NET_IPGRE)
3288 static void addrconf_gre_config(struct net_device *dev)
3289 {
3290 struct inet6_dev *idev;
3291
3292 ASSERT_RTNL();
3293
3294 idev = ipv6_find_idev(dev);
3295 if (!idev) {
3296 pr_debug("%s: add_dev failed\n", __func__);
3297 return;
3298 }
3299
3300 addrconf_addr_gen(idev, true);
3301 if (dev->flags & IFF_POINTOPOINT)
3302 addrconf_add_mroute(dev);
3303 }
3304 #endif
3305
3306 static int fixup_permanent_addr(struct inet6_dev *idev,
3307 struct inet6_ifaddr *ifp)
3308 {
3309 /* !rt6i_node means the host route was removed from the
3310 * FIB, for example, if 'lo' device is taken down. In that
3311 * case regenerate the host route.
3312 */
3313 if (!ifp->rt || !ifp->rt->rt6i_node) {
3314 struct rt6_info *rt, *prev;
3315
3316 rt = addrconf_dst_alloc(idev, &ifp->addr, false);
3317 if (IS_ERR(rt))
3318 return PTR_ERR(rt);
3319
3320 /* ifp->rt can be accessed outside of rtnl */
3321 spin_lock(&ifp->lock);
3322 prev = ifp->rt;
3323 ifp->rt = rt;
3324 spin_unlock(&ifp->lock);
3325
3326 ip6_rt_put(prev);
3327 }
3328
3329 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3330 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3331 idev->dev, 0, 0);
3332 }
3333
3334 if (ifp->state == INET6_IFADDR_STATE_PREDAD)
3335 addrconf_dad_start(ifp);
3336
3337 return 0;
3338 }
3339
3340 static void addrconf_permanent_addr(struct net_device *dev)
3341 {
3342 struct inet6_ifaddr *ifp, *tmp;
3343 struct inet6_dev *idev;
3344
3345 idev = __in6_dev_get(dev);
3346 if (!idev)
3347 return;
3348
3349 write_lock_bh(&idev->lock);
3350
3351 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3352 if ((ifp->flags & IFA_F_PERMANENT) &&
3353 fixup_permanent_addr(idev, ifp) < 0) {
3354 write_unlock_bh(&idev->lock);
3355 in6_ifa_hold(ifp);
3356 ipv6_del_addr(ifp);
3357 write_lock_bh(&idev->lock);
3358
3359 net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3360 idev->dev->name, &ifp->addr);
3361 }
3362 }
3363
3364 write_unlock_bh(&idev->lock);
3365 }
3366
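/* Editor-added summary of the netdevice notifier below: create or tear down
 * the inet6_dev in response to register/unregister, up/down, MTU and type
 * changes, and run the per-link-type autoconfiguration (SIT, GRE, loopback,
 * generic) once the link is ready.
 */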
3367 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3368 void *ptr)
3369 {
3370 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3371 struct netdev_notifier_changeupper_info *info;
3372 struct inet6_dev *idev = __in6_dev_get(dev);
3373 struct net *net = dev_net(dev);
3374 int run_pending = 0;
3375 int err;
3376
3377 switch (event) {
3378 case NETDEV_REGISTER:
3379 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3380 idev = ipv6_add_dev(dev);
3381 if (IS_ERR(idev))
3382 return notifier_from_errno(PTR_ERR(idev));
3383 }
3384 break;
3385
3386 case NETDEV_CHANGEMTU:
3387 /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
3388 if (dev->mtu < IPV6_MIN_MTU) {
3389 addrconf_ifdown(dev, dev != net->loopback_dev);
3390 break;
3391 }
3392
3393 if (idev) {
3394 rt6_mtu_change(dev, dev->mtu);
3395 idev->cnf.mtu6 = dev->mtu;
3396 break;
3397 }
3398
3399 /* allocate new idev */
3400 idev = ipv6_add_dev(dev);
3401 if (IS_ERR(idev))
3402 break;
3403
3404 /* device is still not ready */
3405 if (!(idev->if_flags & IF_READY))
3406 break;
3407
3408 run_pending = 1;
3409
3410 /* fall through */
3411
3412 case NETDEV_UP:
3413 case NETDEV_CHANGE:
3414 if (dev->flags & IFF_SLAVE)
3415 break;
3416
3417 if (idev && idev->cnf.disable_ipv6)
3418 break;
3419
3420 if (event == NETDEV_UP) {
3421 /* restore routes for permanent addresses */
3422 addrconf_permanent_addr(dev);
3423
3424 if (!addrconf_link_ready(dev)) {
3425 /* device is not ready yet. */
3426 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3427 dev->name);
3428 break;
3429 }
3430
3431 if (!idev && dev->mtu >= IPV6_MIN_MTU)
3432 idev = ipv6_add_dev(dev);
3433
3434 if (!IS_ERR_OR_NULL(idev)) {
3435 idev->if_flags |= IF_READY;
3436 run_pending = 1;
3437 }
3438 } else if (event == NETDEV_CHANGE) {
3439 if (!addrconf_link_ready(dev)) {
3440 /* device is still not ready. */
3441 break;
3442 }
3443
3444 if (idev) {
3445 if (idev->if_flags & IF_READY) {
3446 /* device is already configured -
3447 * but resend MLD reports, we might
3448 * have roamed and need to update
3449 * multicast snooping switches
3450 */
3451 ipv6_mc_up(idev);
3452 break;
3453 }
3454 idev->if_flags |= IF_READY;
3455 }
3456
3457 pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3458 dev->name);
3459
3460 run_pending = 1;
3461 }
3462
3463 switch (dev->type) {
3464 #if IS_ENABLED(CONFIG_IPV6_SIT)
3465 case ARPHRD_SIT:
3466 addrconf_sit_config(dev);
3467 break;
3468 #endif
3469 #if IS_ENABLED(CONFIG_NET_IPGRE)
3470 case ARPHRD_IPGRE:
3471 addrconf_gre_config(dev);
3472 break;
3473 #endif
3474 case ARPHRD_LOOPBACK:
3475 init_loopback(dev);
3476 break;
3477
3478 default:
3479 addrconf_dev_config(dev);
3480 break;
3481 }
3482
3483 if (!IS_ERR_OR_NULL(idev)) {
3484 if (run_pending)
3485 addrconf_dad_run(idev);
3486
3487 /*
3488 * If the MTU changed while the interface was down,
3489 * the new MTU must be reflected in the idev as well
3490 * as in the routes when the interface comes back up.
3491 */
3492 if (idev->cnf.mtu6 != dev->mtu &&
3493 dev->mtu >= IPV6_MIN_MTU) {
3494 rt6_mtu_change(dev, dev->mtu);
3495 idev->cnf.mtu6 = dev->mtu;
3496 }
3497 idev->tstamp = jiffies;
3498 inet6_ifinfo_notify(RTM_NEWLINK, idev);
3499
3500 /*
3501 * If the MTU that was changed while the interface was down
3502 * is lower than IPV6_MIN_MTU, stop IPv6 on this interface.
3503 */
3504 if (dev->mtu < IPV6_MIN_MTU)
3505 addrconf_ifdown(dev, dev != net->loopback_dev);
3506 }
3507 break;
3508
3509 case NETDEV_DOWN:
3510 case NETDEV_UNREGISTER:
3511 /*
3512 * Remove all addresses from this interface.
3513 */
3514 addrconf_ifdown(dev, event != NETDEV_DOWN);
3515 break;
3516
3517 case NETDEV_CHANGENAME:
3518 if (idev) {
3519 snmp6_unregister_dev(idev);
3520 addrconf_sysctl_unregister(idev);
3521 err = addrconf_sysctl_register(idev);
3522 if (err)
3523 return notifier_from_errno(err);
3524 err = snmp6_register_dev(idev);
3525 if (err) {
3526 addrconf_sysctl_unregister(idev);
3527 return notifier_from_errno(err);
3528 }
3529 }
3530 break;
3531
3532 case NETDEV_PRE_TYPE_CHANGE:
3533 case NETDEV_POST_TYPE_CHANGE:
3534 if (idev)
3535 addrconf_type_change(dev, event);
3536 break;
3537
3538 case NETDEV_CHANGEUPPER:
3539 info = ptr;
3540
3541 /* flush all routes if dev is linked to or unlinked from
3542 * an L3 master device (e.g., VRF)
3543 */
3544 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3545 addrconf_ifdown(dev, 0);
3546 }
3547
3548 return NOTIFY_OK;
3549 }
3550
3551 /*
3552 * addrconf module should be notified of a device going up
3553 */
3554 static struct notifier_block ipv6_dev_notf = {
3555 .notifier_call = addrconf_notify,
3556 .priority = ADDRCONF_NOTIFY_PRIORITY,
3557 };
3558
3559 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3560 {
3561 struct inet6_dev *idev;
3562 ASSERT_RTNL();
3563
3564 idev = __in6_dev_get(dev);
3565
3566 if (event == NETDEV_POST_TYPE_CHANGE)
3567 ipv6_mc_remap(idev);
3568 else if (event == NETDEV_PRE_TYPE_CHANGE)
3569 ipv6_mc_unmap(idev);
3570 }
3571
3572 static bool addr_is_local(const struct in6_addr *addr)
3573 {
3574 return ipv6_addr_type(addr) &
3575 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3576 }
3577
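/* Editor-added note: the function below brings IPv6 down on @dev.  With
 * @how != 0 the inet6_dev is being destroyed (unregister); with @how == 0
 * this is a link-down event and, depending on keep_addr_on_down, permanent
 * non-link-local addresses may be kept in INET6_IFADDR_STATE_PREDAD so they
 * can be revived when the link comes back up.
 */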
3578 static int addrconf_ifdown(struct net_device *dev, int how)
3579 {
3580 struct net *net = dev_net(dev);
3581 struct inet6_dev *idev;
3582 struct inet6_ifaddr *ifa, *tmp;
3583 int _keep_addr;
3584 bool keep_addr;
3585 int state, i;
3586
3587 ASSERT_RTNL();
3588
3589 rt6_ifdown(net, dev);
3590 neigh_ifdown(&nd_tbl, dev);
3591
3592 idev = __in6_dev_get(dev);
3593 if (!idev)
3594 return -ENODEV;
3595
3596 /*
3597 * Step 1: remove reference to ipv6 device from parent device.
3598 * Do not dev_put!
3599 */
3600 if (how) {
3601 idev->dead = 1;
3602
3603 /* protected by rtnl_lock */
3604 RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3605
3606 /* Step 1.5: remove snmp6 entry */
3607 snmp6_unregister_dev(idev);
3608
3609 }
3610
3611 /* aggregate the system setting and interface setting */
3612 _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3613 if (!_keep_addr)
3614 _keep_addr = idev->cnf.keep_addr_on_down;
3615
3616 /* combine the user config with event to determine if permanent
3617 * addresses are to be removed from address hash table
3618 */
3619 keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6);
3620
3621 /* Step 2: clear hash table */
3622 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3623 struct hlist_head *h = &inet6_addr_lst[i];
3624
3625 spin_lock_bh(&addrconf_hash_lock);
3626 restart:
3627 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3628 if (ifa->idev == idev) {
3629 addrconf_del_dad_work(ifa);
3630 /* combined flag + permanent flag decide if
3631 * address is retained on a down event
3632 */
3633 if (!keep_addr ||
3634 !(ifa->flags & IFA_F_PERMANENT) ||
3635 addr_is_local(&ifa->addr)) {
3636 hlist_del_init_rcu(&ifa->addr_lst);
3637 goto restart;
3638 }
3639 }
3640 }
3641 spin_unlock_bh(&addrconf_hash_lock);
3642 }
3643
3644 write_lock_bh(&idev->lock);
3645
3646 addrconf_del_rs_timer(idev);
3647
3648 /* Step 2: clear flags for stateless addrconf */
3649 if (!how)
3650 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3651
3652 /* Step 3: clear tempaddr list */
3653 while (!list_empty(&idev->tempaddr_list)) {
3654 ifa = list_first_entry(&idev->tempaddr_list,
3655 struct inet6_ifaddr, tmp_list);
3656 list_del(&ifa->tmp_list);
3657 write_unlock_bh(&idev->lock);
3658 spin_lock_bh(&ifa->lock);
3659
3660 if (ifa->ifpub) {
3661 in6_ifa_put(ifa->ifpub);
3662 ifa->ifpub = NULL;
3663 }
3664 spin_unlock_bh(&ifa->lock);
3665 in6_ifa_put(ifa);
3666 write_lock_bh(&idev->lock);
3667 }
3668
3669 /* re-combine the user config with event to determine if permanent
3670 * addresses are to be removed from the interface list
3671 */
3672 keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6);
3673
3674 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
3675 struct rt6_info *rt = NULL;
3676 bool keep;
3677
3678 addrconf_del_dad_work(ifa);
3679
3680 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3681 !addr_is_local(&ifa->addr);
3682
3683 write_unlock_bh(&idev->lock);
3684 spin_lock_bh(&ifa->lock);
3685
3686 if (keep) {
3687 /* set state to skip the notifier below */
3688 state = INET6_IFADDR_STATE_DEAD;
3689 ifa->state = INET6_IFADDR_STATE_PREDAD;
3690 if (!(ifa->flags & IFA_F_NODAD))
3691 ifa->flags |= IFA_F_TENTATIVE;
3692
3693 rt = ifa->rt;
3694 ifa->rt = NULL;
3695 } else {
3696 state = ifa->state;
3697 ifa->state = INET6_IFADDR_STATE_DEAD;
3698 }
3699
3700 spin_unlock_bh(&ifa->lock);
3701
3702 if (rt)
3703 ip6_del_rt(rt);
3704
3705 if (state != INET6_IFADDR_STATE_DEAD) {
3706 __ipv6_ifa_notify(RTM_DELADDR, ifa);
3707 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3708 } else {
3709 if (idev->cnf.forwarding)
3710 addrconf_leave_anycast(ifa);
3711 addrconf_leave_solict(ifa->idev, &ifa->addr);
3712 }
3713
3714 write_lock_bh(&idev->lock);
3715 if (!keep) {
3716 list_del_rcu(&ifa->if_list);
3717 in6_ifa_put(ifa);
3718 }
3719 }
3720
3721 write_unlock_bh(&idev->lock);
3722
3723 /* Step 5: Discard anycast and multicast list */
3724 if (how) {
3725 ipv6_ac_destroy_dev(idev);
3726 ipv6_mc_destroy_dev(idev);
3727 } else {
3728 ipv6_mc_down(idev);
3729 }
3730
3731 idev->tstamp = jiffies;
3732
3733 /* Last: Shoot the device (if unregistered) */
3734 if (how) {
3735 addrconf_sysctl_unregister(idev);
3736 neigh_parms_release(&nd_tbl, idev->nd_parms);
3737 neigh_ifdown(&nd_tbl, dev);
3738 in6_dev_put(idev);
3739 }
3740 return 0;
3741 }
3742
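/* Editor-added note: the router solicitation retransmit timer below sends
 * another RS from the link-local address while fewer than rtr_solicits
 * probes have been sent (or solicitations are unlimited, rtr_solicits < 0),
 * and re-arms itself with an RFC 3315 section 14 style backoff interval.
 */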
3743 static void addrconf_rs_timer(struct timer_list *t)
3744 {
3745 struct inet6_dev *idev = from_timer(idev, t, rs_timer);
3746 struct net_device *dev = idev->dev;
3747 struct in6_addr lladdr;
3748
3749 write_lock(&idev->lock);
3750 if (idev->dead || !(idev->if_flags & IF_READY))
3751 goto out;
3752
3753 if (!ipv6_accept_ra(idev))
3754 goto out;
3755
3756 /* Announcement received after solicitation was sent */
3757 if (idev->if_flags & IF_RA_RCVD)
3758 goto out;
3759
3760 if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3761 write_unlock(&idev->lock);
3762 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3763 ndisc_send_rs(dev, &lladdr,
3764 &in6addr_linklocal_allrouters);
3765 else
3766 goto put;
3767
3768 write_lock(&idev->lock);
3769 idev->rs_interval = rfc3315_s14_backoff_update(
3770 idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
3771 /* The wait after the last probe can be shorter */
3772 addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3773 idev->cnf.rtr_solicits) ?
3774 idev->cnf.rtr_solicit_delay :
3775 idev->rs_interval);
3776 } else {
3777 /*
3778 * Note: we no longer support the deprecated
3779 * "all on-link" assumption.
3780 */
3781 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3782 }
3783
3784 out:
3785 write_unlock(&idev->lock);
3786 put:
3787 in6_dev_put(idev);
3788 }
3789
3790 /*
3791 * Duplicate Address Detection
3792 */
3793 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3794 {
3795 unsigned long rand_num;
3796 struct inet6_dev *idev = ifp->idev;
3797 u64 nonce;
3798
3799 if (ifp->flags & IFA_F_OPTIMISTIC)
3800 rand_num = 0;
3801 else
3802 rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
3803
3804 nonce = 0;
3805 if (idev->cnf.enhanced_dad ||
3806 dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
3807 do
3808 get_random_bytes(&nonce, 6);
3809 while (nonce == 0);
3810 }
3811 ifp->dad_nonce = nonce;
3812 ifp->dad_probes = idev->cnf.dad_transmits;
3813 addrconf_mod_dad_work(ifp, rand_num);
3814 }
3815
3816 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3817 {
3818 struct inet6_dev *idev = ifp->idev;
3819 struct net_device *dev = idev->dev;
3820 bool bump_id, notify = false;
3821
3822 addrconf_join_solict(dev, &ifp->addr);
3823
3824 prandom_seed((__force u32) ifp->addr.s6_addr32[3]);
3825
3826 read_lock_bh(&idev->lock);
3827 spin_lock(&ifp->lock);
3828 if (ifp->state == INET6_IFADDR_STATE_DEAD)
3829 goto out;
3830
3831 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
3832 (dev_net(dev)->ipv6.devconf_all->accept_dad < 1 &&
3833 idev->cnf.accept_dad < 1) ||
3834 !(ifp->flags&IFA_F_TENTATIVE) ||
3835 ifp->flags & IFA_F_NODAD) {
3836 bump_id = ifp->flags & IFA_F_TENTATIVE;
3837 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3838 spin_unlock(&ifp->lock);
3839 read_unlock_bh(&idev->lock);
3840
3841 addrconf_dad_completed(ifp, bump_id);
3842 return;
3843 }
3844
3845 if (!(idev->if_flags & IF_READY)) {
3846 spin_unlock(&ifp->lock);
3847 read_unlock_bh(&idev->lock);
3848 /*
3849 * If the device is not ready:
3850 * - keep it tentative if it is a permanent address.
3851 * - otherwise, kill it.
3852 */
3853 in6_ifa_hold(ifp);
3854 addrconf_dad_stop(ifp, 0);
3855 return;
3856 }
3857
3858 /*
3859 * Optimistic nodes can start receiving
3860 * frames right away.
3861 */
3862 if (ifp->flags & IFA_F_OPTIMISTIC) {
3863 ip6_ins_rt(ifp->rt);
3864 if (ipv6_use_optimistic_addr(dev_net(dev), idev)) {
3865 /* Because optimistic nodes can use this address,
3866 * notify listeners. If DAD fails, RTM_DELADDR is sent.
3867 */
3868 notify = true;
3869 }
3870 }
3871
3872 addrconf_dad_kick(ifp);
3873 out:
3874 spin_unlock(&ifp->lock);
3875 read_unlock_bh(&idev->lock);
3876 if (notify)
3877 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3878 }
3879
3880 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
3881 {
3882 bool begin_dad = false;
3883
3884 spin_lock_bh(&ifp->lock);
3885 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
3886 ifp->state = INET6_IFADDR_STATE_PREDAD;
3887 begin_dad = true;
3888 }
3889 spin_unlock_bh(&ifp->lock);
3890
3891 if (begin_dad)
3892 addrconf_mod_dad_work(ifp, 0);
3893 }
3894
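/* Editor-added summary of the DAD worker below: depending on the address
 * state it either begins DAD (PREDAD), aborts it after a detected conflict
 * (ERRDAD, optionally disabling IPv6 on the link when accept_dad > 1 and the
 * conflicting address was the MAC-derived link-local one), or sends the next
 * neighbour solicitation and re-arms itself until dad_probes reaches zero,
 * at which point addrconf_dad_completed() runs.
 */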
3895 static void addrconf_dad_work(struct work_struct *w)
3896 {
3897 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
3898 struct inet6_ifaddr,
3899 dad_work);
3900 struct inet6_dev *idev = ifp->idev;
3901 bool bump_id, disable_ipv6 = false;
3902 struct in6_addr mcaddr;
3903
3904 enum {
3905 DAD_PROCESS,
3906 DAD_BEGIN,
3907 DAD_ABORT,
3908 } action = DAD_PROCESS;
3909
3910 rtnl_lock();
3911
3912 spin_lock_bh(&ifp->lock);
3913 if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
3914 action = DAD_BEGIN;
3915 ifp->state = INET6_IFADDR_STATE_DAD;
3916 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
3917 action = DAD_ABORT;
3918 ifp->state = INET6_IFADDR_STATE_POSTDAD;
3919
3920 if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
3921 idev->cnf.accept_dad > 1) &&
3922 !idev->cnf.disable_ipv6 &&
3923 !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
3924 struct in6_addr addr;
3925
3926 addr.s6_addr32[0] = htonl(0xfe800000);
3927 addr.s6_addr32[1] = 0;
3928
3929 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
3930 ipv6_addr_equal(&ifp->addr, &addr)) {
3931 /* DAD failed for link-local based on MAC */
3932 idev->cnf.disable_ipv6 = 1;
3933
3934 pr_info("%s: IPv6 being disabled!\n",
3935 ifp->idev->dev->name);
3936 disable_ipv6 = true;
3937 }
3938 }
3939 }
3940 spin_unlock_bh(&ifp->lock);
3941
3942 if (action == DAD_BEGIN) {
3943 addrconf_dad_begin(ifp);
3944 goto out;
3945 } else if (action == DAD_ABORT) {
3946 in6_ifa_hold(ifp);
3947 addrconf_dad_stop(ifp, 1);
3948 if (disable_ipv6)
3949 addrconf_ifdown(idev->dev, 0);
3950 goto out;
3951 }
3952
3953 if (!ifp->dad_probes && addrconf_dad_end(ifp))
3954 goto out;
3955
3956 write_lock_bh(&idev->lock);
3957 if (idev->dead || !(idev->if_flags & IF_READY)) {
3958 write_unlock_bh(&idev->lock);
3959 goto out;
3960 }
3961
3962 spin_lock(&ifp->lock);
3963 if (ifp->state == INET6_IFADDR_STATE_DEAD) {
3964 spin_unlock(&ifp->lock);
3965 write_unlock_bh(&idev->lock);
3966 goto out;
3967 }
3968
3969 if (ifp->dad_probes == 0) {
3970 /*
3971 * DAD was successful
3972 */
3973
3974 bump_id = ifp->flags & IFA_F_TENTATIVE;
3975 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3976 spin_unlock(&ifp->lock);
3977 write_unlock_bh(&idev->lock);
3978
3979 addrconf_dad_completed(ifp, bump_id);
3980
3981 goto out;
3982 }
3983
3984 ifp->dad_probes--;
3985 addrconf_mod_dad_work(ifp,
3986 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
3987 spin_unlock(&ifp->lock);
3988 write_unlock_bh(&idev->lock);
3989
3990 /* send a neighbour solicitation for our addr */
3991 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
3992 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
3993 ifp->dad_nonce);
3994 out:
3995 in6_ifa_put(ifp);
3996 rtnl_unlock();
3997 }
3998
3999 /* ifp->idev must be at least read locked */
4000 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
4001 {
4002 struct inet6_ifaddr *ifpiter;
4003 struct inet6_dev *idev = ifp->idev;
4004
4005 list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4006 if (ifpiter->scope > IFA_LINK)
4007 break;
4008 if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4009 (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4010 IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4011 IFA_F_PERMANENT)
4012 return false;
4013 }
4014 return true;
4015 }
4016
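/* Editor-added note: once DAD finishes successfully, the function below
 * announces the address via RTM_NEWADDR, re-sends the MLD report with the
 * now-valid link-local source, and starts router solicitations (with
 * RFC 3315 style backoff) if this is the only usable link-local address and
 * router advertisements are accepted.
 */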
4017 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
4018 {
4019 struct net_device *dev = ifp->idev->dev;
4020 struct in6_addr lladdr;
4021 bool send_rs, send_mld;
4022
4023 addrconf_del_dad_work(ifp);
4024
4025 /*
4026 * Configure the address for reception. Now it is valid.
4027 */
4028
4029 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4030
4031 /* If the added address is link-local and we are prepared to process
4032 router advertisements, start sending router solicitations.
4033 */
4034
4035 read_lock_bh(&ifp->idev->lock);
4036 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4037 send_rs = send_mld &&
4038 ipv6_accept_ra(ifp->idev) &&
4039 ifp->idev->cnf.rtr_solicits != 0 &&
4040 (dev->flags&IFF_LOOPBACK) == 0;
4041 read_unlock_bh(&ifp->idev->lock);
4042
4043 /* While DAD is in progress, the MLD report's source address is in6addr_any.
4044 * Resend it with the proper link-local address now.
4045 */
4046 if (send_mld)
4047 ipv6_mc_dad_complete(ifp->idev);
4048
4049 if (send_rs) {
4050 /*
4051 * If a host has already performed a random delay
4052 * [...] as part of DAD [...] there is no need
4053 * to delay again before sending the first RS
4054 */
4055 if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4056 return;
4057 ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4058
4059 write_lock_bh(&ifp->idev->lock);
4060 spin_lock(&ifp->lock);
4061 ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4062 ifp->idev->cnf.rtr_solicit_interval);
4063 ifp->idev->rs_probes = 1;
4064 ifp->idev->if_flags |= IF_RS_SENT;
4065 addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4066 spin_unlock(&ifp->lock);
4067 write_unlock_bh(&ifp->idev->lock);
4068 }
4069
4070 if (bump_id)
4071 rt_genid_bump_ipv6(dev_net(dev));
4072
4073 /* Make sure that a new temporary address will be created
4074 * before this temporary address becomes deprecated.
4075 */
4076 if (ifp->flags & IFA_F_TEMPORARY)
4077 addrconf_verify_rtnl();
4078 }
4079
4080 static void addrconf_dad_run(struct inet6_dev *idev)
4081 {
4082 struct inet6_ifaddr *ifp;
4083
4084 read_lock_bh(&idev->lock);
4085 list_for_each_entry(ifp, &idev->addr_list, if_list) {
4086 spin_lock(&ifp->lock);
4087 if (ifp->flags & IFA_F_TENTATIVE &&
4088 ifp->state == INET6_IFADDR_STATE_DAD)
4089 addrconf_dad_kick(ifp);
4090 spin_unlock(&ifp->lock);
4091 }
4092 read_unlock_bh(&idev->lock);
4093 }
4094
4095 #ifdef CONFIG_PROC_FS
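/* Editor-added note: the /proc/net/if_inet6 code below iterates the global
 * inet6_addr_lst hash under RCU, filtering by network namespace, and prints
 * one line per address (address, ifindex, prefix length, scope, flags,
 * device name).
 */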
4096 struct if6_iter_state {
4097 struct seq_net_private p;
4098 int bucket;
4099 int offset;
4100 };
4101
4102 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4103 {
4104 struct if6_iter_state *state = seq->private;
4105 struct net *net = seq_file_net(seq);
4106 struct inet6_ifaddr *ifa = NULL;
4107 int p = 0;
4108
4109 /* initial bucket if pos is 0 */
4110 if (pos == 0) {
4111 state->bucket = 0;
4112 state->offset = 0;
4113 }
4114
4115 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4116 hlist_for_each_entry_rcu(ifa, &inet6_addr_lst[state->bucket],
4117 addr_lst) {
4118 if (!net_eq(dev_net(ifa->idev->dev), net))
4119 continue;
4120 /* sync with offset */
4121 if (p < state->offset) {
4122 p++;
4123 continue;
4124 }
4125 state->offset++;
4126 return ifa;
4127 }
4128
4129 /* prepare for next bucket */
4130 state->offset = 0;
4131 p = 0;
4132 }
4133 return NULL;
4134 }
4135
4136 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4137 struct inet6_ifaddr *ifa)
4138 {
4139 struct if6_iter_state *state = seq->private;
4140 struct net *net = seq_file_net(seq);
4141
4142 hlist_for_each_entry_continue_rcu(ifa, addr_lst) {
4143 if (!net_eq(dev_net(ifa->idev->dev), net))
4144 continue;
4145 state->offset++;
4146 return ifa;
4147 }
4148
4149 while (++state->bucket < IN6_ADDR_HSIZE) {
4150 state->offset = 0;
4151 hlist_for_each_entry_rcu(ifa,
4152 &inet6_addr_lst[state->bucket], addr_lst) {
4153 if (!net_eq(dev_net(ifa->idev->dev), net))
4154 continue;
4155 state->offset++;
4156 return ifa;
4157 }
4158 }
4159
4160 return NULL;
4161 }
4162
4163 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4164 __acquires(rcu)
4165 {
4166 rcu_read_lock();
4167 return if6_get_first(seq, *pos);
4168 }
4169
4170 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4171 {
4172 struct inet6_ifaddr *ifa;
4173
4174 ifa = if6_get_next(seq, v);
4175 ++*pos;
4176 return ifa;
4177 }
4178
4179 static void if6_seq_stop(struct seq_file *seq, void *v)
4180 __releases(rcu)
4181 {
4182 rcu_read_unlock();
4183 }
4184
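/*
 * Emit one /proc/net/if_inet6 line:
 *   <address> <ifindex> <prefixlen> <scope> <flags> <device>
 * with all numeric fields in hex, e.g. (illustrative):
 *   fe80000000000000020c29fffe123456 02 40 20 80     eth0
 */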
4185 static int if6_seq_show(struct seq_file *seq, void *v)
4186 {
4187 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4188 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4189 &ifp->addr,
4190 ifp->idev->dev->ifindex,
4191 ifp->prefix_len,
4192 ifp->scope,
4193 (u8) ifp->flags,
4194 ifp->idev->dev->name);
4195 return 0;
4196 }
4197
4198 static const struct seq_operations if6_seq_ops = {
4199 .start = if6_seq_start,
4200 .next = if6_seq_next,
4201 .show = if6_seq_show,
4202 .stop = if6_seq_stop,
4203 };
4204
4205 static int if6_seq_open(struct inode *inode, struct file *file)
4206 {
4207 return seq_open_net(inode, file, &if6_seq_ops,
4208 sizeof(struct if6_iter_state));
4209 }
4210
4211 static const struct file_operations if6_fops = {
4212 .owner = THIS_MODULE,
4213 .open = if6_seq_open,
4214 .read = seq_read,
4215 .llseek = seq_lseek,
4216 .release = seq_release_net,
4217 };
4218
4219 static int __net_init if6_proc_net_init(struct net *net)
4220 {
4221 if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
4222 return -ENOMEM;
4223 return 0;
4224 }
4225
4226 static void __net_exit if6_proc_net_exit(struct net *net)
4227 {
4228 remove_proc_entry("if_inet6", net->proc_net);
4229 }
4230
4231 static struct pernet_operations if6_proc_net_ops = {
4232 .init = if6_proc_net_init,
4233 .exit = if6_proc_net_exit,
4234 };
4235
4236 int __init if6_proc_init(void)
4237 {
4238 return register_pernet_subsys(&if6_proc_net_ops);
4239 }
4240
4241 void if6_proc_exit(void)
4242 {
4243 unregister_pernet_subsys(&if6_proc_net_ops);
4244 }
4245 #endif /* CONFIG_PROC_FS */
4246
4247 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4248 /* Check if address is a home address configured on any interface. */
4249 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4250 {
4251 unsigned int hash = inet6_addr_hash(net, addr);
4252 struct inet6_ifaddr *ifp = NULL;
4253 int ret = 0;
4254
4255 rcu_read_lock();
4256 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
4257 if (!net_eq(dev_net(ifp->idev->dev), net))
4258 continue;
4259 if (ipv6_addr_equal(&ifp->addr, addr) &&
4260 (ifp->flags & IFA_F_HOMEADDRESS)) {
4261 ret = 1;
4262 break;
4263 }
4264 }
4265 rcu_read_unlock();
4266 return ret;
4267 }
4268 #endif
4269
4270 /*
4271 * Periodic address status verification
4272 */
4273
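/*
 * Walk the whole address hash table: delete addresses whose valid lifetime
 * has expired, deprecate those past their preferred lifetime, regenerate
 * temporary (privacy) addresses shortly before they would be deprecated,
 * and reschedule the worker for the earliest upcoming event.
 */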
4274 static void addrconf_verify_rtnl(void)
4275 {
4276 unsigned long now, next, next_sec, next_sched;
4277 struct inet6_ifaddr *ifp;
4278 int i;
4279
4280 ASSERT_RTNL();
4281
4282 rcu_read_lock_bh();
4283 now = jiffies;
4284 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4285
4286 cancel_delayed_work(&addr_chk_work);
4287
4288 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4289 restart:
4290 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
4291 unsigned long age;
4292
4293 			/* An IFA_F_PERMANENT address can still have a finite
4294 			 * preferred lifetime (preferred_lft set to a non-zero,
4295 			 * non-infinite value while valid_lft is infinity), so
4296 			 * only skip it when preferred_lft really is infinite.
4297 			 */
4297 if ((ifp->flags & IFA_F_PERMANENT) &&
4298 (ifp->prefered_lft == INFINITY_LIFE_TIME))
4299 continue;
4300
4301 spin_lock(&ifp->lock);
4302 /* We try to batch several events at once. */
4303 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4304
4305 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4306 age >= ifp->valid_lft) {
4307 spin_unlock(&ifp->lock);
4308 in6_ifa_hold(ifp);
4309 ipv6_del_addr(ifp);
4310 goto restart;
4311 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4312 spin_unlock(&ifp->lock);
4313 continue;
4314 } else if (age >= ifp->prefered_lft) {
4315 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4316 int deprecate = 0;
4317
4318 if (!(ifp->flags&IFA_F_DEPRECATED)) {
4319 deprecate = 1;
4320 ifp->flags |= IFA_F_DEPRECATED;
4321 }
4322
4323 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4324 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4325 next = ifp->tstamp + ifp->valid_lft * HZ;
4326
4327 spin_unlock(&ifp->lock);
4328
4329 if (deprecate) {
4330 in6_ifa_hold(ifp);
4331
4332 ipv6_ifa_notify(0, ifp);
4333 in6_ifa_put(ifp);
4334 goto restart;
4335 }
4336 } else if ((ifp->flags&IFA_F_TEMPORARY) &&
4337 !(ifp->flags&IFA_F_TENTATIVE)) {
4338 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4339 ifp->idev->cnf.dad_transmits *
4340 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
4341
4342 if (age >= ifp->prefered_lft - regen_advance) {
4343 struct inet6_ifaddr *ifpub = ifp->ifpub;
4344 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4345 next = ifp->tstamp + ifp->prefered_lft * HZ;
4346 if (!ifp->regen_count && ifpub) {
4347 ifp->regen_count++;
4348 in6_ifa_hold(ifp);
4349 in6_ifa_hold(ifpub);
4350 spin_unlock(&ifp->lock);
4351
4352 spin_lock(&ifpub->lock);
4353 ifpub->regen_count = 0;
4354 spin_unlock(&ifpub->lock);
4355 ipv6_create_tempaddr(ifpub, ifp, true);
4356 in6_ifa_put(ifpub);
4357 in6_ifa_put(ifp);
4358 goto restart;
4359 }
4360 } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4361 next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4362 spin_unlock(&ifp->lock);
4363 } else {
4364 /* ifp->prefered_lft <= ifp->valid_lft */
4365 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4366 next = ifp->tstamp + ifp->prefered_lft * HZ;
4367 spin_unlock(&ifp->lock);
4368 }
4369 }
4370 }
4371
4372 next_sec = round_jiffies_up(next);
4373 next_sched = next;
4374
4375 /* If rounded timeout is accurate enough, accept it. */
4376 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4377 next_sched = next_sec;
4378
4379 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4380 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4381 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4382
4383 ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4384 now, next, next_sec, next_sched);
4385 mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
4386 rcu_read_unlock_bh();
4387 }
4388
4389 static void addrconf_verify_work(struct work_struct *w)
4390 {
4391 rtnl_lock();
4392 addrconf_verify_rtnl();
4393 rtnl_unlock();
4394 }
4395
4396 static void addrconf_verify(void)
4397 {
4398 mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
4399 }
4400
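/*
 * Resolve the IFA_ADDRESS/IFA_LOCAL pair of a netlink request: when both are
 * present and differ, IFA_LOCAL is the interface address and IFA_ADDRESS the
 * peer; otherwise whichever attribute is present is the address itself.
 */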
4401 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4402 struct in6_addr **peer_pfx)
4403 {
4404 struct in6_addr *pfx = NULL;
4405
4406 *peer_pfx = NULL;
4407
4408 if (addr)
4409 pfx = nla_data(addr);
4410
4411 if (local) {
4412 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4413 *peer_pfx = pfx;
4414 pfx = nla_data(local);
4415 }
4416
4417 return pfx;
4418 }
4419
4420 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4421 [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
4422 [IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
4423 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
4424 [IFA_FLAGS] = { .len = sizeof(u32) },
4425 };
4426
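/*
 * RTM_DELADDR handler, reached e.g. via "ip -6 addr del 2001:db8::1/64 dev
 * eth0" (illustrative address). Only IFA_F_MANAGETEMPADDR is honoured from
 * the request's flags.
 */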
4427 static int
4428 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4429 struct netlink_ext_ack *extack)
4430 {
4431 struct net *net = sock_net(skb->sk);
4432 struct ifaddrmsg *ifm;
4433 struct nlattr *tb[IFA_MAX+1];
4434 struct in6_addr *pfx, *peer_pfx;
4435 u32 ifa_flags;
4436 int err;
4437
4438 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4439 extack);
4440 if (err < 0)
4441 return err;
4442
4443 ifm = nlmsg_data(nlh);
4444 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4445 if (!pfx)
4446 return -EINVAL;
4447
4448 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4449
4450 /* We ignore other flags so far. */
4451 ifa_flags &= IFA_F_MANAGETEMPADDR;
4452
4453 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4454 ifm->ifa_prefixlen);
4455 }
4456
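/*
 * Update flags and lifetimes of an existing address under RTNL, refresh or
 * clean up its prefix route, and let manage_tempaddrs() adjust any derived
 * temporary addresses when IFA_F_MANAGETEMPADDR is set or cleared.
 */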
4457 static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
4458 u32 prefered_lft, u32 valid_lft)
4459 {
4460 u32 flags;
4461 clock_t expires;
4462 unsigned long timeout;
4463 bool was_managetempaddr;
4464 bool had_prefixroute;
4465
4466 ASSERT_RTNL();
4467
4468 if (!valid_lft || (prefered_lft > valid_lft))
4469 return -EINVAL;
4470
4471 if (ifa_flags & IFA_F_MANAGETEMPADDR &&
4472 (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4473 return -EINVAL;
4474
4475 timeout = addrconf_timeout_fixup(valid_lft, HZ);
4476 if (addrconf_finite_timeout(timeout)) {
4477 expires = jiffies_to_clock_t(timeout * HZ);
4478 valid_lft = timeout;
4479 flags = RTF_EXPIRES;
4480 } else {
4481 expires = 0;
4482 flags = 0;
4483 ifa_flags |= IFA_F_PERMANENT;
4484 }
4485
4486 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
4487 if (addrconf_finite_timeout(timeout)) {
4488 if (timeout == 0)
4489 ifa_flags |= IFA_F_DEPRECATED;
4490 prefered_lft = timeout;
4491 }
4492
4493 spin_lock_bh(&ifp->lock);
4494 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4495 had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4496 !(ifp->flags & IFA_F_NOPREFIXROUTE);
4497 ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4498 IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4499 IFA_F_NOPREFIXROUTE);
4500 ifp->flags |= ifa_flags;
4501 ifp->tstamp = jiffies;
4502 ifp->valid_lft = valid_lft;
4503 ifp->prefered_lft = prefered_lft;
4504
4505 spin_unlock_bh(&ifp->lock);
4506 if (!(ifp->flags&IFA_F_TENTATIVE))
4507 ipv6_ifa_notify(0, ifp);
4508
4509 if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) {
4510 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
4511 expires, flags);
4512 } else if (had_prefixroute) {
4513 enum cleanup_prefix_rt_t action;
4514 unsigned long rt_expires;
4515
4516 write_lock_bh(&ifp->idev->lock);
4517 action = check_cleanup_prefix_route(ifp, &rt_expires);
4518 write_unlock_bh(&ifp->idev->lock);
4519
4520 if (action != CLEANUP_PREFIX_RT_NOP) {
4521 cleanup_prefix_route(ifp, rt_expires,
4522 action == CLEANUP_PREFIX_RT_DEL);
4523 }
4524 }
4525
4526 if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4527 if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
4528 valid_lft = prefered_lft = 0;
4529 manage_tempaddrs(ifp->idev, ifp, valid_lft, prefered_lft,
4530 !was_managetempaddr, jiffies);
4531 }
4532
4533 addrconf_verify_rtnl();
4534
4535 return 0;
4536 }
4537
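/*
 * RTM_NEWADDR handler: add a new address, or modify an existing one when
 * NLM_F_REPLACE is set (e.g. "ip -6 addr replace ..." with iproute2, as an
 * illustration). Lifetimes default to infinity unless IFA_CACHEINFO is given.
 */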
4538 static int
4539 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4540 struct netlink_ext_ack *extack)
4541 {
4542 struct net *net = sock_net(skb->sk);
4543 struct ifaddrmsg *ifm;
4544 struct nlattr *tb[IFA_MAX+1];
4545 struct in6_addr *pfx, *peer_pfx;
4546 struct inet6_ifaddr *ifa;
4547 struct net_device *dev;
4548 u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
4549 u32 ifa_flags;
4550 int err;
4551
4552 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4553 extack);
4554 if (err < 0)
4555 return err;
4556
4557 ifm = nlmsg_data(nlh);
4558 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4559 if (!pfx)
4560 return -EINVAL;
4561
4562 if (tb[IFA_CACHEINFO]) {
4563 struct ifa_cacheinfo *ci;
4564
4565 ci = nla_data(tb[IFA_CACHEINFO]);
4566 valid_lft = ci->ifa_valid;
4567 preferred_lft = ci->ifa_prefered;
4568 } else {
4569 preferred_lft = INFINITY_LIFE_TIME;
4570 valid_lft = INFINITY_LIFE_TIME;
4571 }
4572
4573 dev = __dev_get_by_index(net, ifm->ifa_index);
4574 if (!dev)
4575 return -ENODEV;
4576
4577 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4578
4579 /* We ignore other flags so far. */
4580 ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4581 IFA_F_NOPREFIXROUTE | IFA_F_MCAUTOJOIN;
4582
4583 ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
4584 if (!ifa) {
4585 /*
4586 * It would be best to check for !NLM_F_CREATE here but
4587 * userspace already relies on not having to provide this.
4588 */
4589 return inet6_addr_add(net, ifm->ifa_index, pfx, peer_pfx,
4590 ifm->ifa_prefixlen, ifa_flags,
4591 preferred_lft, valid_lft, extack);
4592 }
4593
4594 if (nlh->nlmsg_flags & NLM_F_EXCL ||
4595 !(nlh->nlmsg_flags & NLM_F_REPLACE))
4596 err = -EEXIST;
4597 else
4598 err = inet6_addr_modify(ifa, ifa_flags, preferred_lft, valid_lft);
4599
4600 in6_ifa_put(ifa);
4601
4602 return err;
4603 }
4604
4605 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4606 u8 scope, int ifindex)
4607 {
4608 struct ifaddrmsg *ifm;
4609
4610 ifm = nlmsg_data(nlh);
4611 ifm->ifa_family = AF_INET6;
4612 ifm->ifa_prefixlen = prefixlen;
4613 ifm->ifa_flags = flags;
4614 ifm->ifa_scope = scope;
4615 ifm->ifa_index = ifindex;
4616 }
4617
4618 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4619 unsigned long tstamp, u32 preferred, u32 valid)
4620 {
4621 struct ifa_cacheinfo ci;
4622
4623 ci.cstamp = cstamp_delta(cstamp);
4624 ci.tstamp = cstamp_delta(tstamp);
4625 ci.ifa_prefered = preferred;
4626 ci.ifa_valid = valid;
4627
4628 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4629 }
4630
4631 static inline int rt_scope(int ifa_scope)
4632 {
4633 if (ifa_scope & IFA_HOST)
4634 return RT_SCOPE_HOST;
4635 else if (ifa_scope & IFA_LINK)
4636 return RT_SCOPE_LINK;
4637 else if (ifa_scope & IFA_SITE)
4638 return RT_SCOPE_SITE;
4639 else
4640 return RT_SCOPE_UNIVERSE;
4641 }
4642
4643 static inline int inet6_ifaddr_msgsize(void)
4644 {
4645 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4646 + nla_total_size(16) /* IFA_LOCAL */
4647 + nla_total_size(16) /* IFA_ADDRESS */
4648 + nla_total_size(sizeof(struct ifa_cacheinfo))
4649 + nla_total_size(4) /* IFA_FLAGS */;
4650 }
4651
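/*
 * Fill one address notification/dump message. The advertised preferred and
 * valid lifetimes are the time remaining relative to ifa->tstamp, not the
 * originally configured values.
 */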
4652 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
4653 u32 portid, u32 seq, int event, unsigned int flags)
4654 {
4655 struct nlmsghdr *nlh;
4656 u32 preferred, valid;
4657
4658 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4659 if (!nlh)
4660 return -EMSGSIZE;
4661
4662 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
4663 ifa->idev->dev->ifindex);
4664
4665 if (!((ifa->flags&IFA_F_PERMANENT) &&
4666 (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
4667 preferred = ifa->prefered_lft;
4668 valid = ifa->valid_lft;
4669 if (preferred != INFINITY_LIFE_TIME) {
4670 long tval = (jiffies - ifa->tstamp)/HZ;
4671 if (preferred > tval)
4672 preferred -= tval;
4673 else
4674 preferred = 0;
4675 if (valid != INFINITY_LIFE_TIME) {
4676 if (valid > tval)
4677 valid -= tval;
4678 else
4679 valid = 0;
4680 }
4681 }
4682 } else {
4683 preferred = INFINITY_LIFE_TIME;
4684 valid = INFINITY_LIFE_TIME;
4685 }
4686
4687 if (!ipv6_addr_any(&ifa->peer_addr)) {
4688 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
4689 nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
4690 goto error;
4691 } else
4692 if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
4693 goto error;
4694
4695 if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
4696 goto error;
4697
4698 if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
4699 goto error;
4700
4701 nlmsg_end(skb, nlh);
4702 return 0;
4703
4704 error:
4705 nlmsg_cancel(skb, nlh);
4706 return -EMSGSIZE;
4707 }
4708
4709 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
4710 u32 portid, u32 seq, int event, u16 flags)
4711 {
4712 struct nlmsghdr *nlh;
4713 u8 scope = RT_SCOPE_UNIVERSE;
4714 int ifindex = ifmca->idev->dev->ifindex;
4715
4716 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
4717 scope = RT_SCOPE_SITE;
4718
4719 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4720 if (!nlh)
4721 return -EMSGSIZE;
4722
4723 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4724 if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
4725 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
4726 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4727 nlmsg_cancel(skb, nlh);
4728 return -EMSGSIZE;
4729 }
4730
4731 nlmsg_end(skb, nlh);
4732 return 0;
4733 }
4734
4735 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
4736 u32 portid, u32 seq, int event, unsigned int flags)
4737 {
4738 struct nlmsghdr *nlh;
4739 u8 scope = RT_SCOPE_UNIVERSE;
4740 int ifindex = ifaca->aca_idev->dev->ifindex;
4741
4742 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
4743 scope = RT_SCOPE_SITE;
4744
4745 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4746 if (!nlh)
4747 return -EMSGSIZE;
4748
4749 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4750 if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
4751 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
4752 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4753 nlmsg_cancel(skb, nlh);
4754 return -EMSGSIZE;
4755 }
4756
4757 nlmsg_end(skb, nlh);
4758 return 0;
4759 }
4760
4761 enum addr_type_t {
4762 UNICAST_ADDR,
4763 MULTICAST_ADDR,
4764 ANYCAST_ADDR,
4765 };
4766
4767 /* called with rcu_read_lock() */
4768 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
4769 struct netlink_callback *cb, enum addr_type_t type,
4770 int s_ip_idx, int *p_ip_idx)
4771 {
4772 struct ifmcaddr6 *ifmca;
4773 struct ifacaddr6 *ifaca;
4774 int err = 1;
4775 int ip_idx = *p_ip_idx;
4776
4777 read_lock_bh(&idev->lock);
4778 switch (type) {
4779 case UNICAST_ADDR: {
4780 struct inet6_ifaddr *ifa;
4781
4782 /* unicast address incl. temp addr */
4783 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4784 if (++ip_idx < s_ip_idx)
4785 continue;
4786 err = inet6_fill_ifaddr(skb, ifa,
4787 NETLINK_CB(cb->skb).portid,
4788 cb->nlh->nlmsg_seq,
4789 RTM_NEWADDR,
4790 NLM_F_MULTI);
4791 if (err < 0)
4792 break;
4793 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
4794 }
4795 break;
4796 }
4797 case MULTICAST_ADDR:
4798 /* multicast address */
4799 for (ifmca = idev->mc_list; ifmca;
4800 ifmca = ifmca->next, ip_idx++) {
4801 if (ip_idx < s_ip_idx)
4802 continue;
4803 err = inet6_fill_ifmcaddr(skb, ifmca,
4804 NETLINK_CB(cb->skb).portid,
4805 cb->nlh->nlmsg_seq,
4806 RTM_GETMULTICAST,
4807 NLM_F_MULTI);
4808 if (err < 0)
4809 break;
4810 }
4811 break;
4812 case ANYCAST_ADDR:
4813 /* anycast address */
4814 for (ifaca = idev->ac_list; ifaca;
4815 ifaca = ifaca->aca_next, ip_idx++) {
4816 if (ip_idx < s_ip_idx)
4817 continue;
4818 err = inet6_fill_ifacaddr(skb, ifaca,
4819 NETLINK_CB(cb->skb).portid,
4820 cb->nlh->nlmsg_seq,
4821 RTM_GETANYCAST,
4822 NLM_F_MULTI);
4823 if (err < 0)
4824 break;
4825 }
4826 break;
4827 default:
4828 break;
4829 }
4830 read_unlock_bh(&idev->lock);
4831 *p_ip_idx = ip_idx;
4832 return err;
4833 }
4834
4835 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
4836 enum addr_type_t type)
4837 {
4838 struct net *net = sock_net(skb->sk);
4839 int h, s_h;
4840 int idx, ip_idx;
4841 int s_idx, s_ip_idx;
4842 struct net_device *dev;
4843 struct inet6_dev *idev;
4844 struct hlist_head *head;
4845
4846 s_h = cb->args[0];
4847 s_idx = idx = cb->args[1];
4848 s_ip_idx = ip_idx = cb->args[2];
4849
4850 rcu_read_lock();
4851 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
4852 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4853 idx = 0;
4854 head = &net->dev_index_head[h];
4855 hlist_for_each_entry_rcu(dev, head, index_hlist) {
4856 if (idx < s_idx)
4857 goto cont;
4858 if (h > s_h || idx > s_idx)
4859 s_ip_idx = 0;
4860 ip_idx = 0;
4861 idev = __in6_dev_get(dev);
4862 if (!idev)
4863 goto cont;
4864
4865 if (in6_dump_addrs(idev, skb, cb, type,
4866 s_ip_idx, &ip_idx) < 0)
4867 goto done;
4868 cont:
4869 idx++;
4870 }
4871 }
4872 done:
4873 rcu_read_unlock();
4874 cb->args[0] = h;
4875 cb->args[1] = idx;
4876 cb->args[2] = ip_idx;
4877
4878 return skb->len;
4879 }
4880
4881 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
4882 {
4883 enum addr_type_t type = UNICAST_ADDR;
4884
4885 return inet6_dump_addr(skb, cb, type);
4886 }
4887
4888 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
4889 {
4890 enum addr_type_t type = MULTICAST_ADDR;
4891
4892 return inet6_dump_addr(skb, cb, type);
4893 }
4894
4895
4896 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
4897 {
4898 enum addr_type_t type = ANYCAST_ADDR;
4899
4900 return inet6_dump_addr(skb, cb, type);
4901 }
4902
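/*
 * RTM_GETADDR handler for a single address: look up the matching
 * inet6_ifaddr (optionally restricted to ifa_index) and unicast one
 * RTM_NEWADDR message back to the requester.
 */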
4903 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4904 struct netlink_ext_ack *extack)
4905 {
4906 struct net *net = sock_net(in_skb->sk);
4907 struct ifaddrmsg *ifm;
4908 struct nlattr *tb[IFA_MAX+1];
4909 struct in6_addr *addr = NULL, *peer;
4910 struct net_device *dev = NULL;
4911 struct inet6_ifaddr *ifa;
4912 struct sk_buff *skb;
4913 int err;
4914
4915 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy,
4916 extack);
4917 if (err < 0)
4918 return err;
4919
4920 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
4921 if (!addr)
4922 return -EINVAL;
4923
4924 ifm = nlmsg_data(nlh);
4925 if (ifm->ifa_index)
4926 dev = dev_get_by_index(net, ifm->ifa_index);
4927
4928 ifa = ipv6_get_ifaddr(net, addr, dev, 1);
4929 if (!ifa) {
4930 err = -EADDRNOTAVAIL;
4931 goto errout;
4932 }
4933
4934 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
4935 if (!skb) {
4936 err = -ENOBUFS;
4937 goto errout_ifa;
4938 }
4939
4940 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid,
4941 nlh->nlmsg_seq, RTM_NEWADDR, 0);
4942 if (err < 0) {
4943 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
4944 WARN_ON(err == -EMSGSIZE);
4945 kfree_skb(skb);
4946 goto errout_ifa;
4947 }
4948 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4949 errout_ifa:
4950 in6_ifa_put(ifa);
4951 errout:
4952 if (dev)
4953 dev_put(dev);
4954 return err;
4955 }
4956
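/*
 * Broadcast an address event to RTNLGRP_IPV6_IFADDR listeners; on allocation
 * or fill failure the error is recorded via rtnl_set_sk_err() instead.
 */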
4957 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
4958 {
4959 struct sk_buff *skb;
4960 struct net *net = dev_net(ifa->idev->dev);
4961 int err = -ENOBUFS;
4962
4963 	/* Don't send a DELADDR notification for a TENTATIVE address whose
4964 	 * DAD has not failed: no NEWADDR has been sent for it yet, since
4965 	 * NEWADDR is only sent after the TENTATIVE flag is removed.
4966 */
4967 if (ifa->flags & IFA_F_TENTATIVE && !(ifa->flags & IFA_F_DADFAILED) &&
4968 event == RTM_DELADDR)
4969 return;
4970
4971 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
4972 if (!skb)
4973 goto errout;
4974
4975 err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
4976 if (err < 0) {
4977 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
4978 WARN_ON(err == -EMSGSIZE);
4979 kfree_skb(skb);
4980 goto errout;
4981 }
4982 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
4983 return;
4984 errout:
4985 if (err < 0)
4986 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
4987 }
4988
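/*
 * Flatten the per-device configuration into the s32 array carried in
 * IFLA_INET6_CONF, indexed by the DEVCONF_* constants; jiffies-based values
 * are exported in milliseconds.
 */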
4989 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
4990 __s32 *array, int bytes)
4991 {
4992 BUG_ON(bytes < (DEVCONF_MAX * 4));
4993
4994 memset(array, 0, bytes);
4995 array[DEVCONF_FORWARDING] = cnf->forwarding;
4996 array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
4997 array[DEVCONF_MTU6] = cnf->mtu6;
4998 array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
4999 array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
5000 array[DEVCONF_AUTOCONF] = cnf->autoconf;
5001 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
5002 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
5003 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
5004 jiffies_to_msecs(cnf->rtr_solicit_interval);
5005 array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
5006 jiffies_to_msecs(cnf->rtr_solicit_max_interval);
5007 array[DEVCONF_RTR_SOLICIT_DELAY] =
5008 jiffies_to_msecs(cnf->rtr_solicit_delay);
5009 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
5010 array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
5011 jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
5012 array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
5013 jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
5014 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
5015 array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
5016 array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
5017 array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
5018 array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
5019 array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
5020 array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
5021 array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
5022 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
5023 #ifdef CONFIG_IPV6_ROUTER_PREF
5024 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
5025 array[DEVCONF_RTR_PROBE_INTERVAL] =
5026 jiffies_to_msecs(cnf->rtr_probe_interval);
5027 #ifdef CONFIG_IPV6_ROUTE_INFO
5028 array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
5029 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
5030 #endif
5031 #endif
5032 array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
5033 array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
5034 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5035 array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
5036 array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
5037 #endif
5038 #ifdef CONFIG_IPV6_MROUTE
5039 array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
5040 #endif
5041 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
5042 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
5043 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
5044 array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
5045 array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
5046 array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
5047 array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
5048 array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
5049 /* we omit DEVCONF_STABLE_SECRET for now */
5050 array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
5051 array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
5052 array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
5053 array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
5054 array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
5055 #ifdef CONFIG_IPV6_SEG6_HMAC
5056 array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
5057 #endif
5058 array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5059 array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5060 array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5061 array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
5062 }
5063
5064 static inline size_t inet6_ifla6_size(void)
5065 {
5066 return nla_total_size(4) /* IFLA_INET6_FLAGS */
5067 + nla_total_size(sizeof(struct ifla_cacheinfo))
5068 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5069 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5070 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5071 + nla_total_size(sizeof(struct in6_addr)); /* IFLA_INET6_TOKEN */
5072 }
5073
5074 static inline size_t inet6_if_nlmsg_size(void)
5075 {
5076 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5077 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5078 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5079 + nla_total_size(4) /* IFLA_MTU */
5080 + nla_total_size(4) /* IFLA_LINK */
5081 + nla_total_size(1) /* IFLA_OPERSTATE */
5082 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5083 }
5084
5085 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5086 int bytes)
5087 {
5088 int i;
5089 int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5090 BUG_ON(pad < 0);
5091
5092 /* Use put_unaligned() because stats may not be aligned for u64. */
5093 put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5094 for (i = 1; i < ICMP6_MIB_MAX; i++)
5095 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5096
5097 memset(&stats[ICMP6_MIB_MAX], 0, pad);
5098 }
5099
5100 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5101 int bytes, size_t syncpoff)
5102 {
5103 int i, c;
5104 u64 buff[IPSTATS_MIB_MAX];
5105 int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5106
5107 BUG_ON(pad < 0);
5108
5109 memset(buff, 0, sizeof(buff));
5110 buff[0] = IPSTATS_MIB_MAX;
5111
5112 for_each_possible_cpu(c) {
5113 for (i = 1; i < IPSTATS_MIB_MAX; i++)
5114 buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5115 }
5116
5117 memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5118 memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5119 }
5120
5121 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5122 int bytes)
5123 {
5124 switch (attrtype) {
5125 case IFLA_INET6_STATS:
5126 __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5127 offsetof(struct ipstats_mib, syncp));
5128 break;
5129 case IFLA_INET6_ICMP6STATS:
5130 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5131 break;
5132 }
5133 }
5134
5135 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5136 u32 ext_filter_mask)
5137 {
5138 struct nlattr *nla;
5139 struct ifla_cacheinfo ci;
5140
5141 if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5142 goto nla_put_failure;
5143 ci.max_reasm_len = IPV6_MAXPLEN;
5144 ci.tstamp = cstamp_delta(idev->tstamp);
5145 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5146 ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5147 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5148 goto nla_put_failure;
5149 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5150 if (!nla)
5151 goto nla_put_failure;
5152 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5153
5154 /* XXX - MC not implemented */
5155
5156 if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5157 return 0;
5158
5159 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5160 if (!nla)
5161 goto nla_put_failure;
5162 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5163
5164 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5165 if (!nla)
5166 goto nla_put_failure;
5167 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5168
5169 nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5170 if (!nla)
5171 goto nla_put_failure;
5172
5173 if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
5174 goto nla_put_failure;
5175
5176 read_lock_bh(&idev->lock);
5177 memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5178 read_unlock_bh(&idev->lock);
5179
5180 return 0;
5181
5182 nla_put_failure:
5183 return -EMSGSIZE;
5184 }
5185
5186 static size_t inet6_get_link_af_size(const struct net_device *dev,
5187 u32 ext_filter_mask)
5188 {
5189 if (!__in6_dev_get(dev))
5190 return 0;
5191
5192 return inet6_ifla6_size();
5193 }
5194
5195 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5196 u32 ext_filter_mask)
5197 {
5198 struct inet6_dev *idev = __in6_dev_get(dev);
5199
5200 if (!idev)
5201 return -ENODATA;
5202
5203 if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5204 return -EMSGSIZE;
5205
5206 return 0;
5207 }
5208
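/*
 * Install a tokenized interface identifier (the lower 64 bits of @token),
 * e.g. set with "ip token set ::0011:2233:4455:6677 dev eth0" (illustrative
 * value), resend a Router Solicitation if the device is ready, and expire
 * addresses derived from the previous token.
 */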
5209 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
5210 {
5211 struct inet6_ifaddr *ifp;
5212 struct net_device *dev = idev->dev;
5213 bool clear_token, update_rs = false;
5214 struct in6_addr ll_addr;
5215
5216 ASSERT_RTNL();
5217
5218 if (!token)
5219 return -EINVAL;
5220 if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
5221 return -EINVAL;
5222 if (!ipv6_accept_ra(idev))
5223 return -EINVAL;
5224 if (idev->cnf.rtr_solicits == 0)
5225 return -EINVAL;
5226
5227 write_lock_bh(&idev->lock);
5228
5229 BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5230 memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5231
5232 write_unlock_bh(&idev->lock);
5233
5234 clear_token = ipv6_addr_any(token);
5235 if (clear_token)
5236 goto update_lft;
5237
5238 if (!idev->dead && (idev->if_flags & IF_READY) &&
5239 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5240 IFA_F_OPTIMISTIC)) {
5241 /* If we're not ready, then normal ifup will take care
5242 		 * of this. Otherwise, we need to send our router solicitation here.
5243 */
5244 ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5245 update_rs = true;
5246 }
5247
5248 update_lft:
5249 write_lock_bh(&idev->lock);
5250
5251 if (update_rs) {
5252 idev->if_flags |= IF_RS_SENT;
5253 idev->rs_interval = rfc3315_s14_backoff_init(
5254 idev->cnf.rtr_solicit_interval);
5255 idev->rs_probes = 1;
5256 addrconf_mod_rs_timer(idev, idev->rs_interval);
5257 }
5258
5259 /* Well, that's kinda nasty ... */
5260 list_for_each_entry(ifp, &idev->addr_list, if_list) {
5261 spin_lock(&ifp->lock);
5262 if (ifp->tokenized) {
5263 ifp->valid_lft = 0;
5264 ifp->prefered_lft = 0;
5265 }
5266 spin_unlock(&ifp->lock);
5267 }
5268
5269 write_unlock_bh(&idev->lock);
5270 inet6_ifinfo_notify(RTM_NEWLINK, idev);
5271 addrconf_verify_rtnl();
5272 return 0;
5273 }
5274
5275 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5276 [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
5277 [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
5278 };
5279
5280 static int inet6_validate_link_af(const struct net_device *dev,
5281 const struct nlattr *nla)
5282 {
5283 struct nlattr *tb[IFLA_INET6_MAX + 1];
5284
5285 if (dev && !__in6_dev_get(dev))
5286 return -EAFNOSUPPORT;
5287
5288 return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy,
5289 NULL);
5290 }
5291
5292 static int check_addr_gen_mode(int mode)
5293 {
5294 if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5295 mode != IN6_ADDR_GEN_MODE_NONE &&
5296 mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5297 mode != IN6_ADDR_GEN_MODE_RANDOM)
5298 return -EINVAL;
5299 return 1;
5300 }
5301
5302 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5303 int mode)
5304 {
5305 if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5306 !idev->cnf.stable_secret.initialized &&
5307 !net->ipv6.devconf_dflt->stable_secret.initialized)
5308 return -EINVAL;
5309 return 1;
5310 }
5311
5312 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
5313 {
5314 int err = -EINVAL;
5315 struct inet6_dev *idev = __in6_dev_get(dev);
5316 struct nlattr *tb[IFLA_INET6_MAX + 1];
5317
5318 if (!idev)
5319 return -EAFNOSUPPORT;
5320
5321 if (nla_parse_nested(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
5322 BUG();
5323
5324 if (tb[IFLA_INET6_TOKEN]) {
5325 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
5326 if (err)
5327 return err;
5328 }
5329
5330 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5331 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5332
5333 if (check_addr_gen_mode(mode) < 0 ||
5334 check_stable_privacy(idev, dev_net(dev), mode) < 0)
5335 return -EINVAL;
5336
5337 idev->cnf.addr_gen_mode = mode;
5338 err = 0;
5339 }
5340
5341 return err;
5342 }
5343
5344 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5345 u32 portid, u32 seq, int event, unsigned int flags)
5346 {
5347 struct net_device *dev = idev->dev;
5348 struct ifinfomsg *hdr;
5349 struct nlmsghdr *nlh;
5350 void *protoinfo;
5351
5352 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5353 if (!nlh)
5354 return -EMSGSIZE;
5355
5356 hdr = nlmsg_data(nlh);
5357 hdr->ifi_family = AF_INET6;
5358 hdr->__ifi_pad = 0;
5359 hdr->ifi_type = dev->type;
5360 hdr->ifi_index = dev->ifindex;
5361 hdr->ifi_flags = dev_get_flags(dev);
5362 hdr->ifi_change = 0;
5363
5364 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5365 (dev->addr_len &&
5366 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5367 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5368 (dev->ifindex != dev_get_iflink(dev) &&
5369 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5370 nla_put_u8(skb, IFLA_OPERSTATE,
5371 netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5372 goto nla_put_failure;
5373 protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
5374 if (!protoinfo)
5375 goto nla_put_failure;
5376
5377 if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5378 goto nla_put_failure;
5379
5380 nla_nest_end(skb, protoinfo);
5381 nlmsg_end(skb, nlh);
5382 return 0;
5383
5384 nla_put_failure:
5385 nlmsg_cancel(skb, nlh);
5386 return -EMSGSIZE;
5387 }
5388
5389 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5390 {
5391 struct net *net = sock_net(skb->sk);
5392 int h, s_h;
5393 int idx = 0, s_idx;
5394 struct net_device *dev;
5395 struct inet6_dev *idev;
5396 struct hlist_head *head;
5397
5398 s_h = cb->args[0];
5399 s_idx = cb->args[1];
5400
5401 rcu_read_lock();
5402 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5403 idx = 0;
5404 head = &net->dev_index_head[h];
5405 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5406 if (idx < s_idx)
5407 goto cont;
5408 idev = __in6_dev_get(dev);
5409 if (!idev)
5410 goto cont;
5411 if (inet6_fill_ifinfo(skb, idev,
5412 NETLINK_CB(cb->skb).portid,
5413 cb->nlh->nlmsg_seq,
5414 RTM_NEWLINK, NLM_F_MULTI) < 0)
5415 goto out;
5416 cont:
5417 idx++;
5418 }
5419 }
5420 out:
5421 rcu_read_unlock();
5422 cb->args[1] = idx;
5423 cb->args[0] = h;
5424
5425 return skb->len;
5426 }
5427
5428 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
5429 {
5430 struct sk_buff *skb;
5431 struct net *net = dev_net(idev->dev);
5432 int err = -ENOBUFS;
5433
5434 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
5435 if (!skb)
5436 goto errout;
5437
5438 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
5439 if (err < 0) {
5440 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
5441 WARN_ON(err == -EMSGSIZE);
5442 kfree_skb(skb);
5443 goto errout;
5444 }
5445 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
5446 return;
5447 errout:
5448 if (err < 0)
5449 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
5450 }
5451
5452 static inline size_t inet6_prefix_nlmsg_size(void)
5453 {
5454 return NLMSG_ALIGN(sizeof(struct prefixmsg))
5455 + nla_total_size(sizeof(struct in6_addr))
5456 + nla_total_size(sizeof(struct prefix_cacheinfo));
5457 }
5458
5459 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
5460 struct prefix_info *pinfo, u32 portid, u32 seq,
5461 int event, unsigned int flags)
5462 {
5463 struct prefixmsg *pmsg;
5464 struct nlmsghdr *nlh;
5465 struct prefix_cacheinfo ci;
5466
5467 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
5468 if (!nlh)
5469 return -EMSGSIZE;
5470
5471 pmsg = nlmsg_data(nlh);
5472 pmsg->prefix_family = AF_INET6;
5473 pmsg->prefix_pad1 = 0;
5474 pmsg->prefix_pad2 = 0;
5475 pmsg->prefix_ifindex = idev->dev->ifindex;
5476 pmsg->prefix_len = pinfo->prefix_len;
5477 pmsg->prefix_type = pinfo->type;
5478 pmsg->prefix_pad3 = 0;
5479 pmsg->prefix_flags = 0;
5480 if (pinfo->onlink)
5481 pmsg->prefix_flags |= IF_PREFIX_ONLINK;
5482 if (pinfo->autoconf)
5483 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
5484
5485 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
5486 goto nla_put_failure;
5487 ci.preferred_time = ntohl(pinfo->prefered);
5488 ci.valid_time = ntohl(pinfo->valid);
5489 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
5490 goto nla_put_failure;
5491 nlmsg_end(skb, nlh);
5492 return 0;
5493
5494 nla_put_failure:
5495 nlmsg_cancel(skb, nlh);
5496 return -EMSGSIZE;
5497 }
5498
5499 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
5500 struct prefix_info *pinfo)
5501 {
5502 struct sk_buff *skb;
5503 struct net *net = dev_net(idev->dev);
5504 int err = -ENOBUFS;
5505
5506 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
5507 if (!skb)
5508 goto errout;
5509
5510 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
5511 if (err < 0) {
5512 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
5513 WARN_ON(err == -EMSGSIZE);
5514 kfree_skb(skb);
5515 goto errout;
5516 }
5517 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
5518 return;
5519 errout:
5520 if (err < 0)
5521 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
5522 }
5523
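/*
 * React to an address event beyond the netlink notification: on RTM_NEWADDR
 * insert the host route (unless optimistic DAD already did) and join the
 * anycast group when forwarding; on RTM_DELADDR drop those routes, leave the
 * solicited-node group and bump the IPv6 route generation id.
 */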
5524 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5525 {
5526 struct net *net = dev_net(ifp->idev->dev);
5527
5528 if (event)
5529 ASSERT_RTNL();
5530
5531 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
5532
5533 switch (event) {
5534 case RTM_NEWADDR:
5535 /*
5536 * If the address was optimistic
5537 * we inserted the route at the start of
5538 * our DAD process, so we don't need
5539 * to do it again
5540 */
5541 if (!rcu_access_pointer(ifp->rt->rt6i_node))
5542 ip6_ins_rt(ifp->rt);
5543 if (ifp->idev->cnf.forwarding)
5544 addrconf_join_anycast(ifp);
5545 if (!ipv6_addr_any(&ifp->peer_addr))
5546 addrconf_prefix_route(&ifp->peer_addr, 128,
5547 ifp->idev->dev, 0, 0);
5548 break;
5549 case RTM_DELADDR:
5550 if (ifp->idev->cnf.forwarding)
5551 addrconf_leave_anycast(ifp);
5552 addrconf_leave_solict(ifp->idev, &ifp->addr);
5553 if (!ipv6_addr_any(&ifp->peer_addr)) {
5554 struct rt6_info *rt;
5555
5556 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
5557 ifp->idev->dev, 0, 0);
5558 if (rt)
5559 ip6_del_rt(rt);
5560 }
5561 if (ifp->rt) {
5562 if (dst_hold_safe(&ifp->rt->dst))
5563 ip6_del_rt(ifp->rt);
5564 }
5565 rt_genid_bump_ipv6(net);
5566 break;
5567 }
5568 atomic_inc(&net->ipv6.dev_addr_genid);
5569 }
5570
5571 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5572 {
5573 rcu_read_lock_bh();
5574 if (likely(ifp->idev->dead == 0))
5575 __ipv6_ifa_notify(event, ifp);
5576 rcu_read_unlock_bh();
5577 }
5578
5579 #ifdef CONFIG_SYSCTL
5580
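/*
 * Handler for the "forwarding" entries, e.g.
 * "sysctl -w net.ipv6.conf.all.forwarding=1". The new value is parsed into a
 * local copy first, because the actual flip must happen under the RTNL lock
 * in addrconf_fixup_forwarding().
 */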
5581 static
5582 int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
5583 void __user *buffer, size_t *lenp, loff_t *ppos)
5584 {
5585 int *valp = ctl->data;
5586 int val = *valp;
5587 loff_t pos = *ppos;
5588 struct ctl_table lctl;
5589 int ret;
5590
5591 /*
5592 	 * ctl->data points to idev->cnf.forwarding, so we must not
5593 	 * modify it until we hold the RTNL lock.
5594 */
5595 lctl = *ctl;
5596 lctl.data = &val;
5597
5598 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5599
5600 if (write)
5601 ret = addrconf_fixup_forwarding(ctl, valp, val);
5602 if (ret)
5603 *ppos = pos;
5604 return ret;
5605 }
5606
5607 static
5608 int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
5609 void __user *buffer, size_t *lenp, loff_t *ppos)
5610 {
5611 struct inet6_dev *idev = ctl->extra1;
5612 int min_mtu = IPV6_MIN_MTU;
5613 struct ctl_table lctl;
5614
5615 lctl = *ctl;
5616 lctl.extra1 = &min_mtu;
5617 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
5618
5619 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
5620 }
5621
5622 static void dev_disable_change(struct inet6_dev *idev)
5623 {
5624 struct netdev_notifier_info info;
5625
5626 if (!idev || !idev->dev)
5627 return;
5628
5629 netdev_notifier_info_init(&info, idev->dev);
5630 if (idev->cnf.disable_ipv6)
5631 addrconf_notify(NULL, NETDEV_DOWN, &info);
5632 else
5633 addrconf_notify(NULL, NETDEV_UP, &info);
5634 }
5635
5636 static void addrconf_disable_change(struct net *net, __s32 newf)
5637 {
5638 struct net_device *dev;
5639 struct inet6_dev *idev;
5640
5641 for_each_netdev(net, dev) {
5642 idev = __in6_dev_get(dev);
5643 if (idev) {
5644 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
5645 idev->cnf.disable_ipv6 = newf;
5646 if (changed)
5647 dev_disable_change(idev);
5648 }
5649 }
5650 }
5651
5652 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
5653 {
5654 struct net *net;
5655 int old;
5656
5657 if (!rtnl_trylock())
5658 return restart_syscall();
5659
5660 net = (struct net *)table->extra2;
5661 old = *p;
5662 *p = newf;
5663
5664 if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
5665 rtnl_unlock();
5666 return 0;
5667 }
5668
5669 if (p == &net->ipv6.devconf_all->disable_ipv6) {
5670 net->ipv6.devconf_dflt->disable_ipv6 = newf;
5671 addrconf_disable_change(net, newf);
5672 } else if ((!newf) ^ (!old))
5673 dev_disable_change((struct inet6_dev *)table->extra1);
5674
5675 rtnl_unlock();
5676 return 0;
5677 }
5678
5679 static
5680 int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
5681 void __user *buffer, size_t *lenp, loff_t *ppos)
5682 {
5683 int *valp = ctl->data;
5684 int val = *valp;
5685 loff_t pos = *ppos;
5686 struct ctl_table lctl;
5687 int ret;
5688
5689 /*
5690 	 * ctl->data points to idev->cnf.disable_ipv6, so we must not
5691 	 * modify it until we hold the RTNL lock.
5692 */
5693 lctl = *ctl;
5694 lctl.data = &val;
5695
5696 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5697
5698 if (write)
5699 ret = addrconf_disable_ipv6(ctl, valp, val);
5700 if (ret)
5701 *ppos = pos;
5702 return ret;
5703 }
5704
5705 static
5706 int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
5707 void __user *buffer, size_t *lenp, loff_t *ppos)
5708 {
5709 int *valp = ctl->data;
5710 int ret;
5711 int old, new;
5712
5713 old = *valp;
5714 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5715 new = *valp;
5716
5717 if (write && old != new) {
5718 struct net *net = ctl->extra2;
5719
5720 if (!rtnl_trylock())
5721 return restart_syscall();
5722
5723 if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
5724 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5725 NETCONFA_PROXY_NEIGH,
5726 NETCONFA_IFINDEX_DEFAULT,
5727 net->ipv6.devconf_dflt);
5728 else if (valp == &net->ipv6.devconf_all->proxy_ndp)
5729 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5730 NETCONFA_PROXY_NEIGH,
5731 NETCONFA_IFINDEX_ALL,
5732 net->ipv6.devconf_all);
5733 else {
5734 struct inet6_dev *idev = ctl->extra1;
5735
5736 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
5737 NETCONFA_PROXY_NEIGH,
5738 idev->dev->ifindex,
5739 &idev->cnf);
5740 }
5741 rtnl_unlock();
5742 }
5743
5744 return ret;
5745 }
5746
5747 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
5748 void __user *buffer, size_t *lenp,
5749 loff_t *ppos)
5750 {
5751 int ret = 0;
5752 int new_val;
5753 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
5754 struct net *net = (struct net *)ctl->extra2;
5755
5756 if (!rtnl_trylock())
5757 return restart_syscall();
5758
5759 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5760
5761 if (write) {
5762 new_val = *((int *)ctl->data);
5763
5764 if (check_addr_gen_mode(new_val) < 0) {
5765 ret = -EINVAL;
5766 goto out;
5767 }
5768
5769 /* request for default */
5770 if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) {
5771 ipv6_devconf_dflt.addr_gen_mode = new_val;
5772
5773 /* request for individual net device */
5774 } else {
5775 if (!idev)
5776 goto out;
5777
5778 if (check_stable_privacy(idev, net, new_val) < 0) {
5779 ret = -EINVAL;
5780 goto out;
5781 }
5782
5783 if (idev->cnf.addr_gen_mode != new_val) {
5784 idev->cnf.addr_gen_mode = new_val;
5785 addrconf_dev_config(idev->dev);
5786 }
5787 }
5788 }
5789
5790 out:
5791 rtnl_unlock();
5792
5793 return ret;
5794 }
5795
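/*
 * Read or write conf/<dev>/stable_secret as an IPv6-address-formatted string;
 * writing it marks the secret as initialized and switches the affected
 * device(s) to IN6_ADDR_GEN_MODE_STABLE_PRIVACY. The conf/all entry is
 * rejected with -EIO.
 */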
5796 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
5797 void __user *buffer, size_t *lenp,
5798 loff_t *ppos)
5799 {
5800 int err;
5801 struct in6_addr addr;
5802 char str[IPV6_MAX_STRLEN];
5803 struct ctl_table lctl = *ctl;
5804 struct net *net = ctl->extra2;
5805 struct ipv6_stable_secret *secret = ctl->data;
5806
5807 if (&net->ipv6.devconf_all->stable_secret == ctl->data)
5808 return -EIO;
5809
5810 lctl.maxlen = IPV6_MAX_STRLEN;
5811 lctl.data = str;
5812
5813 if (!rtnl_trylock())
5814 return restart_syscall();
5815
5816 if (!write && !secret->initialized) {
5817 err = -EIO;
5818 goto out;
5819 }
5820
5821 err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
5822 if (err >= sizeof(str)) {
5823 err = -EIO;
5824 goto out;
5825 }
5826
5827 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
5828 if (err || !write)
5829 goto out;
5830
5831 if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
5832 err = -EIO;
5833 goto out;
5834 }
5835
5836 secret->initialized = true;
5837 secret->secret = addr;
5838
5839 if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
5840 struct net_device *dev;
5841
5842 for_each_netdev(net, dev) {
5843 struct inet6_dev *idev = __in6_dev_get(dev);
5844
5845 if (idev) {
5846 idev->cnf.addr_gen_mode =
5847 IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
5848 }
5849 }
5850 } else {
5851 struct inet6_dev *idev = ctl->extra1;
5852
5853 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
5854 }
5855
5856 out:
5857 rtnl_unlock();
5858
5859 return err;
5860 }
5861
5862 static
5863 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
5864 int write,
5865 void __user *buffer,
5866 size_t *lenp,
5867 loff_t *ppos)
5868 {
5869 int *valp = ctl->data;
5870 int val = *valp;
5871 loff_t pos = *ppos;
5872 struct ctl_table lctl;
5873 int ret;
5874
5875 	/* ctl->data points to idev->cnf.ignore_routes_with_linkdown, so we
5876 	 * must not modify it until we hold the RTNL lock.
5877 */
5878 lctl = *ctl;
5879 lctl.data = &val;
5880
5881 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5882
5883 if (write)
5884 ret = addrconf_fixup_linkdown(ctl, valp, val);
5885 if (ret)
5886 *ppos = pos;
5887 return ret;
5888 }
5889
5890 static
5891 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
5892 {
5893 if (rt) {
5894 if (action)
5895 rt->dst.flags |= DST_NOPOLICY;
5896 else
5897 rt->dst.flags &= ~DST_NOPOLICY;
5898 }
5899 }
5900
5901 static
5902 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
5903 {
5904 struct inet6_ifaddr *ifa;
5905
5906 read_lock_bh(&idev->lock);
5907 list_for_each_entry(ifa, &idev->addr_list, if_list) {
5908 spin_lock(&ifa->lock);
5909 if (ifa->rt) {
5910 struct rt6_info *rt = ifa->rt;
5911 int cpu;
5912
5913 rcu_read_lock();
5914 addrconf_set_nopolicy(ifa->rt, val);
5915 if (rt->rt6i_pcpu) {
5916 for_each_possible_cpu(cpu) {
5917 struct rt6_info **rtp;
5918
5919 rtp = per_cpu_ptr(rt->rt6i_pcpu, cpu);
5920 addrconf_set_nopolicy(*rtp, val);
5921 }
5922 }
5923 rcu_read_unlock();
5924 }
5925 spin_unlock(&ifa->lock);
5926 }
5927 read_unlock_bh(&idev->lock);
5928 }
5929
5930 static
5931 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
5932 {
5933 struct inet6_dev *idev;
5934 struct net *net;
5935
5936 if (!rtnl_trylock())
5937 return restart_syscall();
5938
5939 *valp = val;
5940
5941 net = (struct net *)ctl->extra2;
5942 if (valp == &net->ipv6.devconf_dflt->disable_policy) {
5943 rtnl_unlock();
5944 return 0;
5945 }
5946
5947 if (valp == &net->ipv6.devconf_all->disable_policy) {
5948 struct net_device *dev;
5949
5950 for_each_netdev(net, dev) {
5951 idev = __in6_dev_get(dev);
5952 if (idev)
5953 addrconf_disable_policy_idev(idev, val);
5954 }
5955 } else {
5956 idev = (struct inet6_dev *)ctl->extra1;
5957 addrconf_disable_policy_idev(idev, val);
5958 }
5959
5960 rtnl_unlock();
5961 return 0;
5962 }
5963
5964 static
5965 int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
5966 void __user *buffer, size_t *lenp,
5967 loff_t *ppos)
5968 {
5969 int *valp = ctl->data;
5970 int val = *valp;
5971 loff_t pos = *ppos;
5972 struct ctl_table lctl;
5973 int ret;
5974
5975 lctl = *ctl;
5976 lctl.data = &val;
5977 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5978
5979 if (write && (*valp != val))
5980 ret = addrconf_disable_policy(ctl, valp, val);
5981
5982 if (ret)
5983 *ppos = pos;
5984
5985 return ret;
5986 }
5987
5988 static int minus_one = -1;
5989 static const int zero = 0;
5990 static const int one = 1;
5991 static const int two_five_five = 255;
5992
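/*
 * Per-interface configuration knobs; each entry appears under
 * /proc/sys/net/ipv6/conf/{all,default,<ifname>}/.
 */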
5993 static const struct ctl_table addrconf_sysctl[] = {
5994 {
5995 .procname = "forwarding",
5996 .data = &ipv6_devconf.forwarding,
5997 .maxlen = sizeof(int),
5998 .mode = 0644,
5999 .proc_handler = addrconf_sysctl_forward,
6000 },
6001 {
6002 .procname = "hop_limit",
6003 .data = &ipv6_devconf.hop_limit,
6004 .maxlen = sizeof(int),
6005 .mode = 0644,
6006 .proc_handler = proc_dointvec_minmax,
6007 .extra1 = (void *)&one,
6008 .extra2 = (void *)&two_five_five,
6009 },
6010 {
6011 .procname = "mtu",
6012 .data = &ipv6_devconf.mtu6,
6013 .maxlen = sizeof(int),
6014 .mode = 0644,
6015 .proc_handler = addrconf_sysctl_mtu,
6016 },
6017 {
6018 .procname = "accept_ra",
6019 .data = &ipv6_devconf.accept_ra,
6020 .maxlen = sizeof(int),
6021 .mode = 0644,
6022 .proc_handler = proc_dointvec,
6023 },
6024 {
6025 .procname = "accept_redirects",
6026 .data = &ipv6_devconf.accept_redirects,
6027 .maxlen = sizeof(int),
6028 .mode = 0644,
6029 .proc_handler = proc_dointvec,
6030 },
6031 {
6032 .procname = "autoconf",
6033 .data = &ipv6_devconf.autoconf,
6034 .maxlen = sizeof(int),
6035 .mode = 0644,
6036 .proc_handler = proc_dointvec,
6037 },
6038 {
6039 .procname = "dad_transmits",
6040 .data = &ipv6_devconf.dad_transmits,
6041 .maxlen = sizeof(int),
6042 .mode = 0644,
6043 .proc_handler = proc_dointvec,
6044 },
6045 {
6046 .procname = "router_solicitations",
6047 .data = &ipv6_devconf.rtr_solicits,
6048 .maxlen = sizeof(int),
6049 .mode = 0644,
6050 .proc_handler = proc_dointvec_minmax,
6051 .extra1 = &minus_one,
6052 },
6053 {
6054 .procname = "router_solicitation_interval",
6055 .data = &ipv6_devconf.rtr_solicit_interval,
6056 .maxlen = sizeof(int),
6057 .mode = 0644,
6058 .proc_handler = proc_dointvec_jiffies,
6059 },
6060 {
6061 .procname = "router_solicitation_max_interval",
6062 .data = &ipv6_devconf.rtr_solicit_max_interval,
6063 .maxlen = sizeof(int),
6064 .mode = 0644,
6065 .proc_handler = proc_dointvec_jiffies,
6066 },
6067 {
6068 .procname = "router_solicitation_delay",
6069 .data = &ipv6_devconf.rtr_solicit_delay,
6070 .maxlen = sizeof(int),
6071 .mode = 0644,
6072 .proc_handler = proc_dointvec_jiffies,
6073 },
6074 {
6075 .procname = "force_mld_version",
6076 .data = &ipv6_devconf.force_mld_version,
6077 .maxlen = sizeof(int),
6078 .mode = 0644,
6079 .proc_handler = proc_dointvec,
6080 },
6081 {
6082 .procname = "mldv1_unsolicited_report_interval",
6083 .data =
6084 &ipv6_devconf.mldv1_unsolicited_report_interval,
6085 .maxlen = sizeof(int),
6086 .mode = 0644,
6087 .proc_handler = proc_dointvec_ms_jiffies,
6088 },
6089 {
6090 .procname = "mldv2_unsolicited_report_interval",
6091 .data =
6092 &ipv6_devconf.mldv2_unsolicited_report_interval,
6093 .maxlen = sizeof(int),
6094 .mode = 0644,
6095 .proc_handler = proc_dointvec_ms_jiffies,
6096 },
6097 {
6098 .procname = "use_tempaddr",
6099 .data = &ipv6_devconf.use_tempaddr,
6100 .maxlen = sizeof(int),
6101 .mode = 0644,
6102 .proc_handler = proc_dointvec,
6103 },
6104 {
6105 .procname = "temp_valid_lft",
6106 .data = &ipv6_devconf.temp_valid_lft,
6107 .maxlen = sizeof(int),
6108 .mode = 0644,
6109 .proc_handler = proc_dointvec,
6110 },
6111 {
6112 .procname = "temp_prefered_lft",
6113 .data = &ipv6_devconf.temp_prefered_lft,
6114 .maxlen = sizeof(int),
6115 .mode = 0644,
6116 .proc_handler = proc_dointvec,
6117 },
6118 {
6119 .procname = "regen_max_retry",
6120 .data = &ipv6_devconf.regen_max_retry,
6121 .maxlen = sizeof(int),
6122 .mode = 0644,
6123 .proc_handler = proc_dointvec,
6124 },
6125 {
6126 .procname = "max_desync_factor",
6127 .data = &ipv6_devconf.max_desync_factor,
6128 .maxlen = sizeof(int),
6129 .mode = 0644,
6130 .proc_handler = proc_dointvec,
6131 },
6132 {
6133 .procname = "max_addresses",
6134 .data = &ipv6_devconf.max_addresses,
6135 .maxlen = sizeof(int),
6136 .mode = 0644,
6137 .proc_handler = proc_dointvec,
6138 },
6139 {
6140 .procname = "accept_ra_defrtr",
6141 .data = &ipv6_devconf.accept_ra_defrtr,
6142 .maxlen = sizeof(int),
6143 .mode = 0644,
6144 .proc_handler = proc_dointvec,
6145 },
6146 {
6147 .procname = "accept_ra_min_hop_limit",
6148 .data = &ipv6_devconf.accept_ra_min_hop_limit,
6149 .maxlen = sizeof(int),
6150 .mode = 0644,
6151 .proc_handler = proc_dointvec,
6152 },
6153 {
6154 .procname = "accept_ra_pinfo",
6155 .data = &ipv6_devconf.accept_ra_pinfo,
6156 .maxlen = sizeof(int),
6157 .mode = 0644,
6158 .proc_handler = proc_dointvec,
6159 },
6160 #ifdef CONFIG_IPV6_ROUTER_PREF
6161 {
6162 .procname = "accept_ra_rtr_pref",
6163 .data = &ipv6_devconf.accept_ra_rtr_pref,
6164 .maxlen = sizeof(int),
6165 .mode = 0644,
6166 .proc_handler = proc_dointvec,
6167 },
6168 {
6169 .procname = "router_probe_interval",
6170 .data = &ipv6_devconf.rtr_probe_interval,
6171 .maxlen = sizeof(int),
6172 .mode = 0644,
6173 .proc_handler = proc_dointvec_jiffies,
6174 },
6175 #ifdef CONFIG_IPV6_ROUTE_INFO
6176 {
6177 .procname = "accept_ra_rt_info_min_plen",
6178 .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
6179 .maxlen = sizeof(int),
6180 .mode = 0644,
6181 .proc_handler = proc_dointvec,
6182 },
6183 {
6184 .procname = "accept_ra_rt_info_max_plen",
6185 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
6186 .maxlen = sizeof(int),
6187 .mode = 0644,
6188 .proc_handler = proc_dointvec,
6189 },
6190 #endif
6191 #endif
6192 {
6193 .procname = "proxy_ndp",
6194 .data = &ipv6_devconf.proxy_ndp,
6195 .maxlen = sizeof(int),
6196 .mode = 0644,
6197 .proc_handler = addrconf_sysctl_proxy_ndp,
6198 },
6199 {
6200 .procname = "accept_source_route",
6201 .data = &ipv6_devconf.accept_source_route,
6202 .maxlen = sizeof(int),
6203 .mode = 0644,
6204 .proc_handler = proc_dointvec,
6205 },
6206 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
6207 {
6208 .procname = "optimistic_dad",
6209 .data = &ipv6_devconf.optimistic_dad,
6210 .maxlen = sizeof(int),
6211 .mode = 0644,
6212 .proc_handler = proc_dointvec,
6213 },
6214 {
6215 .procname = "use_optimistic",
6216 .data = &ipv6_devconf.use_optimistic,
6217 .maxlen = sizeof(int),
6218 .mode = 0644,
6219 .proc_handler = proc_dointvec,
6220 },
6221 #endif
6222 #ifdef CONFIG_IPV6_MROUTE
6223 {
6224 .procname = "mc_forwarding",
6225 .data = &ipv6_devconf.mc_forwarding,
6226 .maxlen = sizeof(int),
6227 .mode = 0444,
6228 .proc_handler = proc_dointvec,
6229 },
6230 #endif
6231 {
6232 .procname = "disable_ipv6",
6233 .data = &ipv6_devconf.disable_ipv6,
6234 .maxlen = sizeof(int),
6235 .mode = 0644,
6236 .proc_handler = addrconf_sysctl_disable,
6237 },
6238 {
6239 .procname = "accept_dad",
6240 .data = &ipv6_devconf.accept_dad,
6241 .maxlen = sizeof(int),
6242 .mode = 0644,
6243 .proc_handler = proc_dointvec,
6244 },
6245 {
6246 .procname = "force_tllao",
6247 .data = &ipv6_devconf.force_tllao,
6248 .maxlen = sizeof(int),
6249 .mode = 0644,
6250 .proc_handler = proc_dointvec,
6251 },
6252 {
6253 .procname = "ndisc_notify",
6254 .data = &ipv6_devconf.ndisc_notify,
6255 .maxlen = sizeof(int),
6256 .mode = 0644,
6257 .proc_handler = proc_dointvec,
6258 },
6259 {
6260 .procname = "suppress_frag_ndisc",
6261 .data = &ipv6_devconf.suppress_frag_ndisc,
6262 .maxlen = sizeof(int),
6263 .mode = 0644,
6264 .proc_handler = proc_dointvec,
6265 },
6266 {
6267 .procname = "accept_ra_from_local",
6268 .data = &ipv6_devconf.accept_ra_from_local,
6269 .maxlen = sizeof(int),
6270 .mode = 0644,
6271 .proc_handler = proc_dointvec,
6272 },
6273 {
6274 .procname = "accept_ra_mtu",
6275 .data = &ipv6_devconf.accept_ra_mtu,
6276 .maxlen = sizeof(int),
6277 .mode = 0644,
6278 .proc_handler = proc_dointvec,
6279 },
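	/* Secret for RFC 7217 stable-privacy interface identifiers.  The
	 * secret is supplied as an IPv6-address-formatted string, hence the
	 * string-sized maxlen and the restrictive 0600 mode below.
	 */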
6280 {
6281 .procname = "stable_secret",
6282 .data = &ipv6_devconf.stable_secret,
6283 .maxlen = IPV6_MAX_STRLEN,
6284 .mode = 0600,
6285 .proc_handler = addrconf_sysctl_stable_secret,
6286 },
6287 {
6288 .procname = "use_oif_addrs_only",
6289 .data = &ipv6_devconf.use_oif_addrs_only,
6290 .maxlen = sizeof(int),
6291 .mode = 0644,
6292 .proc_handler = proc_dointvec,
6293 },
6294 {
6295 .procname = "ignore_routes_with_linkdown",
6296 .data = &ipv6_devconf.ignore_routes_with_linkdown,
6297 .maxlen = sizeof(int),
6298 .mode = 0644,
6299 .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
6300 },
6301 {
6302 .procname = "drop_unicast_in_l2_multicast",
6303 .data = &ipv6_devconf.drop_unicast_in_l2_multicast,
6304 .maxlen = sizeof(int),
6305 .mode = 0644,
6306 .proc_handler = proc_dointvec,
6307 },
6308 {
6309 .procname = "drop_unsolicited_na",
6310 .data = &ipv6_devconf.drop_unsolicited_na,
6311 .maxlen = sizeof(int),
6312 .mode = 0644,
6313 .proc_handler = proc_dointvec,
6314 },
6315 {
6316 .procname = "keep_addr_on_down",
6317 .data = &ipv6_devconf.keep_addr_on_down,
6318 .maxlen = sizeof(int),
6319 .mode = 0644,
6320 .proc_handler = proc_dointvec,
6322 },
6323 {
6324 .procname = "seg6_enabled",
6325 .data = &ipv6_devconf.seg6_enabled,
6326 .maxlen = sizeof(int),
6327 .mode = 0644,
6328 .proc_handler = proc_dointvec,
6329 },
6330 #ifdef CONFIG_IPV6_SEG6_HMAC
6331 {
6332 .procname = "seg6_require_hmac",
6333 .data = &ipv6_devconf.seg6_require_hmac,
6334 .maxlen = sizeof(int),
6335 .mode = 0644,
6336 .proc_handler = proc_dointvec,
6337 },
6338 #endif
6339 {
6340 .procname = "enhanced_dad",
6341 .data = &ipv6_devconf.enhanced_dad,
6342 .maxlen = sizeof(int),
6343 .mode = 0644,
6344 .proc_handler = proc_dointvec,
6345 },
6346 {
6347 .procname = "addr_gen_mode",
6348 .data = &ipv6_devconf.addr_gen_mode,
6349 .maxlen = sizeof(int),
6350 .mode = 0644,
6351 .proc_handler = addrconf_sysctl_addr_gen_mode,
6352 },
6353 {
6354 .procname = "disable_policy",
6355 .data = &ipv6_devconf.disable_policy,
6356 .maxlen = sizeof(int),
6357 .mode = 0644,
6358 .proc_handler = addrconf_sysctl_disable_policy,
6359 },
6360 {
6361 .procname = "ndisc_tclass",
6362 .data = &ipv6_devconf.ndisc_tclass,
6363 .maxlen = sizeof(int),
6364 .mode = 0644,
6365 .proc_handler = proc_dointvec_minmax,
6366 .extra1 = (void *)&zero,
6367 .extra2 = (void *)&two_five_five,
6368 },
6369 {
6370 /* sentinel */
6371 }
6372 };
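
/*
 * Every entry above is duplicated per device (plus the special "all" and
 * "default" pseudo-entries) and registered under net/ipv6/conf/<name>/ by
 * __addrconf_sysctl_register() below, so the knobs surface in procfs as
 * e.g. /proc/sys/net/ipv6/conf/eth0/accept_ra_pinfo.  A minimal usage
 * sketch from userspace (assuming an interface named eth0 exists):
 *
 *	sysctl -w net.ipv6.conf.eth0.use_tempaddr=2
 *	cat /proc/sys/net/ipv6/conf/all/disable_ipv6
 */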
6373
6374 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
6375 struct inet6_dev *idev, struct ipv6_devconf *p)
6376 {
6377 int i, ifindex;
6378 struct ctl_table *table;
6379 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
6380
6381 table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
6382 if (!table)
6383 goto out;
6384
6385 for (i = 0; table[i].data; i++) {
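		/* Rebase each template pointer, which points into the static
		 * ipv6_devconf, so that it points at the matching field of
		 * this device's (or namespace's) private copy 'p'.
		 */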
6386 table[i].data += (char *)p - (char *)&ipv6_devconf;
6387 /* If extra1 or extra2 is already set (e.g. as a min/max bound
6388 * for proc_dointvec_minmax), it is not safe to overwrite either
6389 * of them with the idev/net pointers assigned below.
6390 */
6391 if (!table[i].extra1 && !table[i].extra2) {
6392 table[i].extra1 = idev; /* embedded; no ref */
6393 table[i].extra2 = net;
6394 }
6395 }
6396
6397 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
6398
6399 p->sysctl_header = register_net_sysctl(net, path, table);
6400 if (!p->sysctl_header)
6401 goto free;
6402
6403 if (!strcmp(dev_name, "all"))
6404 ifindex = NETCONFA_IFINDEX_ALL;
6405 else if (!strcmp(dev_name, "default"))
6406 ifindex = NETCONFA_IFINDEX_DEFAULT;
6407 else
6408 ifindex = idev->dev->ifindex;
6409 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
6410 ifindex, p);
6411 return 0;
6412
6413 free:
6414 kfree(table);
6415 out:
6416 return -ENOBUFS;
6417 }
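
/*
 * The pointer arithmetic above is the usual "rebase a field pointer from a
 * template object onto another instance" idiom.  An illustrative stand-alone
 * sketch with a hypothetical struct (not kernel code):
 *
 *	struct conf { int a; int b; };
 *	static struct conf template;
 *
 *	static int *rebase(int *field_in_template, struct conf *copy)
 *	{
 *		return (int *)((char *)field_in_template +
 *			       ((char *)copy - (char *)&template));
 *	}
 *
 * rebase(&template.b, &copy) yields &copy.b, just as table[i].data ends up
 * pointing into the per-device ipv6_devconf copy rather than the template.
 */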
6418
6419 static void __addrconf_sysctl_unregister(struct net *net,
6420 struct ipv6_devconf *p, int ifindex)
6421 {
6422 struct ctl_table *table;
6423
6424 if (!p->sysctl_header)
6425 return;
6426
6427 table = p->sysctl_header->ctl_table_arg;
6428 unregister_net_sysctl_table(p->sysctl_header);
6429 p->sysctl_header = NULL;
6430 kfree(table);
6431
6432 inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
6433 }
6434
6435 static int addrconf_sysctl_register(struct inet6_dev *idev)
6436 {
6437 int err;
6438
6439 if (!sysctl_dev_name_is_allowed(idev->dev->name))
6440 return -EINVAL;
6441
6442 err = neigh_sysctl_register(idev->dev, idev->nd_parms,
6443 &ndisc_ifinfo_sysctl_change);
6444 if (err)
6445 return err;
6446 err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
6447 idev, &idev->cnf);
6448 if (err)
6449 neigh_sysctl_unregister(idev->nd_parms);
6450
6451 return err;
6452 }
6453
6454 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
6455 {
6456 __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
6457 idev->dev->ifindex);
6458 neigh_sysctl_unregister(idev->nd_parms);
6459 }
6460
6461
6462 #endif
6463
6464 static int __net_init addrconf_init_net(struct net *net)
6465 {
6466 int err = -ENOMEM;
6467 struct ipv6_devconf *all, *dflt;
6468
6469 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
6470 if (!all)
6471 goto err_alloc_all;
6472
6473 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
6474 if (!dflt)
6475 goto err_alloc_dflt;
6476
6477 /* these will be inherited by all namespaces */
6478 dflt->autoconf = ipv6_defaults.autoconf;
6479 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
6480
6481 dflt->stable_secret.initialized = false;
6482 all->stable_secret.initialized = false;
6483
6484 net->ipv6.devconf_all = all;
6485 net->ipv6.devconf_dflt = dflt;
6486
6487 #ifdef CONFIG_SYSCTL
6488 err = __addrconf_sysctl_register(net, "all", NULL, all);
6489 if (err < 0)
6490 goto err_reg_all;
6491
6492 err = __addrconf_sysctl_register(net, "default", NULL, dflt);
6493 if (err < 0)
6494 goto err_reg_dflt;
6495 #endif
6496 return 0;
6497
6498 #ifdef CONFIG_SYSCTL
6499 err_reg_dflt:
6500 __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
6501 err_reg_all:
6502 kfree(dflt);
6503 #endif
6504 err_alloc_dflt:
6505 kfree(all);
6506 err_alloc_all:
6507 return err;
6508 }
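
/*
 * addrconf_init_net() runs once for every network namespace (wired up via
 * addrconf_ops below), so each namespace gets its own writable "all" and
 * "default" devconf copies and, with CONFIG_SYSCTL, its own
 * net/ipv6/conf/{all,default} sysctl trees.  An illustration (the "blue"
 * namespace name is just an example, assuming the iproute2 'ip' tool):
 *
 *	ip netns add blue
 *	ip netns exec blue sysctl net.ipv6.conf.all.disable_ipv6
 *
 * Values read or written there belong to that namespace's copy and do not
 * affect the init namespace.
 */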
6509
6510 static void __net_exit addrconf_exit_net(struct net *net)
6511 {
6512 #ifdef CONFIG_SYSCTL
6513 __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
6514 NETCONFA_IFINDEX_DEFAULT);
6515 __addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
6516 NETCONFA_IFINDEX_ALL);
6517 #endif
6518 kfree(net->ipv6.devconf_dflt);
6519 kfree(net->ipv6.devconf_all);
6520 }
6521
6522 static struct pernet_operations addrconf_ops = {
6523 .init = addrconf_init_net,
6524 .exit = addrconf_exit_net,
6525 };
6526
6527 static struct rtnl_af_ops inet6_ops __read_mostly = {
6528 .family = AF_INET6,
6529 .fill_link_af = inet6_fill_link_af,
6530 .get_link_af_size = inet6_get_link_af_size,
6531 .validate_link_af = inet6_validate_link_af,
6532 .set_link_af = inet6_set_link_af,
6533 };
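
/*
 * Per-address-family rtnetlink hooks: fill_link_af/get_link_af_size emit the
 * AF_INET6 part of the IFLA_AF_SPEC attribute in link dumps, while
 * validate_link_af/set_link_af parse and apply it on link changes.
 */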
6534
6535 /*
6536 * Init / cleanup code
6537 */
6538
6539 int __init addrconf_init(void)
6540 {
6541 struct inet6_dev *idev;
6542 int i, err;
6543
6544 err = ipv6_addr_label_init();
6545 if (err < 0) {
6546 pr_crit("%s: cannot initialize default policy table: %d\n",
6547 __func__, err);
6548 goto out;
6549 }
6550
6551 err = register_pernet_subsys(&addrconf_ops);
6552 if (err < 0)
6553 goto out_addrlabel;
6554
6555 addrconf_wq = create_workqueue("ipv6_addrconf");
6556 if (!addrconf_wq) {
6557 err = -ENOMEM;
6558 goto out_nowq;
6559 }
6560
6561 /* The addrconf netdev notifier requires that loopback_dev
6562 * has its ipv6 private information allocated and set up
6563 * before it can bring up and give link-local addresses
6564 * to other devices which are up.
6565 *
6566 * Unfortunately, loopback_dev is not necessarily the first
6567 * entry in the global dev_base list of net devices. In fact,
6568 * it is likely to be the very last entry on that list.
6569 * So this causes the notifier registration below to try and
6570 * give link-local addresses to all devices besides loopback_dev
6571 * first, then loopback_dev, which causes all the non-loopback_dev
6572 * devices to fail to get a link-local address.
6573 *
6574 * So, as a temporary fix, allocate the ipv6 structure for
6575 * loopback_dev first by hand.
6576 * Longer term, all of the dependencies ipv6 has upon the loopback
6577 * device and it being up should be removed.
6578 */
6579 rtnl_lock();
6580 idev = ipv6_add_dev(init_net.loopback_dev);
6581 rtnl_unlock();
6582 if (IS_ERR(idev)) {
6583 err = PTR_ERR(idev);
6584 goto errlo;
6585 }
6586
6587 ip6_route_init_special_entries();
6588
6589 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6590 INIT_HLIST_HEAD(&inet6_addr_lst[i]);
6591
6592 register_netdevice_notifier(&ipv6_dev_notf);
6593
6594 addrconf_verify();
6595
6596 rtnl_af_register(&inet6_ops);
6597
6598 err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
6599 0);
6600 if (err < 0)
6601 goto errout;
6602
6603 /* Only the first call to __rtnl_register can fail: it allocates the AF_INET6 handler table, later calls only fill slots in it */
6604 __rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, 0);
6605 __rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, 0);
6606 __rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr,
6607 inet6_dump_ifaddr, RTNL_FLAG_DOIT_UNLOCKED);
6608 __rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL,
6609 inet6_dump_ifmcaddr, 0);
6610 __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
6611 inet6_dump_ifacaddr, 0);
6612 __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf,
6613 inet6_netconf_dump_devconf, RTNL_FLAG_DOIT_UNLOCKED);
6614
6615 ipv6_addr_label_rtnl_register();
6616
6617 return 0;
6618 errout:
6619 rtnl_af_unregister(&inet6_ops);
6620 unregister_netdevice_notifier(&ipv6_dev_notf);
6621 errlo:
6622 destroy_workqueue(addrconf_wq);
6623 out_nowq:
6624 unregister_pernet_subsys(&addrconf_ops);
6625 out_addrlabel:
6626 ipv6_addr_label_cleanup();
6627 out:
6628 return err;
6629 }
6630
6631 void addrconf_cleanup(void)
6632 {
6633 struct net_device *dev;
6634 int i;
6635
6636 unregister_netdevice_notifier(&ipv6_dev_notf);
6637 unregister_pernet_subsys(&addrconf_ops);
6638 ipv6_addr_label_cleanup();
6639
6640 rtnl_af_unregister(&inet6_ops);
6641
6642 rtnl_lock();
6643
6644 /* clean dev list */
6645 for_each_netdev(&init_net, dev) {
6646 if (__in6_dev_get(dev) == NULL)
6647 continue;
6648 addrconf_ifdown(dev, 1);
6649 }
6650 addrconf_ifdown(init_net.loopback_dev, 2);
6651
6652 /*
6653 * Check hash table.
6654 */
6655 spin_lock_bh(&addrconf_hash_lock);
6656 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6657 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
6658 spin_unlock_bh(&addrconf_hash_lock);
6659 cancel_delayed_work(&addr_chk_work);
6660 rtnl_unlock();
6661
6662 destroy_workqueue(addrconf_wq);
6663 }