1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * IPv6 Address [auto]configuration
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
9 */
10
11 /*
12 * Changes:
13 *
14 * Janos Farkas : delete timer on ifdown
15 * <chexum@bankinf.banki.hu>
16 * Andi Kleen : kill double kfree on module
17 * unload.
18 * Maciej W. Rozycki : FDDI support
19 * sekiya@USAGI : Don't send too many RS
20 * packets.
21 * yoshfuji@USAGI : Fixed interval between DAD
22 * packets.
23 * YOSHIFUJI Hideaki @USAGI : improved accuracy of
24 * address validation timer.
25 * YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
26 * support.
27 * Yuji SEKIYA @USAGI : Don't assign a same IPv6
28 * address on a same interface.
29 * YOSHIFUJI Hideaki @USAGI : ARCnet support
30 * YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
31 * seq_file.
32 * YOSHIFUJI Hideaki @USAGI : improved source address
33 * selection; consider scope,
34 * status etc.
35 */
36
37 #define pr_fmt(fmt) "IPv6: " fmt
38
39 #include <linux/errno.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/sched/signal.h>
43 #include <linux/socket.h>
44 #include <linux/sockios.h>
45 #include <linux/net.h>
46 #include <linux/inet.h>
47 #include <linux/in6.h>
48 #include <linux/netdevice.h>
49 #include <linux/if_addr.h>
50 #include <linux/if_arp.h>
51 #include <linux/if_arcnet.h>
52 #include <linux/if_infiniband.h>
53 #include <linux/route.h>
54 #include <linux/inetdevice.h>
55 #include <linux/init.h>
56 #include <linux/slab.h>
57 #ifdef CONFIG_SYSCTL
58 #include <linux/sysctl.h>
59 #endif
60 #include <linux/capability.h>
61 #include <linux/delay.h>
62 #include <linux/notifier.h>
63 #include <linux/string.h>
64 #include <linux/hash.h>
65
66 #include <net/net_namespace.h>
67 #include <net/sock.h>
68 #include <net/snmp.h>
69
70 #include <net/6lowpan.h>
71 #include <net/firewire.h>
72 #include <net/ipv6.h>
73 #include <net/protocol.h>
74 #include <net/ndisc.h>
75 #include <net/ip6_route.h>
76 #include <net/addrconf.h>
77 #include <net/tcp.h>
78 #include <net/ip.h>
79 #include <net/netlink.h>
80 #include <net/pkt_sched.h>
81 #include <net/l3mdev.h>
82 #include <linux/if_tunnel.h>
83 #include <linux/rtnetlink.h>
84 #include <linux/netconf.h>
85 #include <linux/random.h>
86 #include <linux/uaccess.h>
87 #include <asm/unaligned.h>
88
89 #include <linux/proc_fs.h>
90 #include <linux/seq_file.h>
91 #include <linux/export.h>
92
93 #define INFINITY_LIFE_TIME 0xFFFFFFFF
94
95 #define IPV6_MAX_STRLEN \
96 sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
97
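/* Convert a jiffies timestamp (such as an address's cstamp/tstamp) into
 * hundredths of a second since boot, after removing the INITIAL_JIFFIES
 * offset. For example, with HZ == 1000 a delta of 2500 jiffies yields 250,
 * i.e. 2.5 seconds.
 */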
98 static inline u32 cstamp_delta(unsigned long cstamp)
99 {
100 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
101 }
102
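/* RFC 3315 section 14 style randomized backoff, used below for router
 * solicitation retransmission: the initial timeout is IRT scaled by a random
 * factor in [0.9, 1.1] (900000..1100000 over 1000000), and each later
 * timeout is roughly doubled (factor 1.9..2.1) until it would exceed MRT, at
 * which point it is re-randomized around MRT instead. For example,
 * irt = 4 * HZ gives a first interval between 3.6 * HZ and 4.4 * HZ.
 */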
103 static inline s32 rfc3315_s14_backoff_init(s32 irt)
104 {
105 /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
106 u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
107 do_div(tmp, 1000000);
108 return (s32)tmp;
109 }
110
111 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
112 {
113 /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
114 u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
115 do_div(tmp, 1000000);
116 if ((s32)tmp > mrt) {
117 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
118 tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
119 do_div(tmp, 1000000);
120 }
121 return (s32)tmp;
122 }
123
124 #ifdef CONFIG_SYSCTL
125 static int addrconf_sysctl_register(struct inet6_dev *idev);
126 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
127 #else
128 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
129 {
130 return 0;
131 }
132
133 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
134 {
135 }
136 #endif
137
138 static void ipv6_gen_rnd_iid(struct in6_addr *addr);
139
140 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
141 static int ipv6_count_addresses(const struct inet6_dev *idev);
142 static int ipv6_generate_stable_address(struct in6_addr *addr,
143 u8 dad_count,
144 const struct inet6_dev *idev);
145
146 #define IN6_ADDR_HSIZE_SHIFT 8
147 #define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
148 /*
149 * Configured unicast address hash table
150 */
151 static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
152 static DEFINE_SPINLOCK(addrconf_hash_lock);
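/* Writers (ipv6_add_addr_hash(), ipv6_del_addr()) take addrconf_hash_lock;
 * lookups such as __ipv6_chk_addr_and_flags() walk the chains under
 * rcu_read_lock() only.
 */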
153
154 static void addrconf_verify(void);
155 static void addrconf_verify_rtnl(void);
156 static void addrconf_verify_work(struct work_struct *);
157
158 static struct workqueue_struct *addrconf_wq;
159 static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
160
161 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
162 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
163
164 static void addrconf_type_change(struct net_device *dev,
165 unsigned long event);
166 static int addrconf_ifdown(struct net_device *dev, bool unregister);
167
168 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
169 int plen,
170 const struct net_device *dev,
171 u32 flags, u32 noflags,
172 bool no_gw);
173
174 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
175 static void addrconf_dad_work(struct work_struct *w);
176 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
177 bool send_na);
178 static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
179 static void addrconf_rs_timer(struct timer_list *t);
180 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
181 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
182
183 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
184 struct prefix_info *pinfo);
185
186 static struct ipv6_devconf ipv6_devconf __read_mostly = {
187 .forwarding = 0,
188 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
189 .mtu6 = IPV6_MIN_MTU,
190 .accept_ra = 1,
191 .accept_redirects = 1,
192 .autoconf = 1,
193 .force_mld_version = 0,
194 .mldv1_unsolicited_report_interval = 10 * HZ,
195 .mldv2_unsolicited_report_interval = HZ,
196 .dad_transmits = 1,
197 .rtr_solicits = MAX_RTR_SOLICITATIONS,
198 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
199 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
200 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
201 .use_tempaddr = 0,
202 .temp_valid_lft = TEMP_VALID_LIFETIME,
203 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
204 .regen_max_retry = REGEN_MAX_RETRY,
205 .max_desync_factor = MAX_DESYNC_FACTOR,
206 .max_addresses = IPV6_MAX_ADDRESSES,
207 .accept_ra_defrtr = 1,
208 .accept_ra_from_local = 0,
209 .accept_ra_min_hop_limit= 1,
210 .accept_ra_min_lft = 0,
211 .accept_ra_pinfo = 1,
212 #ifdef CONFIG_IPV6_ROUTER_PREF
213 .accept_ra_rtr_pref = 1,
214 .rtr_probe_interval = 60 * HZ,
215 #ifdef CONFIG_IPV6_ROUTE_INFO
216 .accept_ra_rt_info_min_plen = 0,
217 .accept_ra_rt_info_max_plen = 0,
218 #endif
219 #endif
220 .proxy_ndp = 0,
221 .accept_source_route = 0, /* we do not accept RH0 by default. */
222 .disable_ipv6 = 0,
223 .accept_dad = 0,
224 .suppress_frag_ndisc = 1,
225 .accept_ra_mtu = 1,
226 .stable_secret = {
227 .initialized = false,
228 },
229 .use_oif_addrs_only = 0,
230 .ignore_routes_with_linkdown = 0,
231 .keep_addr_on_down = 0,
232 .seg6_enabled = 0,
233 #ifdef CONFIG_IPV6_SEG6_HMAC
234 .seg6_require_hmac = 0,
235 #endif
236 .enhanced_dad = 1,
237 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
238 .disable_policy = 0,
239 .rpl_seg_enabled = 0,
240 };
241
242 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
243 .forwarding = 0,
244 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
245 .mtu6 = IPV6_MIN_MTU,
246 .accept_ra = 1,
247 .accept_redirects = 1,
248 .autoconf = 1,
249 .force_mld_version = 0,
250 .mldv1_unsolicited_report_interval = 10 * HZ,
251 .mldv2_unsolicited_report_interval = HZ,
252 .dad_transmits = 1,
253 .rtr_solicits = MAX_RTR_SOLICITATIONS,
254 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
255 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
256 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
257 .use_tempaddr = 0,
258 .temp_valid_lft = TEMP_VALID_LIFETIME,
259 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
260 .regen_max_retry = REGEN_MAX_RETRY,
261 .max_desync_factor = MAX_DESYNC_FACTOR,
262 .max_addresses = IPV6_MAX_ADDRESSES,
263 .accept_ra_defrtr = 1,
264 .accept_ra_from_local = 0,
265 .accept_ra_min_hop_limit= 1,
266 .accept_ra_min_lft = 0,
267 .accept_ra_pinfo = 1,
268 #ifdef CONFIG_IPV6_ROUTER_PREF
269 .accept_ra_rtr_pref = 1,
270 .rtr_probe_interval = 60 * HZ,
271 #ifdef CONFIG_IPV6_ROUTE_INFO
272 .accept_ra_rt_info_min_plen = 0,
273 .accept_ra_rt_info_max_plen = 0,
274 #endif
275 #endif
276 .proxy_ndp = 0,
277 .accept_source_route = 0, /* we do not accept RH0 by default. */
278 .disable_ipv6 = 0,
279 .accept_dad = 1,
280 .suppress_frag_ndisc = 1,
281 .accept_ra_mtu = 1,
282 .stable_secret = {
283 .initialized = false,
284 },
285 .use_oif_addrs_only = 0,
286 .ignore_routes_with_linkdown = 0,
287 .keep_addr_on_down = 0,
288 .seg6_enabled = 0,
289 #ifdef CONFIG_IPV6_SEG6_HMAC
290 .seg6_require_hmac = 0,
291 #endif
292 .enhanced_dad = 1,
293 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
294 .disable_policy = 0,
295 .rpl_seg_enabled = 0,
296 };
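/* As written here, the two templates above differ only in accept_dad
 * (0 vs. 1). ipv6_add_dev() below copies the namespace's devconf_dflt into
 * every new inet6_dev; the per-namespace "all"/"default" sysctl views are
 * seeded from these structures elsewhere in this file.
 */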
297
298 /* Check if link is ready: is it up and is a valid qdisc available */
299 static inline bool addrconf_link_ready(const struct net_device *dev)
300 {
301 return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
302 }
303
304 static void addrconf_del_rs_timer(struct inet6_dev *idev)
305 {
306 if (del_timer(&idev->rs_timer))
307 __in6_dev_put(idev);
308 }
309
310 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
311 {
312 if (cancel_delayed_work(&ifp->dad_work))
313 __in6_ifa_put(ifp);
314 }
315
316 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
317 unsigned long when)
318 {
319 if (!mod_timer(&idev->rs_timer, jiffies + when))
320 in6_dev_hold(idev);
321 }
322
323 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
324 unsigned long delay)
325 {
326 in6_ifa_hold(ifp);
327 if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
328 in6_ifa_put(ifp);
329 }
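/* The helpers above keep a reference on the idev/ifp while the RS timer or
 * DAD work is pending: addrconf_mod_rs_timer() takes a hold only when arming
 * a previously inactive timer, addrconf_mod_dad_work() drops its extra hold
 * when the work was already queued, and the del_* variants drop the pending
 * reference once the timer/work is successfully cancelled.
 */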
330
331 static int snmp6_alloc_dev(struct inet6_dev *idev)
332 {
333 int i;
334
335 idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
336 if (!idev->stats.ipv6)
337 goto err_ip;
338
339 for_each_possible_cpu(i) {
340 struct ipstats_mib *addrconf_stats;
341 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
342 u64_stats_init(&addrconf_stats->syncp);
343 }
344
345
346 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
347 GFP_KERNEL);
348 if (!idev->stats.icmpv6dev)
349 goto err_icmp;
350 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
351 GFP_KERNEL);
352 if (!idev->stats.icmpv6msgdev)
353 goto err_icmpmsg;
354
355 return 0;
356
357 err_icmpmsg:
358 kfree(idev->stats.icmpv6dev);
359 err_icmp:
360 free_percpu(idev->stats.ipv6);
361 err_ip:
362 return -ENOMEM;
363 }
364
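/* Allocate and initialize the per-device inet6_dev: configuration is copied
 * from the namespace's devconf_dflt, neighbour discovery parameters and SNMP
 * counters are allocated, sysctl and /proc/net/dev_snmp6 entries are
 * registered, the structure is published via dev->ip6_ptr and the all-nodes
 * multicast groups are joined. Returns an ERR_PTR() on failure.
 */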
365 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
366 {
367 struct inet6_dev *ndev;
368 int err = -ENOMEM;
369
370 ASSERT_RTNL();
371
372 if (dev->mtu < IPV6_MIN_MTU)
373 return ERR_PTR(-EINVAL);
374
375 ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
376 if (!ndev)
377 return ERR_PTR(err);
378
379 rwlock_init(&ndev->lock);
380 ndev->dev = dev;
381 INIT_LIST_HEAD(&ndev->addr_list);
382 timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
383 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
384
385 if (ndev->cnf.stable_secret.initialized)
386 ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
387
388 ndev->cnf.mtu6 = dev->mtu;
389 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
390 if (!ndev->nd_parms) {
391 kfree(ndev);
392 return ERR_PTR(err);
393 }
394 if (ndev->cnf.forwarding)
395 dev_disable_lro(dev);
396 /* We refer to the device */
397 dev_hold(dev);
398
399 if (snmp6_alloc_dev(ndev) < 0) {
400 netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
401 __func__);
402 neigh_parms_release(&nd_tbl, ndev->nd_parms);
403 dev_put(dev);
404 kfree(ndev);
405 return ERR_PTR(err);
406 }
407
408 if (snmp6_register_dev(ndev) < 0) {
409 netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
410 __func__, dev->name);
411 goto err_release;
412 }
413
414 /* One reference from device. */
415 refcount_set(&ndev->refcnt, 1);
416
417 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
418 ndev->cnf.accept_dad = -1;
419
420 #if IS_ENABLED(CONFIG_IPV6_SIT)
421 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
422 pr_info("%s: Disabled Multicast RS\n", dev->name);
423 ndev->cnf.rtr_solicits = 0;
424 }
425 #endif
426
427 INIT_LIST_HEAD(&ndev->tempaddr_list);
428 ndev->desync_factor = U32_MAX;
429 if ((dev->flags&IFF_LOOPBACK) ||
430 dev->type == ARPHRD_TUNNEL ||
431 dev->type == ARPHRD_TUNNEL6 ||
432 dev->type == ARPHRD_SIT ||
433 dev->type == ARPHRD_NONE) {
434 ndev->cnf.use_tempaddr = -1;
435 }
436
437 ndev->token = in6addr_any;
438
439 if (netif_running(dev) && addrconf_link_ready(dev))
440 ndev->if_flags |= IF_READY;
441
442 ipv6_mc_init_dev(ndev);
443 ndev->tstamp = jiffies;
444 err = addrconf_sysctl_register(ndev);
445 if (err) {
446 ipv6_mc_destroy_dev(ndev);
447 snmp6_unregister_dev(ndev);
448 goto err_release;
449 }
450 /* protected by rtnl_lock */
451 rcu_assign_pointer(dev->ip6_ptr, ndev);
452
453 /* Join interface-local all-node multicast group */
454 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
455
456 /* Join all-node multicast group */
457 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
458
459 /* Join all-router multicast group if forwarding is set */
460 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
461 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
462
463 return ndev;
464
465 err_release:
466 neigh_parms_release(&nd_tbl, ndev->nd_parms);
467 ndev->dead = 1;
468 in6_dev_finish_destroy(ndev);
469 return ERR_PTR(err);
470 }
471
472 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
473 {
474 struct inet6_dev *idev;
475
476 ASSERT_RTNL();
477
478 idev = __in6_dev_get(dev);
479 if (!idev) {
480 idev = ipv6_add_dev(dev);
481 if (IS_ERR(idev))
482 return idev;
483 }
484
485 if (dev->flags&IFF_UP)
486 ipv6_mc_up(idev);
487 return idev;
488 }
489
490 static int inet6_netconf_msgsize_devconf(int type)
491 {
492 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
493 + nla_total_size(4); /* NETCONFA_IFINDEX */
494 bool all = false;
495
496 if (type == NETCONFA_ALL)
497 all = true;
498
499 if (all || type == NETCONFA_FORWARDING)
500 size += nla_total_size(4);
501 #ifdef CONFIG_IPV6_MROUTE
502 if (all || type == NETCONFA_MC_FORWARDING)
503 size += nla_total_size(4);
504 #endif
505 if (all || type == NETCONFA_PROXY_NEIGH)
506 size += nla_total_size(4);
507
508 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
509 size += nla_total_size(4);
510
511 return size;
512 }
513
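/* Fill one RTM_NEWNETCONF message. With type == NETCONFA_ALL every known
 * attribute is emitted; the size computed by inet6_netconf_msgsize_devconf()
 * above must stay in sync with what is put here (hence the -EMSGSIZE
 * WARN_ONs in the callers).
 */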
514 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
515 struct ipv6_devconf *devconf, u32 portid,
516 u32 seq, int event, unsigned int flags,
517 int type)
518 {
519 struct nlmsghdr *nlh;
520 struct netconfmsg *ncm;
521 bool all = false;
522
523 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
524 flags);
525 if (!nlh)
526 return -EMSGSIZE;
527
528 if (type == NETCONFA_ALL)
529 all = true;
530
531 ncm = nlmsg_data(nlh);
532 ncm->ncm_family = AF_INET6;
533
534 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
535 goto nla_put_failure;
536
537 if (!devconf)
538 goto out;
539
540 if ((all || type == NETCONFA_FORWARDING) &&
541 nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
542 goto nla_put_failure;
543 #ifdef CONFIG_IPV6_MROUTE
544 if ((all || type == NETCONFA_MC_FORWARDING) &&
545 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
546 atomic_read(&devconf->mc_forwarding)) < 0)
547 goto nla_put_failure;
548 #endif
549 if ((all || type == NETCONFA_PROXY_NEIGH) &&
550 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
551 goto nla_put_failure;
552
553 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
554 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
555 devconf->ignore_routes_with_linkdown) < 0)
556 goto nla_put_failure;
557
558 out:
559 nlmsg_end(skb, nlh);
560 return 0;
561
562 nla_put_failure:
563 nlmsg_cancel(skb, nlh);
564 return -EMSGSIZE;
565 }
566
567 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
568 int ifindex, struct ipv6_devconf *devconf)
569 {
570 struct sk_buff *skb;
571 int err = -ENOBUFS;
572
573 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
574 if (!skb)
575 goto errout;
576
577 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
578 event, 0, type);
579 if (err < 0) {
580 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
581 WARN_ON(err == -EMSGSIZE);
582 kfree_skb(skb);
583 goto errout;
584 }
585 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
586 return;
587 errout:
588 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
589 }
590
591 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
592 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
593 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
594 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
595 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
596 };
597
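/* Validate an RTM_GETNETCONF request. Under strict checking only
 * NETCONFA_IFINDEX is accepted as an attribute; anything else is rejected
 * with an extack message.
 */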
598 static int inet6_netconf_valid_get_req(struct sk_buff *skb,
599 const struct nlmsghdr *nlh,
600 struct nlattr **tb,
601 struct netlink_ext_ack *extack)
602 {
603 int i, err;
604
605 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
606 NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request");
607 return -EINVAL;
608 }
609
610 if (!netlink_strict_get_check(skb))
611 return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
612 tb, NETCONFA_MAX,
613 devconf_ipv6_policy, extack);
614
615 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
616 tb, NETCONFA_MAX,
617 devconf_ipv6_policy, extack);
618 if (err)
619 return err;
620
621 for (i = 0; i <= NETCONFA_MAX; i++) {
622 if (!tb[i])
623 continue;
624
625 switch (i) {
626 case NETCONFA_IFINDEX:
627 break;
628 default:
629 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
630 return -EINVAL;
631 }
632 }
633
634 return 0;
635 }
636
637 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
638 struct nlmsghdr *nlh,
639 struct netlink_ext_ack *extack)
640 {
641 struct net *net = sock_net(in_skb->sk);
642 struct nlattr *tb[NETCONFA_MAX+1];
643 struct inet6_dev *in6_dev = NULL;
644 struct net_device *dev = NULL;
645 struct sk_buff *skb;
646 struct ipv6_devconf *devconf;
647 int ifindex;
648 int err;
649
650 err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack);
651 if (err < 0)
652 return err;
653
654 if (!tb[NETCONFA_IFINDEX])
655 return -EINVAL;
656
657 err = -EINVAL;
658 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
659 switch (ifindex) {
660 case NETCONFA_IFINDEX_ALL:
661 devconf = net->ipv6.devconf_all;
662 break;
663 case NETCONFA_IFINDEX_DEFAULT:
664 devconf = net->ipv6.devconf_dflt;
665 break;
666 default:
667 dev = dev_get_by_index(net, ifindex);
668 if (!dev)
669 return -EINVAL;
670 in6_dev = in6_dev_get(dev);
671 if (!in6_dev)
672 goto errout;
673 devconf = &in6_dev->cnf;
674 break;
675 }
676
677 err = -ENOBUFS;
678 skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
679 if (!skb)
680 goto errout;
681
682 err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
683 NETLINK_CB(in_skb).portid,
684 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
685 NETCONFA_ALL);
686 if (err < 0) {
687 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
688 WARN_ON(err == -EMSGSIZE);
689 kfree_skb(skb);
690 goto errout;
691 }
692 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
693 errout:
694 if (in6_dev)
695 in6_dev_put(in6_dev);
696 if (dev)
697 dev_put(dev);
698 return err;
699 }
700
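/* Dump netconf state for every device, walking the per-namespace
 * dev_index_head hash and resuming from cb->args[0]/args[1]. Two pseudo
 * entries follow the real devices: NETCONFA_IFINDEX_ALL and
 * NETCONFA_IFINDEX_DEFAULT, emitted once h passes NETDEV_HASHENTRIES.
 */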
701 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
702 struct netlink_callback *cb)
703 {
704 const struct nlmsghdr *nlh = cb->nlh;
705 struct net *net = sock_net(skb->sk);
706 int h, s_h;
707 int idx, s_idx;
708 struct net_device *dev;
709 struct inet6_dev *idev;
710 struct hlist_head *head;
711
712 if (cb->strict_check) {
713 struct netlink_ext_ack *extack = cb->extack;
714 struct netconfmsg *ncm;
715
716 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
717 NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
718 return -EINVAL;
719 }
720
721 if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
722 NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
723 return -EINVAL;
724 }
725 }
726
727 s_h = cb->args[0];
728 s_idx = idx = cb->args[1];
729
730 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
731 idx = 0;
732 head = &net->dev_index_head[h];
733 rcu_read_lock();
734 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
735 net->dev_base_seq;
736 hlist_for_each_entry_rcu(dev, head, index_hlist) {
737 if (idx < s_idx)
738 goto cont;
739 idev = __in6_dev_get(dev);
740 if (!idev)
741 goto cont;
742
743 if (inet6_netconf_fill_devconf(skb, dev->ifindex,
744 &idev->cnf,
745 NETLINK_CB(cb->skb).portid,
746 nlh->nlmsg_seq,
747 RTM_NEWNETCONF,
748 NLM_F_MULTI,
749 NETCONFA_ALL) < 0) {
750 rcu_read_unlock();
751 goto done;
752 }
753 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
754 cont:
755 idx++;
756 }
757 rcu_read_unlock();
758 }
759 if (h == NETDEV_HASHENTRIES) {
760 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
761 net->ipv6.devconf_all,
762 NETLINK_CB(cb->skb).portid,
763 nlh->nlmsg_seq,
764 RTM_NEWNETCONF, NLM_F_MULTI,
765 NETCONFA_ALL) < 0)
766 goto done;
767 else
768 h++;
769 }
770 if (h == NETDEV_HASHENTRIES + 1) {
771 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
772 net->ipv6.devconf_dflt,
773 NETLINK_CB(cb->skb).portid,
774 nlh->nlmsg_seq,
775 RTM_NEWNETCONF, NLM_F_MULTI,
776 NETCONFA_ALL) < 0)
777 goto done;
778 else
779 h++;
780 }
781 done:
782 cb->args[0] = h;
783 cb->args[1] = idx;
784
785 return skb->len;
786 }
787
788 #ifdef CONFIG_SYSCTL
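/* Apply a forwarding change to one device: join or leave the all-routers
 * multicast groups, then join/leave anycast for every non-tentative address.
 * Addresses are first collected on a temporary list so that
 * addrconf_join_anycast()/addrconf_leave_anycast() are not called with
 * idev->lock held; an RTM_NEWNETCONF notification is sent at the end.
 */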
789 static void dev_forward_change(struct inet6_dev *idev)
790 {
791 struct net_device *dev;
792 struct inet6_ifaddr *ifa;
793 LIST_HEAD(tmp_addr_list);
794
795 if (!idev)
796 return;
797 dev = idev->dev;
798 if (idev->cnf.forwarding)
799 dev_disable_lro(dev);
800 if (dev->flags & IFF_MULTICAST) {
801 if (idev->cnf.forwarding) {
802 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
803 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
804 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
805 } else {
806 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
807 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
808 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
809 }
810 }
811
812 read_lock_bh(&idev->lock);
813 list_for_each_entry(ifa, &idev->addr_list, if_list) {
814 if (ifa->flags&IFA_F_TENTATIVE)
815 continue;
816 list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
817 }
818 read_unlock_bh(&idev->lock);
819
820 while (!list_empty(&tmp_addr_list)) {
821 ifa = list_first_entry(&tmp_addr_list,
822 struct inet6_ifaddr, if_list_aux);
823 list_del(&ifa->if_list_aux);
824 if (idev->cnf.forwarding)
825 addrconf_join_anycast(ifa);
826 else
827 addrconf_leave_anycast(ifa);
828 }
829
830 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
831 NETCONFA_FORWARDING,
832 dev->ifindex, &idev->cnf);
833 }
834
835
836 static void addrconf_forward_change(struct net *net, __s32 newf)
837 {
838 struct net_device *dev;
839 struct inet6_dev *idev;
840
841 for_each_netdev(net, dev) {
842 idev = __in6_dev_get(dev);
843 if (idev) {
844 int changed = (!idev->cnf.forwarding) ^ (!newf);
845 idev->cnf.forwarding = newf;
846 if (changed)
847 dev_forward_change(idev);
848 }
849 }
850 }
851
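/* Called from the forwarding sysctl path after the new value has been
 * stored. Writing conf/default only triggers a notification; writing
 * conf/all also rewrites the default and every per-device value via
 * addrconf_forward_change(), and a non-zero new value purges default routes
 * learned from router advertisements (rt6_purge_dflt_routers()).
 */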
852 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
853 {
854 struct net *net;
855 int old;
856
857 if (!rtnl_trylock())
858 return restart_syscall();
859
860 net = (struct net *)table->extra2;
861 old = *p;
862 *p = newf;
863
864 if (p == &net->ipv6.devconf_dflt->forwarding) {
865 if ((!newf) ^ (!old))
866 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
867 NETCONFA_FORWARDING,
868 NETCONFA_IFINDEX_DEFAULT,
869 net->ipv6.devconf_dflt);
870 rtnl_unlock();
871 return 0;
872 }
873
874 if (p == &net->ipv6.devconf_all->forwarding) {
875 int old_dflt = net->ipv6.devconf_dflt->forwarding;
876
877 net->ipv6.devconf_dflt->forwarding = newf;
878 if ((!newf) ^ (!old_dflt))
879 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
880 NETCONFA_FORWARDING,
881 NETCONFA_IFINDEX_DEFAULT,
882 net->ipv6.devconf_dflt);
883
884 addrconf_forward_change(net, newf);
885 if ((!newf) ^ (!old))
886 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
887 NETCONFA_FORWARDING,
888 NETCONFA_IFINDEX_ALL,
889 net->ipv6.devconf_all);
890 } else if ((!newf) ^ (!old))
891 dev_forward_change((struct inet6_dev *)table->extra1);
892 rtnl_unlock();
893
894 if (newf)
895 rt6_purge_dflt_routers(net);
896 return 1;
897 }
898
899 static void addrconf_linkdown_change(struct net *net, __s32 newf)
900 {
901 struct net_device *dev;
902 struct inet6_dev *idev;
903
904 for_each_netdev(net, dev) {
905 idev = __in6_dev_get(dev);
906 if (idev) {
907 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
908
909 idev->cnf.ignore_routes_with_linkdown = newf;
910 if (changed)
911 inet6_netconf_notify_devconf(dev_net(dev),
912 RTM_NEWNETCONF,
913 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
914 dev->ifindex,
915 &idev->cnf);
916 }
917 }
918 }
919
920 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
921 {
922 struct net *net;
923 int old;
924
925 if (!rtnl_trylock())
926 return restart_syscall();
927
928 net = (struct net *)table->extra2;
929 old = *p;
930 *p = newf;
931
932 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
933 if ((!newf) ^ (!old))
934 inet6_netconf_notify_devconf(net,
935 RTM_NEWNETCONF,
936 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
937 NETCONFA_IFINDEX_DEFAULT,
938 net->ipv6.devconf_dflt);
939 rtnl_unlock();
940 return 0;
941 }
942
943 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
944 net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
945 addrconf_linkdown_change(net, newf);
946 if ((!newf) ^ (!old))
947 inet6_netconf_notify_devconf(net,
948 RTM_NEWNETCONF,
949 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
950 NETCONFA_IFINDEX_ALL,
951 net->ipv6.devconf_all);
952 }
953 rtnl_unlock();
954
955 return 1;
956 }
957
958 #endif
959
960 /* Nobody refers to this ifaddr, destroy it */
961 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
962 {
963 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
964
965 #ifdef NET_REFCNT_DEBUG
966 pr_debug("%s\n", __func__);
967 #endif
968
969 in6_dev_put(ifp->idev);
970
971 if (cancel_delayed_work(&ifp->dad_work))
972 pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
973 ifp);
974
975 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
976 pr_warn("Freeing alive inet6 address %p\n", ifp);
977 return;
978 }
979
980 kfree_rcu(ifp, rcu);
981 }
982
983 static void
984 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
985 {
986 struct list_head *p;
987 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
988
989 /*
990 * Each device address list is sorted in order of scope -
991 * global before linklocal.
992 */
993 list_for_each(p, &idev->addr_list) {
994 struct inet6_ifaddr *ifa
995 = list_entry(p, struct inet6_ifaddr, if_list);
996 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
997 break;
998 }
999
1000 list_add_tail_rcu(&ifp->if_list, p);
1001 }
1002
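/* Hash an address into one of the IN6_ADDR_HSIZE (256) buckets of
 * inet6_addr_lst, mixing in net_hash_mix() so that the same address hashes
 * differently in different network namespaces.
 */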
1003 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
1004 {
1005 u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
1006
1007 return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
1008 }
1009
1010 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1011 struct net_device *dev, unsigned int hash)
1012 {
1013 struct inet6_ifaddr *ifp;
1014
1015 hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
1016 if (!net_eq(dev_net(ifp->idev->dev), net))
1017 continue;
1018 if (ipv6_addr_equal(&ifp->addr, addr)) {
1019 if (!dev || ifp->idev->dev == dev)
1020 return true;
1021 }
1022 }
1023 return false;
1024 }
1025
1026 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
1027 {
1028 unsigned int hash = inet6_addr_hash(dev_net(dev), &ifa->addr);
1029 int err = 0;
1030
1031 spin_lock(&addrconf_hash_lock);
1032
1033 /* Ignore adding duplicate addresses on an interface */
1034 if (ipv6_chk_same_addr(dev_net(dev), &ifa->addr, dev, hash)) {
1035 netdev_dbg(dev, "ipv6_add_addr: already assigned\n");
1036 err = -EEXIST;
1037 } else {
1038 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
1039 }
1040
1041 spin_unlock(&addrconf_hash_lock);
1042
1043 return err;
1044 }
1045
1046 /* On success it returns ifp with increased reference count */
1047
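/* The helper below rejects unusable address types, runs the (blocking)
 * inet6addr validator notifier chain when can_block is set, allocates the
 * ifa together with its host route (addrconf_f6i_alloc()), links it into the
 * global hash (-EEXIST on duplicates) and the scope-sorted per-device list,
 * and finally fires NETDEV_UP on the inet6addr notifier chain.
 */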
1048 static struct inet6_ifaddr *
1049 ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
1050 bool can_block, struct netlink_ext_ack *extack)
1051 {
1052 gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
1053 int addr_type = ipv6_addr_type(cfg->pfx);
1054 struct net *net = dev_net(idev->dev);
1055 struct inet6_ifaddr *ifa = NULL;
1056 struct fib6_info *f6i = NULL;
1057 int err = 0;
1058
1059 if (addr_type == IPV6_ADDR_ANY ||
1060 (addr_type & IPV6_ADDR_MULTICAST &&
1061 !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
1062 (!(idev->dev->flags & IFF_LOOPBACK) &&
1063 !netif_is_l3_master(idev->dev) &&
1064 addr_type & IPV6_ADDR_LOOPBACK))
1065 return ERR_PTR(-EADDRNOTAVAIL);
1066
1067 if (idev->dead) {
1068 err = -ENODEV; /*XXX*/
1069 goto out;
1070 }
1071
1072 if (idev->cnf.disable_ipv6) {
1073 err = -EACCES;
1074 goto out;
1075 }
1076
1077 /* validator notifier needs to be blocking;
1078 * do not call in atomic context
1079 */
1080 if (can_block) {
1081 struct in6_validator_info i6vi = {
1082 .i6vi_addr = *cfg->pfx,
1083 .i6vi_dev = idev,
1084 .extack = extack,
1085 };
1086
1087 err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
1088 err = notifier_to_errno(err);
1089 if (err < 0)
1090 goto out;
1091 }
1092
1093 ifa = kzalloc(sizeof(*ifa), gfp_flags);
1094 if (!ifa) {
1095 err = -ENOBUFS;
1096 goto out;
1097 }
1098
1099 f6i = addrconf_f6i_alloc(net, idev, cfg->pfx, false, gfp_flags);
1100 if (IS_ERR(f6i)) {
1101 err = PTR_ERR(f6i);
1102 f6i = NULL;
1103 goto out;
1104 }
1105
1106 neigh_parms_data_state_setall(idev->nd_parms);
1107
1108 ifa->addr = *cfg->pfx;
1109 if (cfg->peer_pfx)
1110 ifa->peer_addr = *cfg->peer_pfx;
1111
1112 spin_lock_init(&ifa->lock);
1113 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1114 INIT_HLIST_NODE(&ifa->addr_lst);
1115 ifa->scope = cfg->scope;
1116 ifa->prefix_len = cfg->plen;
1117 ifa->rt_priority = cfg->rt_priority;
1118 ifa->flags = cfg->ifa_flags;
1119 /* No need to add the TENTATIVE flag for addresses with NODAD */
1120 if (!(cfg->ifa_flags & IFA_F_NODAD))
1121 ifa->flags |= IFA_F_TENTATIVE;
1122 ifa->valid_lft = cfg->valid_lft;
1123 ifa->prefered_lft = cfg->preferred_lft;
1124 ifa->cstamp = ifa->tstamp = jiffies;
1125 ifa->tokenized = false;
1126
1127 ifa->rt = f6i;
1128
1129 ifa->idev = idev;
1130 in6_dev_hold(idev);
1131
1132 /* For caller */
1133 refcount_set(&ifa->refcnt, 1);
1134
1135 rcu_read_lock_bh();
1136
1137 err = ipv6_add_addr_hash(idev->dev, ifa);
1138 if (err < 0) {
1139 rcu_read_unlock_bh();
1140 goto out;
1141 }
1142
1143 write_lock(&idev->lock);
1144
1145 /* Add to inet6_dev unicast addr list. */
1146 ipv6_link_dev_addr(idev, ifa);
1147
1148 if (ifa->flags&IFA_F_TEMPORARY) {
1149 list_add(&ifa->tmp_list, &idev->tempaddr_list);
1150 in6_ifa_hold(ifa);
1151 }
1152
1153 in6_ifa_hold(ifa);
1154 write_unlock(&idev->lock);
1155
1156 rcu_read_unlock_bh();
1157
1158 inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1159 out:
1160 if (unlikely(err < 0)) {
1161 fib6_info_release(f6i);
1162
1163 if (ifa) {
1164 if (ifa->idev)
1165 in6_dev_put(ifa->idev);
1166 kfree(ifa);
1167 }
1168 ifa = ERR_PTR(err);
1169 }
1170
1171 return ifa;
1172 }
1173
1174 enum cleanup_prefix_rt_t {
1175 CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */
1176 CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */
1177 CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1178 };
1179
1180 /*
1181 * Check whether the prefix for ifp would still need a prefix route
1182 * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1183 * constants.
1184 *
1185 * 1) we don't purge prefix if address was not permanent.
1186 * prefix is managed by its own lifetime.
1187 * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1188 * 3) if there are no addresses, delete prefix.
1189 * 4) if there are still other permanent address(es),
1190 * corresponding prefix is still permanent.
1191 * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1192 * don't purge the prefix, assume user space is managing it.
1193 * 6) otherwise, update prefix lifetime to the
1194 * longest valid lifetime among the corresponding
1195 * addresses on the device.
1196 * Note: subsequent RA will update lifetime.
1197 **/
1198 static enum cleanup_prefix_rt_t
1199 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1200 {
1201 struct inet6_ifaddr *ifa;
1202 struct inet6_dev *idev = ifp->idev;
1203 unsigned long lifetime;
1204 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1205
1206 *expires = jiffies;
1207
1208 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1209 if (ifa == ifp)
1210 continue;
1211 if (ifa->prefix_len != ifp->prefix_len ||
1212 !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1213 ifp->prefix_len))
1214 continue;
1215 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1216 return CLEANUP_PREFIX_RT_NOP;
1217
1218 action = CLEANUP_PREFIX_RT_EXPIRE;
1219
1220 spin_lock(&ifa->lock);
1221
1222 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1223 /*
1224 * Note: Because this address is
1225 * not permanent, lifetime <
1226 * LONG_MAX / HZ here.
1227 */
1228 if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1229 *expires = ifa->tstamp + lifetime * HZ;
1230 spin_unlock(&ifa->lock);
1231 }
1232
1233 return action;
1234 }
1235
1236 static void
1237 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
1238 bool del_rt, bool del_peer)
1239 {
1240 struct fib6_info *f6i;
1241
1242 f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
1243 ifp->prefix_len,
1244 ifp->idev->dev, 0, RTF_DEFAULT, true);
1245 if (f6i) {
1246 if (del_rt)
1247 ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
1248 else {
1249 if (!(f6i->fib6_flags & RTF_EXPIRES))
1250 fib6_set_expires(f6i, expires);
1251 fib6_info_release(f6i);
1252 }
1253 }
1254 }
1255
1256
1257 /* This function wants to get referenced ifp and releases it before return */
1258
1259 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1260 {
1261 int state;
1262 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1263 unsigned long expires;
1264
1265 ASSERT_RTNL();
1266
1267 spin_lock_bh(&ifp->lock);
1268 state = ifp->state;
1269 ifp->state = INET6_IFADDR_STATE_DEAD;
1270 spin_unlock_bh(&ifp->lock);
1271
1272 if (state == INET6_IFADDR_STATE_DEAD)
1273 goto out;
1274
1275 spin_lock_bh(&addrconf_hash_lock);
1276 hlist_del_init_rcu(&ifp->addr_lst);
1277 spin_unlock_bh(&addrconf_hash_lock);
1278
1279 write_lock_bh(&ifp->idev->lock);
1280
1281 if (ifp->flags&IFA_F_TEMPORARY) {
1282 list_del(&ifp->tmp_list);
1283 if (ifp->ifpub) {
1284 in6_ifa_put(ifp->ifpub);
1285 ifp->ifpub = NULL;
1286 }
1287 __in6_ifa_put(ifp);
1288 }
1289
1290 if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1291 action = check_cleanup_prefix_route(ifp, &expires);
1292
1293 list_del_rcu(&ifp->if_list);
1294 __in6_ifa_put(ifp);
1295
1296 write_unlock_bh(&ifp->idev->lock);
1297
1298 addrconf_del_dad_work(ifp);
1299
1300 ipv6_ifa_notify(RTM_DELADDR, ifp);
1301
1302 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1303
1304 if (action != CLEANUP_PREFIX_RT_NOP) {
1305 cleanup_prefix_route(ifp, expires,
1306 action == CLEANUP_PREFIX_RT_DEL, false);
1307 }
1308
1309 /* clean up prefsrc entries */
1310 rt6_remove_prefsrc(ifp);
1311 out:
1312 in6_ifa_put(ifp);
1313 }
1314
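/* Create a privacy-extensions (RFC 3041) temporary address derived from the
 * public address ifp: the prefix is kept and a random interface identifier
 * is generated via ipv6_gen_rnd_iid(). Lifetimes are clamped against
 * temp_valid_lft and temp_prefered_lft minus the per-device desync_factor,
 * and no address is created if the resulting preferred lifetime would not
 * exceed the regeneration advance.
 */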
1315 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
1316 {
1317 struct inet6_dev *idev = ifp->idev;
1318 unsigned long tmp_tstamp, age;
1319 unsigned long regen_advance;
1320 unsigned long now = jiffies;
1321 s32 cnf_temp_preferred_lft;
1322 struct inet6_ifaddr *ift;
1323 struct ifa6_config cfg;
1324 long max_desync_factor;
1325 struct in6_addr addr;
1326 int ret = 0;
1327
1328 write_lock_bh(&idev->lock);
1329
1330 retry:
1331 in6_dev_hold(idev);
1332 if (idev->cnf.use_tempaddr <= 0) {
1333 write_unlock_bh(&idev->lock);
1334 pr_info("%s: use_tempaddr is disabled\n", __func__);
1335 in6_dev_put(idev);
1336 ret = -1;
1337 goto out;
1338 }
1339 spin_lock_bh(&ifp->lock);
1340 if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1341 idev->cnf.use_tempaddr = -1; /*XXX*/
1342 spin_unlock_bh(&ifp->lock);
1343 write_unlock_bh(&idev->lock);
1344 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1345 __func__);
1346 in6_dev_put(idev);
1347 ret = -1;
1348 goto out;
1349 }
1350 in6_ifa_hold(ifp);
1351 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1352 ipv6_gen_rnd_iid(&addr);
1353
1354 age = (now - ifp->tstamp) / HZ;
1355
1356 regen_advance = idev->cnf.regen_max_retry *
1357 idev->cnf.dad_transmits *
1358 max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
1359
1360 /* recalculate max_desync_factor each time and update
1361 * idev->desync_factor if it's larger
1362 */
1363 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1364 max_desync_factor = min_t(long,
1365 idev->cnf.max_desync_factor,
1366 cnf_temp_preferred_lft - regen_advance);
1367
1368 if (unlikely(idev->desync_factor > max_desync_factor)) {
1369 if (max_desync_factor > 0) {
1370 get_random_bytes(&idev->desync_factor,
1371 sizeof(idev->desync_factor));
1372 idev->desync_factor %= max_desync_factor;
1373 } else {
1374 idev->desync_factor = 0;
1375 }
1376 }
1377
1378 memset(&cfg, 0, sizeof(cfg));
1379 cfg.valid_lft = min_t(__u32, ifp->valid_lft,
1380 idev->cnf.temp_valid_lft + age);
1381 cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
1382 cfg.preferred_lft = min_t(__u32, ifp->prefered_lft, cfg.preferred_lft);
1383
1384 cfg.plen = ifp->prefix_len;
1385 tmp_tstamp = ifp->tstamp;
1386 spin_unlock_bh(&ifp->lock);
1387
1388 write_unlock_bh(&idev->lock);
1389
1390 /* A temporary address is created only if this calculated Preferred
1391 * Lifetime is greater than REGEN_ADVANCE time units. In particular,
1392 * an implementation must not create a temporary address with a zero
1393 * Preferred Lifetime.
1394 * Use age calculation as in addrconf_verify to avoid unnecessary
1395 * temporary addresses being generated.
1396 */
1397 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1398 if (cfg.preferred_lft <= regen_advance + age) {
1399 in6_ifa_put(ifp);
1400 in6_dev_put(idev);
1401 ret = -1;
1402 goto out;
1403 }
1404
1405 cfg.ifa_flags = IFA_F_TEMPORARY;
1406 /* set in addrconf_prefix_rcv() */
1407 if (ifp->flags & IFA_F_OPTIMISTIC)
1408 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
1409
1410 cfg.pfx = &addr;
1411 cfg.scope = ipv6_addr_scope(cfg.pfx);
1412
1413 ift = ipv6_add_addr(idev, &cfg, block, NULL);
1414 if (IS_ERR(ift)) {
1415 in6_ifa_put(ifp);
1416 in6_dev_put(idev);
1417 pr_info("%s: retry temporary address regeneration\n", __func__);
1418 write_lock_bh(&idev->lock);
1419 goto retry;
1420 }
1421
1422 spin_lock_bh(&ift->lock);
1423 ift->ifpub = ifp;
1424 ift->cstamp = now;
1425 ift->tstamp = tmp_tstamp;
1426 spin_unlock_bh(&ift->lock);
1427
1428 addrconf_dad_start(ift);
1429 in6_ifa_put(ift);
1430 in6_dev_put(idev);
1431 out:
1432 return ret;
1433 }
1434
1435 /*
1436 * Choose an appropriate source address (RFC3484)
1437 */
1438 enum {
1439 IPV6_SADDR_RULE_INIT = 0,
1440 IPV6_SADDR_RULE_LOCAL,
1441 IPV6_SADDR_RULE_SCOPE,
1442 IPV6_SADDR_RULE_PREFERRED,
1443 #ifdef CONFIG_IPV6_MIP6
1444 IPV6_SADDR_RULE_HOA,
1445 #endif
1446 IPV6_SADDR_RULE_OIF,
1447 IPV6_SADDR_RULE_LABEL,
1448 IPV6_SADDR_RULE_PRIVACY,
1449 IPV6_SADDR_RULE_ORCHID,
1450 IPV6_SADDR_RULE_PREFIX,
1451 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1452 IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1453 #endif
1454 IPV6_SADDR_RULE_MAX
1455 };
1456
1457 struct ipv6_saddr_score {
1458 int rule;
1459 int addr_type;
1460 struct inet6_ifaddr *ifa;
1461 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1462 int scopedist;
1463 int matchlen;
1464 };
1465
1466 struct ipv6_saddr_dst {
1467 const struct in6_addr *addr;
1468 int ifindex;
1469 int scope;
1470 int label;
1471 unsigned int prefs;
1472 };
1473
1474 static inline int ipv6_saddr_preferred(int type)
1475 {
1476 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1477 return 1;
1478 return 0;
1479 }
1480
1481 static bool ipv6_use_optimistic_addr(struct net *net,
1482 struct inet6_dev *idev)
1483 {
1484 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1485 if (!idev)
1486 return false;
1487 if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1488 return false;
1489 if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
1490 return false;
1491
1492 return true;
1493 #else
1494 return false;
1495 #endif
1496 }
1497
1498 static bool ipv6_allow_optimistic_dad(struct net *net,
1499 struct inet6_dev *idev)
1500 {
1501 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1502 if (!idev)
1503 return false;
1504 if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1505 return false;
1506
1507 return true;
1508 #else
1509 return false;
1510 #endif
1511 }
1512
1513 static int ipv6_get_saddr_eval(struct net *net,
1514 struct ipv6_saddr_score *score,
1515 struct ipv6_saddr_dst *dst,
1516 int i)
1517 {
1518 int ret;
1519
1520 if (i <= score->rule) {
1521 switch (i) {
1522 case IPV6_SADDR_RULE_SCOPE:
1523 ret = score->scopedist;
1524 break;
1525 case IPV6_SADDR_RULE_PREFIX:
1526 ret = score->matchlen;
1527 break;
1528 default:
1529 ret = !!test_bit(i, score->scorebits);
1530 }
1531 goto out;
1532 }
1533
1534 switch (i) {
1535 case IPV6_SADDR_RULE_INIT:
1536 /* Rule 0: remember if hiscore is not ready yet */
1537 ret = !!score->ifa;
1538 break;
1539 case IPV6_SADDR_RULE_LOCAL:
1540 /* Rule 1: Prefer same address */
1541 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1542 break;
1543 case IPV6_SADDR_RULE_SCOPE:
1544 /* Rule 2: Prefer appropriate scope
1545 *
1546 * ret
1547 * ^
1548 * -1 | d 15
1549 * ---+--+-+---> scope
1550 * |
1551 * | d is scope of the destination.
1552 * B-d | \
1553 * | \ <- smaller scope is better if
1554 * B-15 | \ if scope is enough for destination.
1555 * | ret = B - scope (-1 <= scope >= d <= 15).
1556 * d-C-1 | /
1557 * |/ <- greater is better
1558 * -C / if scope is not enough for destination.
1559 * /| ret = scope - C (-1 <= d < scope <= 15).
1560 *
1561 * d - C - 1 < B -15 (for all -1 <= d <= 15).
1562 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1563 * Assume B = 0 and we get C > 29.
1564 */
1565 ret = __ipv6_addr_src_scope(score->addr_type);
1566 if (ret >= dst->scope)
1567 ret = -ret;
1568 else
1569 ret -= 128; /* 30 is enough */
1570 score->scopedist = ret;
1571 break;
1572 case IPV6_SADDR_RULE_PREFERRED:
1573 {
1574 /* Rule 3: Avoid deprecated and optimistic addresses */
1575 u8 avoid = IFA_F_DEPRECATED;
1576
1577 if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
1578 avoid |= IFA_F_OPTIMISTIC;
1579 ret = ipv6_saddr_preferred(score->addr_type) ||
1580 !(score->ifa->flags & avoid);
1581 break;
1582 }
1583 #ifdef CONFIG_IPV6_MIP6
1584 case IPV6_SADDR_RULE_HOA:
1585 {
1586 /* Rule 4: Prefer home address */
1587 int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1588 ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1589 break;
1590 }
1591 #endif
1592 case IPV6_SADDR_RULE_OIF:
1593 /* Rule 5: Prefer outgoing interface */
1594 ret = (!dst->ifindex ||
1595 dst->ifindex == score->ifa->idev->dev->ifindex);
1596 break;
1597 case IPV6_SADDR_RULE_LABEL:
1598 /* Rule 6: Prefer matching label */
1599 ret = ipv6_addr_label(net,
1600 &score->ifa->addr, score->addr_type,
1601 score->ifa->idev->dev->ifindex) == dst->label;
1602 break;
1603 case IPV6_SADDR_RULE_PRIVACY:
1604 {
1605 /* Rule 7: Prefer public address
1606 * Note: prefer temporary address if use_tempaddr >= 2
1607 */
1608 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1609 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1610 score->ifa->idev->cnf.use_tempaddr >= 2;
1611 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1612 break;
1613 }
1614 case IPV6_SADDR_RULE_ORCHID:
1615 /* Rule 8-: Prefer ORCHID vs ORCHID or
1616 * non-ORCHID vs non-ORCHID
1617 */
1618 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1619 ipv6_addr_orchid(dst->addr));
1620 break;
1621 case IPV6_SADDR_RULE_PREFIX:
1622 /* Rule 8: Use longest matching prefix */
1623 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1624 if (ret > score->ifa->prefix_len)
1625 ret = score->ifa->prefix_len;
1626 score->matchlen = ret;
1627 break;
1628 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1629 case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1630 /* Optimistic addresses still have lower precedence than other
1631 * preferred addresses.
1632 */
1633 ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1634 break;
1635 #endif
1636 default:
1637 ret = 0;
1638 }
1639
1640 if (ret)
1641 __set_bit(i, score->scorebits);
1642 score->rule = i;
1643 out:
1644 return ret;
1645 }
1646
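/* Score every address on @idev against the destination, rule by rule.
 * scores[] holds two entries: the best candidate so far (hiscore_idx) and
 * the address currently being evaluated; when the current address wins on
 * the first differing rule the two slots are swapped. Because the address
 * list is sorted by scope, the walk can stop early once the scope rule shows
 * that the remaining entries have too small a scope.
 */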
1647 static int __ipv6_dev_get_saddr(struct net *net,
1648 struct ipv6_saddr_dst *dst,
1649 struct inet6_dev *idev,
1650 struct ipv6_saddr_score *scores,
1651 int hiscore_idx)
1652 {
1653 struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1654
1655 list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
1656 int i;
1657
1658 /*
1659 * - Tentative Address (RFC2462 section 5.4)
1660 * - A tentative address is not considered
1661 * "assigned to an interface" in the traditional
1662 * sense, unless it is also flagged as optimistic.
1663 * - Candidate Source Address (section 4)
1664 * - In any case, anycast addresses, multicast
1665 * addresses, and the unspecified address MUST
1666 * NOT be included in a candidate set.
1667 */
1668 if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1669 (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1670 continue;
1671
1672 score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1673
1674 if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1675 score->addr_type & IPV6_ADDR_MULTICAST)) {
1676 net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1677 idev->dev->name);
1678 continue;
1679 }
1680
1681 score->rule = -1;
1682 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1683
1684 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1685 int minihiscore, miniscore;
1686
1687 minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1688 miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1689
1690 if (minihiscore > miniscore) {
1691 if (i == IPV6_SADDR_RULE_SCOPE &&
1692 score->scopedist > 0) {
1693 /*
1694 * special case:
1695 * each remaining entry
1696 * has too small (not enough)
1697 * scope, because ifa entries
1698 * are sorted by their scope
1699 * values.
1700 */
1701 goto out;
1702 }
1703 break;
1704 } else if (minihiscore < miniscore) {
1705 swap(hiscore, score);
1706 hiscore_idx = 1 - hiscore_idx;
1707
1708 /* restore our iterator */
1709 score->ifa = hiscore->ifa;
1710
1711 break;
1712 }
1713 }
1714 }
1715 out:
1716 return hiscore_idx;
1717 }
1718
1719 static int ipv6_get_saddr_master(struct net *net,
1720 const struct net_device *dst_dev,
1721 const struct net_device *master,
1722 struct ipv6_saddr_dst *dst,
1723 struct ipv6_saddr_score *scores,
1724 int hiscore_idx)
1725 {
1726 struct inet6_dev *idev;
1727
1728 idev = __in6_dev_get(dst_dev);
1729 if (idev)
1730 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1731 scores, hiscore_idx);
1732
1733 idev = __in6_dev_get(master);
1734 if (idev)
1735 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1736 scores, hiscore_idx);
1737
1738 return hiscore_idx;
1739 }
1740
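/* Select a source address for @daddr (RFC 6724 style). For multicast or
 * link-local destinations, or when use_oif_addrs_only is set, only addresses
 * on the outgoing interface are considered; otherwise all devices in the
 * same L3 (l3mdev/VRF) domain are scanned, preferring dst_dev and its master
 * before the other enslaved devices.
 */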
1741 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1742 const struct in6_addr *daddr, unsigned int prefs,
1743 struct in6_addr *saddr)
1744 {
1745 struct ipv6_saddr_score scores[2], *hiscore;
1746 struct ipv6_saddr_dst dst;
1747 struct inet6_dev *idev;
1748 struct net_device *dev;
1749 int dst_type;
1750 bool use_oif_addr = false;
1751 int hiscore_idx = 0;
1752 int ret = 0;
1753
1754 dst_type = __ipv6_addr_type(daddr);
1755 dst.addr = daddr;
1756 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1757 dst.scope = __ipv6_addr_src_scope(dst_type);
1758 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1759 dst.prefs = prefs;
1760
1761 scores[hiscore_idx].rule = -1;
1762 scores[hiscore_idx].ifa = NULL;
1763
1764 rcu_read_lock();
1765
1766 /* Candidate Source Address (section 4)
1767 * - multicast and link-local destination address,
1768 * the set of candidate source address MUST only
1769 * include addresses assigned to interfaces
1770 * belonging to the same link as the outgoing
1771 * interface.
1772 * (- For site-local destination addresses, the
1773 * set of candidate source addresses MUST only
1774 * include addresses assigned to interfaces
1775 * belonging to the same site as the outgoing
1776 * interface.)
1777 * - "It is RECOMMENDED that the candidate source addresses
1778 * be the set of unicast addresses assigned to the
1779 * interface that will be used to send to the destination
1780 * (the 'outgoing' interface)." (RFC 6724)
1781 */
1782 if (dst_dev) {
1783 idev = __in6_dev_get(dst_dev);
1784 if ((dst_type & IPV6_ADDR_MULTICAST) ||
1785 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1786 (idev && idev->cnf.use_oif_addrs_only)) {
1787 use_oif_addr = true;
1788 }
1789 }
1790
1791 if (use_oif_addr) {
1792 if (idev)
1793 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1794 } else {
1795 const struct net_device *master;
1796 int master_idx = 0;
1797
1798 /* if dst_dev exists and is enslaved to an L3 device, then
1799 * prefer addresses from dst_dev and then the master over
1800 * any other enslaved devices in the L3 domain.
1801 */
1802 master = l3mdev_master_dev_rcu(dst_dev);
1803 if (master) {
1804 master_idx = master->ifindex;
1805
1806 hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1807 master, &dst,
1808 scores, hiscore_idx);
1809
1810 if (scores[hiscore_idx].ifa)
1811 goto out;
1812 }
1813
1814 for_each_netdev_rcu(net, dev) {
1815 /* only consider addresses on devices in the
1816 * same L3 domain
1817 */
1818 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1819 continue;
1820 idev = __in6_dev_get(dev);
1821 if (!idev)
1822 continue;
1823 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1824 }
1825 }
1826
1827 out:
1828 hiscore = &scores[hiscore_idx];
1829 if (!hiscore->ifa)
1830 ret = -EADDRNOTAVAIL;
1831 else
1832 *saddr = hiscore->ifa->addr;
1833
1834 rcu_read_unlock();
1835 return ret;
1836 }
1837 EXPORT_SYMBOL(ipv6_dev_get_saddr);
1838
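/* Scan the address list in reverse and return the first link-local
 * (IFA_LINK scope) address whose flags do not intersect banned_flags;
 * -EADDRNOTAVAIL if none is found.
 */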
1839 int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1840 u32 banned_flags)
1841 {
1842 struct inet6_ifaddr *ifp;
1843 int err = -EADDRNOTAVAIL;
1844
1845 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1846 if (ifp->scope > IFA_LINK)
1847 break;
1848 if (ifp->scope == IFA_LINK &&
1849 !(ifp->flags & banned_flags)) {
1850 *addr = ifp->addr;
1851 err = 0;
1852 break;
1853 }
1854 }
1855 return err;
1856 }
1857
1858 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1859 u32 banned_flags)
1860 {
1861 struct inet6_dev *idev;
1862 int err = -EADDRNOTAVAIL;
1863
1864 rcu_read_lock();
1865 idev = __in6_dev_get(dev);
1866 if (idev) {
1867 read_lock_bh(&idev->lock);
1868 err = __ipv6_get_lladdr(idev, addr, banned_flags);
1869 read_unlock_bh(&idev->lock);
1870 }
1871 rcu_read_unlock();
1872 return err;
1873 }
1874
1875 static int ipv6_count_addresses(const struct inet6_dev *idev)
1876 {
1877 const struct inet6_ifaddr *ifp;
1878 int cnt = 0;
1879
1880 rcu_read_lock();
1881 list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1882 cnt++;
1883 rcu_read_unlock();
1884 return cnt;
1885 }
1886
1887 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1888 const struct net_device *dev, int strict)
1889 {
1890 return ipv6_chk_addr_and_flags(net, addr, dev, !dev,
1891 strict, IFA_F_TENTATIVE);
1892 }
1893 EXPORT_SYMBOL(ipv6_chk_addr);
1894
1895 /* device argument is used to find the L3 domain of interest. If
1896 * skip_dev_check is set, then the ifp device is not checked against
1897 * the passed-in dev argument. So the two cases for address checks are:
1898 * 1. does the address exist in the L3 domain that dev is part of
1899 * (skip_dev_check = true), or
1900 *
1901 * 2. does the address exist on the specific device
1902 * (skip_dev_check = false)
1903 */
1904 static struct net_device *
1905 __ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1906 const struct net_device *dev, bool skip_dev_check,
1907 int strict, u32 banned_flags)
1908 {
1909 unsigned int hash = inet6_addr_hash(net, addr);
1910 struct net_device *l3mdev, *ndev;
1911 struct inet6_ifaddr *ifp;
1912 u32 ifp_flags;
1913
1914 rcu_read_lock();
1915
1916 l3mdev = l3mdev_master_dev_rcu(dev);
1917 if (skip_dev_check)
1918 dev = NULL;
1919
1920 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1921 ndev = ifp->idev->dev;
1922 if (!net_eq(dev_net(ndev), net))
1923 continue;
1924
1925 if (l3mdev_master_dev_rcu(ndev) != l3mdev)
1926 continue;
1927
1928 /* Decouple optimistic from tentative for evaluation here.
1929 * Ban optimistic addresses explicitly, when required.
1930 */
1931 ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1932 ? (ifp->flags&~IFA_F_TENTATIVE)
1933 : ifp->flags;
1934 if (ipv6_addr_equal(&ifp->addr, addr) &&
1935 !(ifp_flags&banned_flags) &&
1936 (!dev || ndev == dev ||
1937 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1938 rcu_read_unlock();
1939 return ndev;
1940 }
1941 }
1942
1943 rcu_read_unlock();
1944 return NULL;
1945 }
1946
1947 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1948 const struct net_device *dev, bool skip_dev_check,
1949 int strict, u32 banned_flags)
1950 {
1951 return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check,
1952 strict, banned_flags) ? 1 : 0;
1953 }
1954 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
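/* In practice (illustrative): ipv6_chk_addr(net, addr, NULL, 0) asks whether
 * the address is assigned anywhere in the namespace, with addresses still
 * tentative for DAD excluded (optimistic ones deliberately are not), while
 * passing a dev together with strict != 0 restricts the match to that
 * device, as needed for link-local and host-scoped addresses.
 */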
1955
1956
1957 /* Compares an address/prefix_len with addresses on device @dev.
1958 * If one is found it returns true.
1959 */
1960 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1961 const unsigned int prefix_len, struct net_device *dev)
1962 {
1963 const struct inet6_ifaddr *ifa;
1964 const struct inet6_dev *idev;
1965 bool ret = false;
1966
1967 rcu_read_lock();
1968 idev = __in6_dev_get(dev);
1969 if (idev) {
1970 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1971 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1972 if (ret)
1973 break;
1974 }
1975 }
1976 rcu_read_unlock();
1977
1978 return ret;
1979 }
1980 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1981
1982 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1983 {
1984 const struct inet6_ifaddr *ifa;
1985 const struct inet6_dev *idev;
1986 int onlink;
1987
1988 onlink = 0;
1989 rcu_read_lock();
1990 idev = __in6_dev_get(dev);
1991 if (idev) {
1992 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1993 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1994 ifa->prefix_len);
1995 if (onlink)
1996 break;
1997 }
1998 }
1999 rcu_read_unlock();
2000 return onlink;
2001 }
2002 EXPORT_SYMBOL(ipv6_chk_prefix);
2003
2004 /**
2005 * ipv6_dev_find - find the first device with a given source address.
2006 * @net: the net namespace
2007 * @addr: the source address
2008 *
2009 * The caller should be protected by RCU, or RTNL.
2010 */
2011 struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
2012 struct net_device *dev)
2013 {
2014 return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1,
2015 IFA_F_TENTATIVE);
2016 }
2017 EXPORT_SYMBOL(ipv6_dev_find);
2018
2019 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
2020 struct net_device *dev, int strict)
2021 {
2022 unsigned int hash = inet6_addr_hash(net, addr);
2023 struct inet6_ifaddr *ifp, *result = NULL;
2024
2025 rcu_read_lock();
2026 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
2027 if (!net_eq(dev_net(ifp->idev->dev), net))
2028 continue;
2029 if (ipv6_addr_equal(&ifp->addr, addr)) {
2030 if (!dev || ifp->idev->dev == dev ||
2031 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
2032 if (in6_ifa_hold_safe(ifp)) {
2033 result = ifp;
2034 break;
2035 }
2036 }
2037 }
2038 }
2039 rcu_read_unlock();
2040
2041 return result;
2042 }
2043
2044 /* Gets referenced address, destroys ifaddr */
2045
2046 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
2047 {
2048 if (dad_failed)
2049 ifp->flags |= IFA_F_DADFAILED;
2050
2051 if (ifp->flags&IFA_F_TEMPORARY) {
2052 struct inet6_ifaddr *ifpub;
2053 spin_lock_bh(&ifp->lock);
2054 ifpub = ifp->ifpub;
2055 if (ifpub) {
2056 in6_ifa_hold(ifpub);
2057 spin_unlock_bh(&ifp->lock);
2058 ipv6_create_tempaddr(ifpub, true);
2059 in6_ifa_put(ifpub);
2060 } else {
2061 spin_unlock_bh(&ifp->lock);
2062 }
2063 ipv6_del_addr(ifp);
2064 } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
2065 spin_lock_bh(&ifp->lock);
2066 addrconf_del_dad_work(ifp);
2067 ifp->flags |= IFA_F_TENTATIVE;
2068 if (dad_failed)
2069 ifp->flags &= ~IFA_F_OPTIMISTIC;
2070 spin_unlock_bh(&ifp->lock);
2071 if (dad_failed)
2072 ipv6_ifa_notify(0, ifp);
2073 in6_ifa_put(ifp);
2074 } else {
2075 ipv6_del_addr(ifp);
2076 }
2077 }
2078
2079 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
2080 {
2081 int err = -ENOENT;
2082
2083 spin_lock_bh(&ifp->lock);
2084 if (ifp->state == INET6_IFADDR_STATE_DAD) {
2085 ifp->state = INET6_IFADDR_STATE_POSTDAD;
2086 err = 0;
2087 }
2088 spin_unlock_bh(&ifp->lock);
2089
2090 return err;
2091 }
2092
2093 void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
2094 {
2095 struct inet6_dev *idev = ifp->idev;
2096 struct net *net = dev_net(ifp->idev->dev);
2097
2098 if (addrconf_dad_end(ifp)) {
2099 in6_ifa_put(ifp);
2100 return;
2101 }
2102
2103 net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n",
2104 ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
2105
2106 spin_lock_bh(&ifp->lock);
2107
2108 if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2109 struct in6_addr new_addr;
2110 struct inet6_ifaddr *ifp2;
2111 int retries = ifp->stable_privacy_retry + 1;
2112 struct ifa6_config cfg = {
2113 .pfx = &new_addr,
2114 .plen = ifp->prefix_len,
2115 .ifa_flags = ifp->flags,
2116 .valid_lft = ifp->valid_lft,
2117 .preferred_lft = ifp->prefered_lft,
2118 .scope = ifp->scope,
2119 };
2120
2121 if (retries > net->ipv6.sysctl.idgen_retries) {
2122 net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2123 ifp->idev->dev->name);
2124 goto errdad;
2125 }
2126
2127 new_addr = ifp->addr;
2128 if (ipv6_generate_stable_address(&new_addr, retries,
2129 idev))
2130 goto errdad;
2131
2132 spin_unlock_bh(&ifp->lock);
2133
2134 if (idev->cnf.max_addresses &&
2135 ipv6_count_addresses(idev) >=
2136 idev->cnf.max_addresses)
2137 goto lock_errdad;
2138
2139 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2140 ifp->idev->dev->name);
2141
2142 ifp2 = ipv6_add_addr(idev, &cfg, false, NULL);
2143 if (IS_ERR(ifp2))
2144 goto lock_errdad;
2145
2146 spin_lock_bh(&ifp2->lock);
2147 ifp2->stable_privacy_retry = retries;
2148 ifp2->state = INET6_IFADDR_STATE_PREDAD;
2149 spin_unlock_bh(&ifp2->lock);
2150
2151 addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2152 in6_ifa_put(ifp2);
2153 lock_errdad:
2154 spin_lock_bh(&ifp->lock);
2155 }
2156
2157 errdad:
2158 /* transition from _POSTDAD to _ERRDAD */
2159 ifp->state = INET6_IFADDR_STATE_ERRDAD;
2160 spin_unlock_bh(&ifp->lock);
2161
2162 addrconf_mod_dad_work(ifp, 0);
2163 in6_ifa_put(ifp);
2164 }
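/* Recovery sketch for stable-privacy addresses: on a DAD conflict the code
 * above retries ipv6_generate_stable_address() with an incremented retry
 * counter, giving up after idgen_retries attempts, and schedules DAD for the
 * replacement address after idgen_delay; both values are exposed as
 * per-namespace sysctls under net.ipv6.
 */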
2165
2166 /* Join the solicited-node multicast group.
2167 * Caller must hold RTNL. */
2168 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2169 {
2170 struct in6_addr maddr;
2171
2172 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2173 return;
2174
2175 addrconf_addr_solict_mult(addr, &maddr);
2176 ipv6_dev_mc_inc(dev, &maddr);
2177 }
2178
2179 /* caller must hold RTNL */
2180 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2181 {
2182 struct in6_addr maddr;
2183
2184 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2185 return;
2186
2187 addrconf_addr_solict_mult(addr, &maddr);
2188 __ipv6_dev_mc_dec(idev, &maddr);
2189 }
2190
2191 /* caller must hold RTNL */
2192 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2193 {
2194 struct in6_addr addr;
2195
2196 if (ifp->prefix_len >= 127) /* RFC 6164 */
2197 return;
2198 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2199 if (ipv6_addr_any(&addr))
2200 return;
2201 __ipv6_dev_ac_inc(ifp->idev, &addr);
2202 }
2203
2204 /* caller must hold RTNL */
2205 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2206 {
2207 struct in6_addr addr;
2208
2209 if (ifp->prefix_len >= 127) /* RFC 6164 */
2210 return;
2211 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2212 if (ipv6_addr_any(&addr))
2213 return;
2214 __ipv6_dev_ac_dec(ifp->idev, &addr);
2215 }
2216
2217 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2218 {
2219 switch (dev->addr_len) {
2220 case ETH_ALEN:
2221 memcpy(eui, dev->dev_addr, 3);
2222 eui[3] = 0xFF;
2223 eui[4] = 0xFE;
2224 memcpy(eui + 5, dev->dev_addr + 3, 3);
2225 break;
2226 case EUI64_ADDR_LEN:
2227 memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2228 eui[0] ^= 2;
2229 break;
2230 default:
2231 return -1;
2232 }
2233
2234 return 0;
2235 }
2236
2237 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2238 {
2239 union fwnet_hwaddr *ha;
2240
2241 if (dev->addr_len != FWNET_ALEN)
2242 return -1;
2243
2244 ha = (union fwnet_hwaddr *)dev->dev_addr;
2245
2246 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2247 eui[0] ^= 2;
2248 return 0;
2249 }
2250
2251 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2252 {
2253 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
2254 if (dev->addr_len != ARCNET_ALEN)
2255 return -1;
2256 memset(eui, 0, 7);
2257 eui[7] = *(u8 *)dev->dev_addr;
2258 return 0;
2259 }
2260
2261 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2262 {
2263 if (dev->addr_len != INFINIBAND_ALEN)
2264 return -1;
2265 memcpy(eui, dev->dev_addr + 12, 8);
2266 eui[0] |= 2;
2267 return 0;
2268 }
2269
2270 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2271 {
2272 if (addr == 0)
2273 return -1;
2274 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2275 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2276 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2277 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2278 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2279 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2280 eui[1] = 0;
2281 eui[2] = 0x5E;
2282 eui[3] = 0xFE;
2283 memcpy(eui + 4, &addr, 4);
2284 return 0;
2285 }
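/* Example (illustrative): for a globally routable IPv4 address such as
 * 203.0.113.5 this produces the ISATAP interface identifier
 * 02:00:5e:fe:cb:00:71:05 (universal/local bit set), whereas a private
 * address such as 10.1.2.3 yields 00:00:5e:fe:0a:01:02:03, following the
 * RFC 5214 section 6.1 format.
 */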
2286
2287 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2288 {
2289 if (dev->priv_flags & IFF_ISATAP)
2290 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2291 return -1;
2292 }
2293
2294 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2295 {
2296 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2297 }
2298
2299 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2300 {
2301 memcpy(eui, dev->perm_addr, 3);
2302 memcpy(eui + 5, dev->perm_addr + 3, 3);
2303 eui[3] = 0xFF;
2304 eui[4] = 0xFE;
2305 eui[0] ^= 2;
2306 return 0;
2307 }
2308
2309 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2310 {
2311 switch (dev->type) {
2312 case ARPHRD_ETHER:
2313 case ARPHRD_FDDI:
2314 return addrconf_ifid_eui48(eui, dev);
2315 case ARPHRD_ARCNET:
2316 return addrconf_ifid_arcnet(eui, dev);
2317 case ARPHRD_INFINIBAND:
2318 return addrconf_ifid_infiniband(eui, dev);
2319 case ARPHRD_SIT:
2320 return addrconf_ifid_sit(eui, dev);
2321 case ARPHRD_IPGRE:
2322 case ARPHRD_TUNNEL:
2323 return addrconf_ifid_gre(eui, dev);
2324 case ARPHRD_6LOWPAN:
2325 return addrconf_ifid_6lowpan(eui, dev);
2326 case ARPHRD_IEEE1394:
2327 return addrconf_ifid_ieee1394(eui, dev);
2328 case ARPHRD_TUNNEL6:
2329 case ARPHRD_IP6GRE:
2330 case ARPHRD_RAWIP:
2331 return addrconf_ifid_ip6tnl(eui, dev);
2332 }
2333 return -1;
2334 }
2335
2336 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2337 {
2338 int err = -1;
2339 struct inet6_ifaddr *ifp;
2340
2341 read_lock_bh(&idev->lock);
2342 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2343 if (ifp->scope > IFA_LINK)
2344 break;
2345 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2346 memcpy(eui, ifp->addr.s6_addr+8, 8);
2347 err = 0;
2348 break;
2349 }
2350 }
2351 read_unlock_bh(&idev->lock);
2352 return err;
2353 }
2354
2355 /* Generation of a randomized Interface Identifier
2356 * draft-ietf-6man-rfc4941bis, Section 3.3.1
2357 */
2358
2359 static void ipv6_gen_rnd_iid(struct in6_addr *addr)
2360 {
2361 regen:
2362 get_random_bytes(&addr->s6_addr[8], 8);
2363
2364 /* <draft-ietf-6man-rfc4941bis-08.txt>, Section 3.3.1:
2365 * check if generated address is not inappropriate:
2366 *
2367 * - Reserved IPv6 Interface Identifiers
2368 * - XXX: already assigned to an address on the device
2369 */
2370
2371 /* Subnet-router anycast: 0000:0000:0000:0000 */
2372 if (!(addr->s6_addr32[2] | addr->s6_addr32[3]))
2373 goto regen;
2374
2375 /* IANA Ethernet block: 0200:5EFF:FE00:0000-0200:5EFF:FE00:5212
2376 * Proxy Mobile IPv6: 0200:5EFF:FE00:5213
2377 * IANA Ethernet block: 0200:5EFF:FE00:5214-0200:5EFF:FEFF:FFFF
2378 */
2379 if (ntohl(addr->s6_addr32[2]) == 0x02005eff &&
2380 (ntohl(addr->s6_addr32[3]) & 0xff000000) == 0xfe000000)
2381 goto regen;
2382
2383 /* Reserved subnet anycast addresses */
2384 if (ntohl(addr->s6_addr32[2]) == 0xfdffffff &&
2385 ntohl(addr->s6_addr32[3]) >= 0xffffff80)
2386 goto regen;
2387 }
2388
2389 /*
2390 * Add prefix route.
2391 */
2392
2393 static void
2394 addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric,
2395 struct net_device *dev, unsigned long expires,
2396 u32 flags, gfp_t gfp_flags)
2397 {
2398 struct fib6_config cfg = {
2399 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2400 .fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
2401 .fc_ifindex = dev->ifindex,
2402 .fc_expires = expires,
2403 .fc_dst_len = plen,
2404 .fc_flags = RTF_UP | flags,
2405 .fc_nlinfo.nl_net = dev_net(dev),
2406 .fc_protocol = RTPROT_KERNEL,
2407 .fc_type = RTN_UNICAST,
2408 };
2409
2410 cfg.fc_dst = *pfx;
2411
2412 /* Prevent useless cloning on PtP SIT.
2413 This is done here on the assumption that the whole
2414 class of non-broadcast devices does not need cloning.
2415 */
2416 #if IS_ENABLED(CONFIG_IPV6_SIT)
2417 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2418 cfg.fc_flags |= RTF_NONEXTHOP;
2419 #endif
2420
2421 ip6_route_add(&cfg, gfp_flags, NULL);
2422 }
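/* The result is roughly what "ip -6 route" shows for an autoconfigured
 * prefix, e.g. "2001:db8:1::/64 dev eth0 proto kernel metric 256
 * expires 86400sec pref medium" (example output only; the exact fields
 * depend on the caller's metric, expiry and flags).
 */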
2423
2424
2425 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2426 int plen,
2427 const struct net_device *dev,
2428 u32 flags, u32 noflags,
2429 bool no_gw)
2430 {
2431 struct fib6_node *fn;
2432 struct fib6_info *rt = NULL;
2433 struct fib6_table *table;
2434 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2435
2436 table = fib6_get_table(dev_net(dev), tb_id);
2437 if (!table)
2438 return NULL;
2439
2440 rcu_read_lock();
2441 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2442 if (!fn)
2443 goto out;
2444
2445 for_each_fib6_node_rt_rcu(fn) {
2446 /* prefix routes only use builtin fib6_nh */
2447 if (rt->nh)
2448 continue;
2449
2450 if (rt->fib6_nh->fib_nh_dev->ifindex != dev->ifindex)
2451 continue;
2452 if (no_gw && rt->fib6_nh->fib_nh_gw_family)
2453 continue;
2454 if ((rt->fib6_flags & flags) != flags)
2455 continue;
2456 if ((rt->fib6_flags & noflags) != 0)
2457 continue;
2458 if (!fib6_info_hold_safe(rt))
2459 continue;
2460 break;
2461 }
2462 out:
2463 rcu_read_unlock();
2464 return rt;
2465 }
2466
2467
2468 /* Create "default" multicast route to the interface */
2469
2470 static void addrconf_add_mroute(struct net_device *dev)
2471 {
2472 struct fib6_config cfg = {
2473 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2474 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2475 .fc_ifindex = dev->ifindex,
2476 .fc_dst_len = 8,
2477 .fc_flags = RTF_UP,
2478 .fc_type = RTN_MULTICAST,
2479 .fc_nlinfo.nl_net = dev_net(dev),
2480 .fc_protocol = RTPROT_KERNEL,
2481 };
2482
2483 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2484
2485 ip6_route_add(&cfg, GFP_KERNEL, NULL);
2486 }
2487
2488 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2489 {
2490 struct inet6_dev *idev;
2491
2492 ASSERT_RTNL();
2493
2494 idev = ipv6_find_idev(dev);
2495 if (IS_ERR(idev))
2496 return idev;
2497
2498 if (idev->cnf.disable_ipv6)
2499 return ERR_PTR(-EACCES);
2500
2501 /* Add default multicast route */
2502 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2503 addrconf_add_mroute(dev);
2504
2505 return idev;
2506 }
2507
2508 static void manage_tempaddrs(struct inet6_dev *idev,
2509 struct inet6_ifaddr *ifp,
2510 __u32 valid_lft, __u32 prefered_lft,
2511 bool create, unsigned long now)
2512 {
2513 u32 flags;
2514 struct inet6_ifaddr *ift;
2515
2516 read_lock_bh(&idev->lock);
2517 /* update all temporary addresses in the list */
2518 list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2519 int age, max_valid, max_prefered;
2520
2521 if (ifp != ift->ifpub)
2522 continue;
2523
2524 /* RFC 4941 section 3.3:
2525 * If a received option will extend the lifetime of a public
2526 * address, the lifetimes of temporary addresses should
2527 * be extended, subject to the overall constraint that no
2528 * temporary addresses should ever remain "valid" or "preferred"
2529 * for a time longer than (TEMP_VALID_LIFETIME) or
2530 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2531 */
2532 age = (now - ift->cstamp) / HZ;
2533 max_valid = idev->cnf.temp_valid_lft - age;
2534 if (max_valid < 0)
2535 max_valid = 0;
2536
2537 max_prefered = idev->cnf.temp_prefered_lft -
2538 idev->desync_factor - age;
2539 if (max_prefered < 0)
2540 max_prefered = 0;
2541
2542 if (valid_lft > max_valid)
2543 valid_lft = max_valid;
2544
2545 if (prefered_lft > max_prefered)
2546 prefered_lft = max_prefered;
2547
2548 spin_lock(&ift->lock);
2549 flags = ift->flags;
2550 ift->valid_lft = valid_lft;
2551 ift->prefered_lft = prefered_lft;
2552 ift->tstamp = now;
2553 if (prefered_lft > 0)
2554 ift->flags &= ~IFA_F_DEPRECATED;
2555
2556 spin_unlock(&ift->lock);
2557 if (!(flags&IFA_F_TENTATIVE))
2558 ipv6_ifa_notify(0, ift);
2559 }
2560
2561 /* Also create a temporary address if it's enabled but no temporary
2562 * address currently exists.
2563 * However, we are also called with valid_lft == 0, prefered_lft == 0 and
2564 * create == false as part of cleanup (i.e. when deleting the mngtmpaddr);
2565 * that must not result in the creation of a new temporary address.
2566 */
2567 if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft))
2568 create = true;
2569
2570 if (create && idev->cnf.use_tempaddr > 0) {
2571 /* When a new public address is created as described
2572 * in [ADDRCONF], also create a new temporary address.
2573 */
2574 read_unlock_bh(&idev->lock);
2575 ipv6_create_tempaddr(ifp, false);
2576 } else {
2577 read_unlock_bh(&idev->lock);
2578 }
2579 }
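/* Worked example (illustrative): with temp_valid_lft = 7 days and a
 * temporary address that is already 2 days old, an RA advertising a 30 day
 * valid lifetime only extends that temporary address to 5 more days
 * (7 days minus its age); the preferred lifetime is capped the same way,
 * additionally reduced by the per-device desync_factor.
 */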
2580
2581 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2582 {
2583 return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2584 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2585 }
2586
2587 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2588 const struct prefix_info *pinfo,
2589 struct inet6_dev *in6_dev,
2590 const struct in6_addr *addr, int addr_type,
2591 u32 addr_flags, bool sllao, bool tokenized,
2592 __u32 valid_lft, u32 prefered_lft)
2593 {
2594 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2595 int create = 0, update_lft = 0;
2596
2597 if (!ifp && valid_lft) {
2598 int max_addresses = in6_dev->cnf.max_addresses;
2599 struct ifa6_config cfg = {
2600 .pfx = addr,
2601 .plen = pinfo->prefix_len,
2602 .ifa_flags = addr_flags,
2603 .valid_lft = valid_lft,
2604 .preferred_lft = prefered_lft,
2605 .scope = addr_type & IPV6_ADDR_SCOPE_MASK,
2606 };
2607
2608 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2609 if ((net->ipv6.devconf_all->optimistic_dad ||
2610 in6_dev->cnf.optimistic_dad) &&
2611 !net->ipv6.devconf_all->forwarding && sllao)
2612 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
2613 #endif
2614
2615 /* Do not allow creating too many autoconfigured
2616 * addresses; this would be too easy a way to crash the kernel.
2617 */
2618 if (!max_addresses ||
2619 ipv6_count_addresses(in6_dev) < max_addresses)
2620 ifp = ipv6_add_addr(in6_dev, &cfg, false, NULL);
2621
2622 if (IS_ERR_OR_NULL(ifp))
2623 return -1;
2624
2625 create = 1;
2626 spin_lock_bh(&ifp->lock);
2627 ifp->flags |= IFA_F_MANAGETEMPADDR;
2628 ifp->cstamp = jiffies;
2629 ifp->tokenized = tokenized;
2630 spin_unlock_bh(&ifp->lock);
2631 addrconf_dad_start(ifp);
2632 }
2633
2634 if (ifp) {
2635 u32 flags;
2636 unsigned long now;
2637 u32 stored_lft;
2638
2639 /* update lifetime (RFC2462 5.5.3 e) */
2640 spin_lock_bh(&ifp->lock);
2641 now = jiffies;
2642 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2643 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2644 else
2645 stored_lft = 0;
2646 if (!create && stored_lft) {
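/* MIN_VALID_LIFETIME is the RFC 4862 "two hours" floor: an
 * advertised valid lifetime can never shrink the remaining
 * lifetime below min(stored_lft, 2h), which blunts
 * denial-of-service via forged short-lifetime prefix options.
 */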
2647 const u32 minimum_lft = min_t(u32,
2648 stored_lft, MIN_VALID_LIFETIME);
2649 valid_lft = max(valid_lft, minimum_lft);
2650
2651 /* RFC4862 Section 5.5.3e:
2652 * "Note that the preferred lifetime of the
2653 * corresponding address is always reset to
2654 * the Preferred Lifetime in the received
2655 * Prefix Information option, regardless of
2656 * whether the valid lifetime is also reset or
2657 * ignored."
2658 *
2659 * So we should always update prefered_lft here.
2660 */
2661 update_lft = 1;
2662 }
2663
2664 if (update_lft) {
2665 ifp->valid_lft = valid_lft;
2666 ifp->prefered_lft = prefered_lft;
2667 ifp->tstamp = now;
2668 flags = ifp->flags;
2669 ifp->flags &= ~IFA_F_DEPRECATED;
2670 spin_unlock_bh(&ifp->lock);
2671
2672 if (!(flags&IFA_F_TENTATIVE))
2673 ipv6_ifa_notify(0, ifp);
2674 } else
2675 spin_unlock_bh(&ifp->lock);
2676
2677 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2678 create, now);
2679
2680 in6_ifa_put(ifp);
2681 addrconf_verify();
2682 }
2683
2684 return 0;
2685 }
2686 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
2687
2688 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2689 {
2690 struct prefix_info *pinfo;
2691 __u32 valid_lft;
2692 __u32 prefered_lft;
2693 int addr_type, err;
2694 u32 addr_flags = 0;
2695 struct inet6_dev *in6_dev;
2696 struct net *net = dev_net(dev);
2697
2698 pinfo = (struct prefix_info *) opt;
2699
2700 if (len < sizeof(struct prefix_info)) {
2701 netdev_dbg(dev, "addrconf: prefix option too short\n");
2702 return;
2703 }
2704
2705 /*
2706 * Validation checks ([ADDRCONF], page 19)
2707 */
2708
2709 addr_type = ipv6_addr_type(&pinfo->prefix);
2710
2711 if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2712 return;
2713
2714 valid_lft = ntohl(pinfo->valid);
2715 prefered_lft = ntohl(pinfo->prefered);
2716
2717 if (prefered_lft > valid_lft) {
2718 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2719 return;
2720 }
2721
2722 in6_dev = in6_dev_get(dev);
2723
2724 if (!in6_dev) {
2725 net_dbg_ratelimited("addrconf: device %s not configured\n",
2726 dev->name);
2727 return;
2728 }
2729
2730 if (valid_lft != 0 && valid_lft < in6_dev->cnf.accept_ra_min_lft)
2731 goto put;
2732
2733 /*
2734 * Two things going on here:
2735 * 1) Add routes for on-link prefixes
2736 * 2) Configure prefixes with the auto flag set
2737 */
2738
2739 if (pinfo->onlink) {
2740 struct fib6_info *rt;
2741 unsigned long rt_expires;
2742
2743 /* Avoid arithmetic overflow. Really, we could
2744 * save rt_expires in seconds, likely valid_lft,
2745 * but that would require division in fib gc, which is
2746 * not good.
2747 */
2748 if (HZ > USER_HZ)
2749 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2750 else
2751 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2752
2753 if (addrconf_finite_timeout(rt_expires))
2754 rt_expires *= HZ;
2755
2756 rt = addrconf_get_prefix_route(&pinfo->prefix,
2757 pinfo->prefix_len,
2758 dev,
2759 RTF_ADDRCONF | RTF_PREFIX_RT,
2760 RTF_DEFAULT, true);
2761
2762 if (rt) {
2763 /* Autoconf prefix route */
2764 if (valid_lft == 0) {
2765 ip6_del_rt(net, rt, false);
2766 rt = NULL;
2767 } else if (addrconf_finite_timeout(rt_expires)) {
2768 /* not infinity */
2769 fib6_set_expires(rt, jiffies + rt_expires);
2770 } else {
2771 fib6_clean_expires(rt);
2772 }
2773 } else if (valid_lft) {
2774 clock_t expires = 0;
2775 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2776 if (addrconf_finite_timeout(rt_expires)) {
2777 /* not infinity */
2778 flags |= RTF_EXPIRES;
2779 expires = jiffies_to_clock_t(rt_expires);
2780 }
2781 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2782 0, dev, expires, flags,
2783 GFP_ATOMIC);
2784 }
2785 fib6_info_release(rt);
2786 }
2787
2788 /* Try to figure out our local address for this prefix */
2789
2790 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2791 struct in6_addr addr;
2792 bool tokenized = false, dev_addr_generated = false;
2793
2794 if (pinfo->prefix_len == 64) {
2795 memcpy(&addr, &pinfo->prefix, 8);
2796
2797 if (!ipv6_addr_any(&in6_dev->token)) {
2798 read_lock_bh(&in6_dev->lock);
2799 memcpy(addr.s6_addr + 8,
2800 in6_dev->token.s6_addr + 8, 8);
2801 read_unlock_bh(&in6_dev->lock);
2802 tokenized = true;
2803 } else if (is_addr_mode_generate_stable(in6_dev) &&
2804 !ipv6_generate_stable_address(&addr, 0,
2805 in6_dev)) {
2806 addr_flags |= IFA_F_STABLE_PRIVACY;
2807 goto ok;
2808 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2809 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2810 goto put;
2811 } else {
2812 dev_addr_generated = true;
2813 }
2814 goto ok;
2815 }
2816 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2817 pinfo->prefix_len);
2818 goto put;
2819
2820 ok:
2821 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2822 &addr, addr_type,
2823 addr_flags, sllao,
2824 tokenized, valid_lft,
2825 prefered_lft);
2826 if (err)
2827 goto put;
2828
2829 /* Ignore errors here because the previous prefix add addr
2830 * succeeded, and that address will be notified.
2831 */
2832 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2833 addr_type, addr_flags, sllao,
2834 tokenized, valid_lft,
2835 prefered_lft,
2836 dev_addr_generated);
2837 }
2838 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2839 put:
2840 in6_dev_put(in6_dev);
2841 }
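/* Putting it together (illustrative): a Prefix Information Option for
 * 2001:db8:1::/64 with L=1 and A=1 installs or refreshes the on-link prefix
 * route above and, if autoconf is enabled, derives one address from the
 * prefix using, in order of preference, the per-device token, a stable
 * privacy IID, or a device-derived EUI-64/inherited IID.
 */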
2842
2843 static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev,
2844 struct in6_ifreq *ireq)
2845 {
2846 struct ip_tunnel_parm p = { };
2847 int err;
2848
2849 if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4))
2850 return -EADDRNOTAVAIL;
2851
2852 p.iph.daddr = ireq->ifr6_addr.s6_addr32[3];
2853 p.iph.version = 4;
2854 p.iph.ihl = 5;
2855 p.iph.protocol = IPPROTO_IPV6;
2856 p.iph.ttl = 64;
2857
2858 if (!dev->netdev_ops->ndo_tunnel_ctl)
2859 return -EOPNOTSUPP;
2860 err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, SIOCADDTUNNEL);
2861 if (err)
2862 return err;
2863
2864 dev = __dev_get_by_name(net, p.name);
2865 if (!dev)
2866 return -ENOBUFS;
2867 return dev_open(dev, NULL);
2868 }
2869
2870 /*
2871 * Set destination address.
2872 * Special case for SIT interfaces where we create a new "virtual"
2873 * device.
2874 */
2875 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2876 {
2877 struct net_device *dev;
2878 struct in6_ifreq ireq;
2879 int err = -ENODEV;
2880
2881 if (!IS_ENABLED(CONFIG_IPV6_SIT))
2882 return -ENODEV;
2883 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2884 return -EFAULT;
2885
2886 rtnl_lock();
2887 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2888 if (dev && dev->type == ARPHRD_SIT)
2889 err = addrconf_set_sit_dstaddr(net, dev, &ireq);
2890 rtnl_unlock();
2891 return err;
2892 }
2893
2894 static int ipv6_mc_config(struct sock *sk, bool join,
2895 const struct in6_addr *addr, int ifindex)
2896 {
2897 int ret;
2898
2899 ASSERT_RTNL();
2900
2901 lock_sock(sk);
2902 if (join)
2903 ret = ipv6_sock_mc_join(sk, ifindex, addr);
2904 else
2905 ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2906 release_sock(sk);
2907
2908 return ret;
2909 }
2910
2911 /*
2912 * Manual configuration of address on an interface
2913 */
2914 static int inet6_addr_add(struct net *net, int ifindex,
2915 struct ifa6_config *cfg,
2916 struct netlink_ext_ack *extack)
2917 {
2918 struct inet6_ifaddr *ifp;
2919 struct inet6_dev *idev;
2920 struct net_device *dev;
2921 unsigned long timeout;
2922 clock_t expires;
2923 u32 flags;
2924
2925 ASSERT_RTNL();
2926
2927 if (cfg->plen > 128)
2928 return -EINVAL;
2929
2930 /* check the lifetime */
2931 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
2932 return -EINVAL;
2933
2934 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64)
2935 return -EINVAL;
2936
2937 dev = __dev_get_by_index(net, ifindex);
2938 if (!dev)
2939 return -ENODEV;
2940
2941 idev = addrconf_add_dev(dev);
2942 if (IS_ERR(idev))
2943 return PTR_ERR(idev);
2944
2945 if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2946 int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2947 true, cfg->pfx, ifindex);
2948
2949 if (ret < 0)
2950 return ret;
2951 }
2952
2953 cfg->scope = ipv6_addr_scope(cfg->pfx);
2954
2955 timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
2956 if (addrconf_finite_timeout(timeout)) {
2957 expires = jiffies_to_clock_t(timeout * HZ);
2958 cfg->valid_lft = timeout;
2959 flags = RTF_EXPIRES;
2960 } else {
2961 expires = 0;
2962 flags = 0;
2963 cfg->ifa_flags |= IFA_F_PERMANENT;
2964 }
2965
2966 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
2967 if (addrconf_finite_timeout(timeout)) {
2968 if (timeout == 0)
2969 cfg->ifa_flags |= IFA_F_DEPRECATED;
2970 cfg->preferred_lft = timeout;
2971 }
2972
2973 ifp = ipv6_add_addr(idev, cfg, true, extack);
2974 if (!IS_ERR(ifp)) {
2975 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
2976 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
2977 ifp->rt_priority, dev, expires,
2978 flags, GFP_KERNEL);
2979 }
2980
2981 /* Send a netlink notification if DAD is enabled and
2982 * optimistic flag is not set
2983 */
2984 if (!(ifp->flags & (IFA_F_OPTIMISTIC | IFA_F_NODAD)))
2985 ipv6_ifa_notify(0, ifp);
2986 /*
2987 * Note that section 3.1 of RFC 4429 indicates
2988 * that the Optimistic flag should not be set for
2989 * manually configured addresses
2990 */
2991 addrconf_dad_start(ifp);
2992 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR)
2993 manage_tempaddrs(idev, ifp, cfg->valid_lft,
2994 cfg->preferred_lft, true, jiffies);
2995 in6_ifa_put(ifp);
2996 addrconf_verify_rtnl();
2997 return 0;
2998 } else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2999 ipv6_mc_config(net->ipv6.mc_autojoin_sk, false,
3000 cfg->pfx, ifindex);
3001 }
3002
3003 return PTR_ERR(ifp);
3004 }
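/* This is the common backend for manual configuration; e.g. (illustrative)
 * "ip -6 addr add 2001:db8::1/64 dev eth0 valid_lft 600 preferred_lft 300"
 * ends up here with the corresponding ifa6_config, as does the legacy ioctl
 * path in addrconf_add_ifaddr() below.
 */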
3005
3006 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
3007 const struct in6_addr *pfx, unsigned int plen)
3008 {
3009 struct inet6_ifaddr *ifp;
3010 struct inet6_dev *idev;
3011 struct net_device *dev;
3012
3013 if (plen > 128)
3014 return -EINVAL;
3015
3016 dev = __dev_get_by_index(net, ifindex);
3017 if (!dev)
3018 return -ENODEV;
3019
3020 idev = __in6_dev_get(dev);
3021 if (!idev)
3022 return -ENXIO;
3023
3024 read_lock_bh(&idev->lock);
3025 list_for_each_entry(ifp, &idev->addr_list, if_list) {
3026 if (ifp->prefix_len == plen &&
3027 ipv6_addr_equal(pfx, &ifp->addr)) {
3028 in6_ifa_hold(ifp);
3029 read_unlock_bh(&idev->lock);
3030
3031 if (!(ifp->flags & IFA_F_TEMPORARY) &&
3032 (ifa_flags & IFA_F_MANAGETEMPADDR))
3033 manage_tempaddrs(idev, ifp, 0, 0, false,
3034 jiffies);
3035 ipv6_del_addr(ifp);
3036 addrconf_verify_rtnl();
3037 if (ipv6_addr_is_multicast(pfx)) {
3038 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
3039 false, pfx, dev->ifindex);
3040 }
3041 return 0;
3042 }
3043 }
3044 read_unlock_bh(&idev->lock);
3045 return -EADDRNOTAVAIL;
3046 }
3047
3048
3049 int addrconf_add_ifaddr(struct net *net, void __user *arg)
3050 {
3051 struct ifa6_config cfg = {
3052 .ifa_flags = IFA_F_PERMANENT,
3053 .preferred_lft = INFINITY_LIFE_TIME,
3054 .valid_lft = INFINITY_LIFE_TIME,
3055 };
3056 struct in6_ifreq ireq;
3057 int err;
3058
3059 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3060 return -EPERM;
3061
3062 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3063 return -EFAULT;
3064
3065 cfg.pfx = &ireq.ifr6_addr;
3066 cfg.plen = ireq.ifr6_prefixlen;
3067
3068 rtnl_lock();
3069 err = inet6_addr_add(net, ireq.ifr6_ifindex, &cfg, NULL);
3070 rtnl_unlock();
3071 return err;
3072 }
3073
3074 int addrconf_del_ifaddr(struct net *net, void __user *arg)
3075 {
3076 struct in6_ifreq ireq;
3077 int err;
3078
3079 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3080 return -EPERM;
3081
3082 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3083 return -EFAULT;
3084
3085 rtnl_lock();
3086 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
3087 ireq.ifr6_prefixlen);
3088 rtnl_unlock();
3089 return err;
3090 }
3091
3092 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
3093 int plen, int scope)
3094 {
3095 struct inet6_ifaddr *ifp;
3096 struct ifa6_config cfg = {
3097 .pfx = addr,
3098 .plen = plen,
3099 .ifa_flags = IFA_F_PERMANENT,
3100 .valid_lft = INFINITY_LIFE_TIME,
3101 .preferred_lft = INFINITY_LIFE_TIME,
3102 .scope = scope
3103 };
3104
3105 ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3106 if (!IS_ERR(ifp)) {
3107 spin_lock_bh(&ifp->lock);
3108 ifp->flags &= ~IFA_F_TENTATIVE;
3109 spin_unlock_bh(&ifp->lock);
3110 rt_genid_bump_ipv6(dev_net(idev->dev));
3111 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3112 in6_ifa_put(ifp);
3113 }
3114 }
3115
3116 #if IS_ENABLED(CONFIG_IPV6_SIT)
3117 static void sit_add_v4_addrs(struct inet6_dev *idev)
3118 {
3119 struct in6_addr addr;
3120 struct net_device *dev;
3121 struct net *net = dev_net(idev->dev);
3122 int scope, plen;
3123 u32 pflags = 0;
3124
3125 ASSERT_RTNL();
3126
3127 memset(&addr, 0, sizeof(struct in6_addr));
3128 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
3129
3130 if (idev->dev->flags&IFF_POINTOPOINT) {
3131 if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE)
3132 return;
3133
3134 addr.s6_addr32[0] = htonl(0xfe800000);
3135 scope = IFA_LINK;
3136 plen = 64;
3137 } else {
3138 scope = IPV6_ADDR_COMPATv4;
3139 plen = 96;
3140 pflags |= RTF_NONEXTHOP;
3141 }
3142
3143 if (addr.s6_addr32[3]) {
3144 add_addr(idev, &addr, plen, scope);
3145 addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
3146 GFP_KERNEL);
3147 return;
3148 }
3149
3150 for_each_netdev(net, dev) {
3151 struct in_device *in_dev = __in_dev_get_rtnl(dev);
3152 if (in_dev && (dev->flags & IFF_UP)) {
3153 struct in_ifaddr *ifa;
3154 int flag = scope;
3155
3156 in_dev_for_each_ifa_rtnl(ifa, in_dev) {
3157 addr.s6_addr32[3] = ifa->ifa_local;
3158
3159 if (ifa->ifa_scope == RT_SCOPE_LINK)
3160 continue;
3161 if (ifa->ifa_scope >= RT_SCOPE_HOST) {
3162 if (idev->dev->flags&IFF_POINTOPOINT)
3163 continue;
3164 flag |= IFA_HOST;
3165 }
3166
3167 add_addr(idev, &addr, plen, flag);
3168 addrconf_prefix_route(&addr, plen, 0, idev->dev,
3169 0, pflags, GFP_KERNEL);
3170 }
3171 }
3172 }
3173 }
3174 #endif
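/* Mapping sketch for the SIT case above: a local IPv4 address A.B.C.D on a
 * non point-to-point tunnel becomes the IPv4-compatible address ::A.B.C.D
 * with a /96 prefix (e.g. 198.51.100.7 -> ::198.51.100.7), while
 * point-to-point tunnels get a link-local fe80::/64 address instead.
 */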
3175
3176 static void init_loopback(struct net_device *dev)
3177 {
3178 struct inet6_dev *idev;
3179
3180 /* ::1 */
3181
3182 ASSERT_RTNL();
3183
3184 idev = ipv6_find_idev(dev);
3185 if (IS_ERR(idev)) {
3186 pr_debug("%s: add_dev failed\n", __func__);
3187 return;
3188 }
3189
3190 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
3191 }
3192
3193 void addrconf_add_linklocal(struct inet6_dev *idev,
3194 const struct in6_addr *addr, u32 flags)
3195 {
3196 struct ifa6_config cfg = {
3197 .pfx = addr,
3198 .plen = 64,
3199 .ifa_flags = flags | IFA_F_PERMANENT,
3200 .valid_lft = INFINITY_LIFE_TIME,
3201 .preferred_lft = INFINITY_LIFE_TIME,
3202 .scope = IFA_LINK
3203 };
3204 struct inet6_ifaddr *ifp;
3205
3206 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3207 if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
3208 idev->cnf.optimistic_dad) &&
3209 !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3210 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
3211 #endif
3212
3213 ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3214 if (!IS_ERR(ifp)) {
3215 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 0, idev->dev,
3216 0, 0, GFP_ATOMIC);
3217 addrconf_dad_start(ifp);
3218 in6_ifa_put(ifp);
3219 }
3220 }
3221 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3222
3223 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3224 {
3225 if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3226 return true;
3227
3228 if (address.s6_addr32[2] == htonl(0x02005eff) &&
3229 ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3230 return true;
3231
3232 if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3233 ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3234 return true;
3235
3236 return false;
3237 }
3238
3239 static int ipv6_generate_stable_address(struct in6_addr *address,
3240 u8 dad_count,
3241 const struct inet6_dev *idev)
3242 {
3243 static DEFINE_SPINLOCK(lock);
3244 static __u32 digest[SHA1_DIGEST_WORDS];
3245 static __u32 workspace[SHA1_WORKSPACE_WORDS];
3246
3247 static union {
3248 char __data[SHA1_BLOCK_SIZE];
3249 struct {
3250 struct in6_addr secret;
3251 __be32 prefix[2];
3252 unsigned char hwaddr[MAX_ADDR_LEN];
3253 u8 dad_count;
3254 } __packed;
3255 } data;
3256
3257 struct in6_addr secret;
3258 struct in6_addr temp;
3259 struct net *net = dev_net(idev->dev);
3260
3261 BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3262
3263 if (idev->cnf.stable_secret.initialized)
3264 secret = idev->cnf.stable_secret.secret;
3265 else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3266 secret = net->ipv6.devconf_dflt->stable_secret.secret;
3267 else
3268 return -1;
3269
3270 retry:
3271 spin_lock_bh(&lock);
3272
3273 sha1_init(digest);
3274 memset(&data, 0, sizeof(data));
3275 memset(workspace, 0, sizeof(workspace));
3276 memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3277 data.prefix[0] = address->s6_addr32[0];
3278 data.prefix[1] = address->s6_addr32[1];
3279 data.secret = secret;
3280 data.dad_count = dad_count;
3281
3282 sha1_transform(digest, data.__data, workspace);
3283
3284 temp = *address;
3285 temp.s6_addr32[2] = (__force __be32)digest[0];
3286 temp.s6_addr32[3] = (__force __be32)digest[1];
3287
3288 spin_unlock_bh(&lock);
3289
3290 if (ipv6_reserved_interfaceid(temp)) {
3291 dad_count++;
3292 if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3293 return -1;
3294 goto retry;
3295 }
3296
3297 *address = temp;
3298 return 0;
3299 }
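/* Sketch of the scheme above (RFC 7217 style): the interface identifier is
 * taken from the first 64 bits of SHA-1(stable_secret, prefix, hardware
 * address, dad_count), so the address is constant for a given prefix and
 * interface but unrelated to addresses formed on other prefixes; dad_count
 * is bumped and the hash recomputed whenever a reserved IID would result.
 */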
3300
3301 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3302 {
3303 struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3304
3305 if (s->initialized)
3306 return;
3307 s = &idev->cnf.stable_secret;
3308 get_random_bytes(&s->secret, sizeof(s->secret));
3309 s->initialized = true;
3310 }
3311
3312 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3313 {
3314 struct in6_addr addr;
3315
3316 /* no link local addresses on L3 master devices */
3317 if (netif_is_l3_master(idev->dev))
3318 return;
3319
3320 /* no link local addresses on devices flagged as slaves */
3321 if (idev->dev->flags & IFF_SLAVE)
3322 return;
3323
3324 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3325
3326 switch (idev->cnf.addr_gen_mode) {
3327 case IN6_ADDR_GEN_MODE_RANDOM:
3328 ipv6_gen_mode_random_init(idev);
3329 fallthrough;
3330 case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3331 if (!ipv6_generate_stable_address(&addr, 0, idev))
3332 addrconf_add_linklocal(idev, &addr,
3333 IFA_F_STABLE_PRIVACY);
3334 else if (prefix_route)
3335 addrconf_prefix_route(&addr, 64, 0, idev->dev,
3336 0, 0, GFP_KERNEL);
3337 break;
3338 case IN6_ADDR_GEN_MODE_EUI64:
3339 /* addrconf_add_linklocal also adds a prefix_route and we
3340 * only need to care about prefix routes if ipv6_generate_eui64
3341 * couldn't generate one.
3342 */
3343 if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3344 addrconf_add_linklocal(idev, &addr, 0);
3345 else if (prefix_route)
3346 addrconf_prefix_route(&addr, 64, 0, idev->dev,
3347 0, 0, GFP_KERNEL);
3348 break;
3349 case IN6_ADDR_GEN_MODE_NONE:
3350 default:
3351 /* will not add any link local address */
3352 break;
3353 }
3354 }
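/* The mode is selected via the per-device addr_gen_mode sysctl or netlink
 * attribute, e.g. (illustrative) "sysctl net.ipv6.conf.eth0.addr_gen_mode=3"
 * picks IN6_ADDR_GEN_MODE_RANDOM: a random stable secret is generated once
 * and then fed through the stable-privacy path above, while
 * IN6_ADDR_GEN_MODE_NONE suppresses the link-local address entirely.
 */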
3355
3356 static void addrconf_dev_config(struct net_device *dev)
3357 {
3358 struct inet6_dev *idev;
3359
3360 ASSERT_RTNL();
3361
3362 if ((dev->type != ARPHRD_ETHER) &&
3363 (dev->type != ARPHRD_FDDI) &&
3364 (dev->type != ARPHRD_ARCNET) &&
3365 (dev->type != ARPHRD_INFINIBAND) &&
3366 (dev->type != ARPHRD_IEEE1394) &&
3367 (dev->type != ARPHRD_TUNNEL6) &&
3368 (dev->type != ARPHRD_6LOWPAN) &&
3369 (dev->type != ARPHRD_IP6GRE) &&
3370 (dev->type != ARPHRD_IPGRE) &&
3371 (dev->type != ARPHRD_TUNNEL) &&
3372 (dev->type != ARPHRD_NONE) &&
3373 (dev->type != ARPHRD_RAWIP)) {
3374 /* Alas, we support only Ethernet autoconfiguration. */
3375 idev = __in6_dev_get(dev);
3376 if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
3377 dev->flags & IFF_MULTICAST)
3378 ipv6_mc_up(idev);
3379 return;
3380 }
3381
3382 idev = addrconf_add_dev(dev);
3383 if (IS_ERR(idev))
3384 return;
3385
3386 /* this device type has no EUI support */
3387 if (dev->type == ARPHRD_NONE &&
3388 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3389 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3390
3391 addrconf_addr_gen(idev, false);
3392 }
3393
3394 #if IS_ENABLED(CONFIG_IPV6_SIT)
3395 static void addrconf_sit_config(struct net_device *dev)
3396 {
3397 struct inet6_dev *idev;
3398
3399 ASSERT_RTNL();
3400
3401 /*
3402 * Configure the tunnel with one of our IPv4
3403 * addresses... we should configure all of
3404 * our v4 addrs in the tunnel
3405 */
3406
3407 idev = ipv6_find_idev(dev);
3408 if (IS_ERR(idev)) {
3409 pr_debug("%s: add_dev failed\n", __func__);
3410 return;
3411 }
3412
3413 if (dev->priv_flags & IFF_ISATAP) {
3414 addrconf_addr_gen(idev, false);
3415 return;
3416 }
3417
3418 sit_add_v4_addrs(idev);
3419
3420 if (dev->flags&IFF_POINTOPOINT)
3421 addrconf_add_mroute(dev);
3422 }
3423 #endif
3424
3425 #if IS_ENABLED(CONFIG_NET_IPGRE)
3426 static void addrconf_gre_config(struct net_device *dev)
3427 {
3428 struct inet6_dev *idev;
3429
3430 ASSERT_RTNL();
3431
3432 idev = ipv6_find_idev(dev);
3433 if (IS_ERR(idev)) {
3434 pr_debug("%s: add_dev failed\n", __func__);
3435 return;
3436 }
3437
3438 addrconf_addr_gen(idev, true);
3439 if (dev->flags & IFF_POINTOPOINT)
3440 addrconf_add_mroute(dev);
3441 }
3442 #endif
3443
3444 static int fixup_permanent_addr(struct net *net,
3445 struct inet6_dev *idev,
3446 struct inet6_ifaddr *ifp)
3447 {
3448 /* !fib6_node means the host route was removed from the
3449 * FIB, for example, if 'lo' device is taken down. In that
3450 * case regenerate the host route.
3451 */
3452 if (!ifp->rt || !ifp->rt->fib6_node) {
3453 struct fib6_info *f6i, *prev;
3454
3455 f6i = addrconf_f6i_alloc(net, idev, &ifp->addr, false,
3456 GFP_ATOMIC);
3457 if (IS_ERR(f6i))
3458 return PTR_ERR(f6i);
3459
3460 /* ifp->rt can be accessed outside of rtnl */
3461 spin_lock(&ifp->lock);
3462 prev = ifp->rt;
3463 ifp->rt = f6i;
3464 spin_unlock(&ifp->lock);
3465
3466 fib6_info_release(prev);
3467 }
3468
3469 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3470 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3471 ifp->rt_priority, idev->dev, 0, 0,
3472 GFP_ATOMIC);
3473 }
3474
3475 if (ifp->state == INET6_IFADDR_STATE_PREDAD)
3476 addrconf_dad_start(ifp);
3477
3478 return 0;
3479 }
3480
3481 static void addrconf_permanent_addr(struct net *net, struct net_device *dev)
3482 {
3483 struct inet6_ifaddr *ifp, *tmp;
3484 struct inet6_dev *idev;
3485
3486 idev = __in6_dev_get(dev);
3487 if (!idev)
3488 return;
3489
3490 write_lock_bh(&idev->lock);
3491
3492 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3493 if ((ifp->flags & IFA_F_PERMANENT) &&
3494 fixup_permanent_addr(net, idev, ifp) < 0) {
3495 write_unlock_bh(&idev->lock);
3496 in6_ifa_hold(ifp);
3497 ipv6_del_addr(ifp);
3498 write_lock_bh(&idev->lock);
3499
3500 net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3501 idev->dev->name, &ifp->addr);
3502 }
3503 }
3504
3505 write_unlock_bh(&idev->lock);
3506 }
3507
3508 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3509 void *ptr)
3510 {
3511 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3512 struct netdev_notifier_change_info *change_info;
3513 struct netdev_notifier_changeupper_info *info;
3514 struct inet6_dev *idev = __in6_dev_get(dev);
3515 struct net *net = dev_net(dev);
3516 int run_pending = 0;
3517 int err;
3518
3519 switch (event) {
3520 case NETDEV_REGISTER:
3521 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3522 idev = ipv6_add_dev(dev);
3523 if (IS_ERR(idev))
3524 return notifier_from_errno(PTR_ERR(idev));
3525 }
3526 break;
3527
3528 case NETDEV_CHANGEMTU:
3529 /* if the MTU is under IPV6_MIN_MTU, stop IPv6 on this interface. */
3530 if (dev->mtu < IPV6_MIN_MTU) {
3531 addrconf_ifdown(dev, dev != net->loopback_dev);
3532 break;
3533 }
3534
3535 if (idev) {
3536 rt6_mtu_change(dev, dev->mtu);
3537 idev->cnf.mtu6 = dev->mtu;
3538 break;
3539 }
3540
3541 /* allocate new idev */
3542 idev = ipv6_add_dev(dev);
3543 if (IS_ERR(idev))
3544 break;
3545
3546 /* device is still not ready */
3547 if (!(idev->if_flags & IF_READY))
3548 break;
3549
3550 run_pending = 1;
3551 fallthrough;
3552 case NETDEV_UP:
3553 case NETDEV_CHANGE:
3554 if (dev->flags & IFF_SLAVE)
3555 break;
3556
3557 if (idev && idev->cnf.disable_ipv6)
3558 break;
3559
3560 if (event == NETDEV_UP) {
3561 /* restore routes for permanent addresses */
3562 addrconf_permanent_addr(net, dev);
3563
3564 if (!addrconf_link_ready(dev)) {
3565 /* device is not ready yet. */
3566 pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3567 dev->name);
3568 break;
3569 }
3570
3571 if (!idev && dev->mtu >= IPV6_MIN_MTU)
3572 idev = ipv6_add_dev(dev);
3573
3574 if (!IS_ERR_OR_NULL(idev)) {
3575 idev->if_flags |= IF_READY;
3576 run_pending = 1;
3577 }
3578 } else if (event == NETDEV_CHANGE) {
3579 if (!addrconf_link_ready(dev)) {
3580 /* device is still not ready. */
3581 rt6_sync_down_dev(dev, event);
3582 break;
3583 }
3584
3585 if (!IS_ERR_OR_NULL(idev)) {
3586 if (idev->if_flags & IF_READY) {
3587 /* device is already configured -
3588 * but resend MLD reports, we might
3589 * have roamed and need to update
3590 * multicast snooping switches
3591 */
3592 ipv6_mc_up(idev);
3593 change_info = ptr;
3594 if (change_info->flags_changed & IFF_NOARP)
3595 addrconf_dad_run(idev, true);
3596 rt6_sync_up(dev, RTNH_F_LINKDOWN);
3597 break;
3598 }
3599 idev->if_flags |= IF_READY;
3600 }
3601
3602 pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3603 dev->name);
3604
3605 run_pending = 1;
3606 }
3607
3608 switch (dev->type) {
3609 #if IS_ENABLED(CONFIG_IPV6_SIT)
3610 case ARPHRD_SIT:
3611 addrconf_sit_config(dev);
3612 break;
3613 #endif
3614 #if IS_ENABLED(CONFIG_NET_IPGRE)
3615 case ARPHRD_IPGRE:
3616 addrconf_gre_config(dev);
3617 break;
3618 #endif
3619 case ARPHRD_LOOPBACK:
3620 init_loopback(dev);
3621 break;
3622
3623 default:
3624 addrconf_dev_config(dev);
3625 break;
3626 }
3627
3628 if (!IS_ERR_OR_NULL(idev)) {
3629 if (run_pending)
3630 addrconf_dad_run(idev, false);
3631
3632 /* Device has an address by now */
3633 rt6_sync_up(dev, RTNH_F_DEAD);
3634
3635 /*
3636 * If the MTU changed while the interface was down, the
3637 * new MTU must be reflected in the idev as well as in the
3638 * routes once the interface is back up.
3639 */
3640 if (idev->cnf.mtu6 != dev->mtu &&
3641 dev->mtu >= IPV6_MIN_MTU) {
3642 rt6_mtu_change(dev, dev->mtu);
3643 idev->cnf.mtu6 = dev->mtu;
3644 }
3645 idev->tstamp = jiffies;
3646 inet6_ifinfo_notify(RTM_NEWLINK, idev);
3647
3648 /*
3649 * If the MTU changed while the interface was down and is now
3650 * lower than IPV6_MIN_MTU, stop IPv6 on this interface.
3651 */
3652 if (dev->mtu < IPV6_MIN_MTU)
3653 addrconf_ifdown(dev, dev != net->loopback_dev);
3654 }
3655 break;
3656
3657 case NETDEV_DOWN:
3658 case NETDEV_UNREGISTER:
3659 /*
3660 * Remove all addresses from this interface.
3661 */
3662 addrconf_ifdown(dev, event != NETDEV_DOWN);
3663 break;
3664
3665 case NETDEV_CHANGENAME:
3666 if (idev) {
3667 snmp6_unregister_dev(idev);
3668 addrconf_sysctl_unregister(idev);
3669 err = addrconf_sysctl_register(idev);
3670 if (err)
3671 return notifier_from_errno(err);
3672 err = snmp6_register_dev(idev);
3673 if (err) {
3674 addrconf_sysctl_unregister(idev);
3675 return notifier_from_errno(err);
3676 }
3677 }
3678 break;
3679
3680 case NETDEV_PRE_TYPE_CHANGE:
3681 case NETDEV_POST_TYPE_CHANGE:
3682 if (idev)
3683 addrconf_type_change(dev, event);
3684 break;
3685
3686 case NETDEV_CHANGEUPPER:
3687 info = ptr;
3688
3689 /* flush all routes if dev is linked to or unlinked from
3690 * an L3 master device (e.g., VRF)
3691 */
3692 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3693 addrconf_ifdown(dev, false);
3694 }
3695
3696 return NOTIFY_OK;
3697 }
3698
3699 /*
3700 * addrconf module should be notified of a device going up
3701 */
3702 static struct notifier_block ipv6_dev_notf = {
3703 .notifier_call = addrconf_notify,
3704 .priority = ADDRCONF_NOTIFY_PRIORITY,
3705 };
3706
3707 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3708 {
3709 struct inet6_dev *idev;
3710 ASSERT_RTNL();
3711
3712 idev = __in6_dev_get(dev);
3713
3714 if (event == NETDEV_POST_TYPE_CHANGE)
3715 ipv6_mc_remap(idev);
3716 else if (event == NETDEV_PRE_TYPE_CHANGE)
3717 ipv6_mc_unmap(idev);
3718 }
3719
3720 static bool addr_is_local(const struct in6_addr *addr)
3721 {
3722 return ipv6_addr_type(addr) &
3723 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3724 }
3725
3726 static int addrconf_ifdown(struct net_device *dev, bool unregister)
3727 {
3728 unsigned long event = unregister ? NETDEV_UNREGISTER : NETDEV_DOWN;
3729 struct net *net = dev_net(dev);
3730 struct inet6_dev *idev;
3731 struct inet6_ifaddr *ifa;
3732 LIST_HEAD(tmp_addr_list);
3733 bool keep_addr = false;
3734 bool was_ready;
3735 int state, i;
3736
3737 ASSERT_RTNL();
3738
3739 rt6_disable_ip(dev, event);
3740
3741 idev = __in6_dev_get(dev);
3742 if (!idev)
3743 return -ENODEV;
3744
3745 /*
3746 * Step 1: remove reference to ipv6 device from parent device.
3747 * Do not dev_put!
3748 */
3749 if (unregister) {
3750 idev->dead = 1;
3751
3752 /* protected by rtnl_lock */
3753 RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3754
3755 /* Step 1.5: remove snmp6 entry */
3756 snmp6_unregister_dev(idev);
3757
3758 }
3759
3760 /* combine the user config with event to determine if permanent
3761 * addresses are to be removed from address hash table
3762 */
3763 if (!unregister && !idev->cnf.disable_ipv6) {
3764 /* aggregate the system setting and interface setting */
3765 int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3766
3767 if (!_keep_addr)
3768 _keep_addr = idev->cnf.keep_addr_on_down;
3769
3770 keep_addr = (_keep_addr > 0);
3771 }
3772
3773 /* Step 2: clear hash table */
3774 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3775 struct hlist_head *h = &inet6_addr_lst[i];
3776
3777 spin_lock_bh(&addrconf_hash_lock);
3778 restart:
3779 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3780 if (ifa->idev == idev) {
3781 addrconf_del_dad_work(ifa);
3782 /* combined flag + permanent flag decide if
3783 * address is retained on a down event
3784 */
3785 if (!keep_addr ||
3786 !(ifa->flags & IFA_F_PERMANENT) ||
3787 addr_is_local(&ifa->addr)) {
3788 hlist_del_init_rcu(&ifa->addr_lst);
3789 goto restart;
3790 }
3791 }
3792 }
3793 spin_unlock_bh(&addrconf_hash_lock);
3794 }
3795
3796 write_lock_bh(&idev->lock);
3797
3798 addrconf_del_rs_timer(idev);
3799
3800 /* Step 2: clear flags for stateless addrconf, repeated down
3801 * detection
3802 */
3803 was_ready = idev->if_flags & IF_READY;
3804 if (!unregister)
3805 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3806
3807 /* Step 3: clear tempaddr list */
3808 while (!list_empty(&idev->tempaddr_list)) {
3809 ifa = list_first_entry(&idev->tempaddr_list,
3810 struct inet6_ifaddr, tmp_list);
3811 list_del(&ifa->tmp_list);
3812 write_unlock_bh(&idev->lock);
3813 spin_lock_bh(&ifa->lock);
3814
3815 if (ifa->ifpub) {
3816 in6_ifa_put(ifa->ifpub);
3817 ifa->ifpub = NULL;
3818 }
3819 spin_unlock_bh(&ifa->lock);
3820 in6_ifa_put(ifa);
3821 write_lock_bh(&idev->lock);
3822 }
3823
3824 list_for_each_entry(ifa, &idev->addr_list, if_list)
3825 list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
3826 write_unlock_bh(&idev->lock);
3827
3828 while (!list_empty(&tmp_addr_list)) {
3829 struct fib6_info *rt = NULL;
3830 bool keep;
3831
3832 ifa = list_first_entry(&tmp_addr_list,
3833 struct inet6_ifaddr, if_list_aux);
3834 list_del(&ifa->if_list_aux);
3835
3836 addrconf_del_dad_work(ifa);
3837
3838 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3839 !addr_is_local(&ifa->addr);
3840
3841 spin_lock_bh(&ifa->lock);
3842
3843 if (keep) {
3844 /* set state to skip the notifier below */
3845 state = INET6_IFADDR_STATE_DEAD;
3846 ifa->state = INET6_IFADDR_STATE_PREDAD;
3847 if (!(ifa->flags & IFA_F_NODAD))
3848 ifa->flags |= IFA_F_TENTATIVE;
3849
3850 rt = ifa->rt;
3851 ifa->rt = NULL;
3852 } else {
3853 state = ifa->state;
3854 ifa->state = INET6_IFADDR_STATE_DEAD;
3855 }
3856
3857 spin_unlock_bh(&ifa->lock);
3858
3859 if (rt)
3860 ip6_del_rt(net, rt, false);
3861
3862 if (state != INET6_IFADDR_STATE_DEAD) {
3863 __ipv6_ifa_notify(RTM_DELADDR, ifa);
3864 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3865 } else {
3866 if (idev->cnf.forwarding)
3867 addrconf_leave_anycast(ifa);
3868 addrconf_leave_solict(ifa->idev, &ifa->addr);
3869 }
3870
3871 if (!keep) {
3872 write_lock_bh(&idev->lock);
3873 list_del_rcu(&ifa->if_list);
3874 write_unlock_bh(&idev->lock);
3875 in6_ifa_put(ifa);
3876 }
3877 }
3878
3879 /* Step 5: Discard anycast and multicast list */
3880 if (unregister) {
3881 ipv6_ac_destroy_dev(idev);
3882 ipv6_mc_destroy_dev(idev);
3883 } else if (was_ready) {
3884 ipv6_mc_down(idev);
3885 }
3886
3887 idev->tstamp = jiffies;
3888
3889 /* Last: Shoot the device (if unregistered) */
3890 if (unregister) {
3891 addrconf_sysctl_unregister(idev);
3892 neigh_parms_release(&nd_tbl, idev->nd_parms);
3893 neigh_ifdown(&nd_tbl, dev);
3894 in6_dev_put(idev);
3895 }
3896 return 0;
3897 }
3898
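/* Router solicitation timer: while the interface is ready, accepts RAs
 * and has not yet received one, resend an RS (up to cnf.rtr_solicits
 * probes, or indefinitely if that value is negative) and rearm the
 * timer using the RFC 3315 section 14 backoff.
 */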
3899 static void addrconf_rs_timer(struct timer_list *t)
3900 {
3901 struct inet6_dev *idev = from_timer(idev, t, rs_timer);
3902 struct net_device *dev = idev->dev;
3903 struct in6_addr lladdr;
3904
3905 write_lock(&idev->lock);
3906 if (idev->dead || !(idev->if_flags & IF_READY))
3907 goto out;
3908
3909 if (!ipv6_accept_ra(idev))
3910 goto out;
3911
3912 /* Announcement received after solicitation was sent */
3913 if (idev->if_flags & IF_RA_RCVD)
3914 goto out;
3915
3916 if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3917 write_unlock(&idev->lock);
3918 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3919 ndisc_send_rs(dev, &lladdr,
3920 &in6addr_linklocal_allrouters);
3921 else
3922 goto put;
3923
3924 write_lock(&idev->lock);
3925 idev->rs_interval = rfc3315_s14_backoff_update(
3926 idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
3927 /* The wait after the last probe can be shorter */
3928 addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3929 idev->cnf.rtr_solicits) ?
3930 idev->cnf.rtr_solicit_delay :
3931 idev->rs_interval);
3932 } else {
3933 /*
3934 * Note: we no longer support the deprecated
3935 * "all on-link" assumption.
3936 */
3937 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3938 }
3939
3940 out:
3941 write_unlock(&idev->lock);
3942 put:
3943 in6_dev_put(idev);
3944 }
3945
3946 /*
3947 * Duplicate Address Detection
3948 */
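/* Arm the first DAD probe: optimistic addresses start immediately,
 * others after a random delay of up to rtr_solicit_delay. When
 * enhanced DAD is enabled, a non-zero 48-bit nonce is generated so
 * that our own looped-back probes can be recognised.
 */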
3949 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3950 {
3951 unsigned long rand_num;
3952 struct inet6_dev *idev = ifp->idev;
3953 u64 nonce;
3954
3955 if (ifp->flags & IFA_F_OPTIMISTIC)
3956 rand_num = 0;
3957 else
3958 rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
3959
3960 nonce = 0;
3961 if (idev->cnf.enhanced_dad ||
3962 dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
3963 do
3964 get_random_bytes(&nonce, 6);
3965 while (nonce == 0);
3966 }
3967 ifp->dad_nonce = nonce;
3968 ifp->dad_probes = idev->cnf.dad_transmits;
3969 addrconf_mod_dad_work(ifp, rand_num);
3970 }
3971
3972 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3973 {
3974 struct inet6_dev *idev = ifp->idev;
3975 struct net_device *dev = idev->dev;
3976 bool bump_id, notify = false;
3977 struct net *net;
3978
3979 addrconf_join_solict(dev, &ifp->addr);
3980
3981 prandom_seed((__force u32) ifp->addr.s6_addr32[3]);
3982
3983 read_lock_bh(&idev->lock);
3984 spin_lock(&ifp->lock);
3985 if (ifp->state == INET6_IFADDR_STATE_DEAD)
3986 goto out;
3987
3988 net = dev_net(dev);
3989 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
3990 (net->ipv6.devconf_all->accept_dad < 1 &&
3991 idev->cnf.accept_dad < 1) ||
3992 !(ifp->flags&IFA_F_TENTATIVE) ||
3993 ifp->flags & IFA_F_NODAD) {
3994 bool send_na = false;
3995
3996 if (ifp->flags & IFA_F_TENTATIVE &&
3997 !(ifp->flags & IFA_F_OPTIMISTIC))
3998 send_na = true;
3999 bump_id = ifp->flags & IFA_F_TENTATIVE;
4000 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4001 spin_unlock(&ifp->lock);
4002 read_unlock_bh(&idev->lock);
4003
4004 addrconf_dad_completed(ifp, bump_id, send_na);
4005 return;
4006 }
4007
4008 if (!(idev->if_flags & IF_READY)) {
4009 spin_unlock(&ifp->lock);
4010 read_unlock_bh(&idev->lock);
4011 /*
4012 * If the device is not ready:
4013 * - keep it tentative if it is a permanent address.
4014 * - otherwise, kill it.
4015 */
4016 in6_ifa_hold(ifp);
4017 addrconf_dad_stop(ifp, 0);
4018 return;
4019 }
4020
4021 /*
4022 * Optimistic nodes can start receiving
4023 * frames right away.
4024 */
4025 if (ifp->flags & IFA_F_OPTIMISTIC) {
4026 ip6_ins_rt(net, ifp->rt);
4027 if (ipv6_use_optimistic_addr(net, idev)) {
4028 /* Because optimistic nodes can use this address,
4029 * notify listeners. If DAD fails, RTM_DELADDR is sent.
4030 */
4031 notify = true;
4032 }
4033 }
4034
4035 addrconf_dad_kick(ifp);
4036 out:
4037 spin_unlock(&ifp->lock);
4038 read_unlock_bh(&idev->lock);
4039 if (notify)
4040 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4041 }
4042
4043 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
4044 {
4045 bool begin_dad = false;
4046
4047 spin_lock_bh(&ifp->lock);
4048 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
4049 ifp->state = INET6_IFADDR_STATE_PREDAD;
4050 begin_dad = true;
4051 }
4052 spin_unlock_bh(&ifp->lock);
4053
4054 if (begin_dad)
4055 addrconf_mod_dad_work(ifp, 0);
4056 }
4057
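/* DAD worker, run under RTNL: begins DAD for PREDAD addresses, aborts
 * it for ERRDAD ones (optionally disabling IPv6 on the device when an
 * EUI-64 based link-local address failed DAD), and otherwise either
 * sends the next NS probe or completes DAD once dad_probes reaches
 * zero.
 */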
4058 static void addrconf_dad_work(struct work_struct *w)
4059 {
4060 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
4061 struct inet6_ifaddr,
4062 dad_work);
4063 struct inet6_dev *idev = ifp->idev;
4064 bool bump_id, disable_ipv6 = false;
4065 struct in6_addr mcaddr;
4066
4067 enum {
4068 DAD_PROCESS,
4069 DAD_BEGIN,
4070 DAD_ABORT,
4071 } action = DAD_PROCESS;
4072
4073 rtnl_lock();
4074
4075 spin_lock_bh(&ifp->lock);
4076 if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
4077 action = DAD_BEGIN;
4078 ifp->state = INET6_IFADDR_STATE_DAD;
4079 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
4080 action = DAD_ABORT;
4081 ifp->state = INET6_IFADDR_STATE_POSTDAD;
4082
4083 if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
4084 idev->cnf.accept_dad > 1) &&
4085 !idev->cnf.disable_ipv6 &&
4086 !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
4087 struct in6_addr addr;
4088
4089 addr.s6_addr32[0] = htonl(0xfe800000);
4090 addr.s6_addr32[1] = 0;
4091
4092 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
4093 ipv6_addr_equal(&ifp->addr, &addr)) {
4094 /* DAD failed for link-local based on MAC */
4095 idev->cnf.disable_ipv6 = 1;
4096
4097 pr_info("%s: IPv6 being disabled!\n",
4098 ifp->idev->dev->name);
4099 disable_ipv6 = true;
4100 }
4101 }
4102 }
4103 spin_unlock_bh(&ifp->lock);
4104
4105 if (action == DAD_BEGIN) {
4106 addrconf_dad_begin(ifp);
4107 goto out;
4108 } else if (action == DAD_ABORT) {
4109 in6_ifa_hold(ifp);
4110 addrconf_dad_stop(ifp, 1);
4111 if (disable_ipv6)
4112 addrconf_ifdown(idev->dev, false);
4113 goto out;
4114 }
4115
4116 if (!ifp->dad_probes && addrconf_dad_end(ifp))
4117 goto out;
4118
4119 write_lock_bh(&idev->lock);
4120 if (idev->dead || !(idev->if_flags & IF_READY)) {
4121 write_unlock_bh(&idev->lock);
4122 goto out;
4123 }
4124
4125 spin_lock(&ifp->lock);
4126 if (ifp->state == INET6_IFADDR_STATE_DEAD) {
4127 spin_unlock(&ifp->lock);
4128 write_unlock_bh(&idev->lock);
4129 goto out;
4130 }
4131
4132 if (ifp->dad_probes == 0) {
4133 bool send_na = false;
4134
4135 /*
4136 * DAD was successful
4137 */
4138
4139 if (ifp->flags & IFA_F_TENTATIVE &&
4140 !(ifp->flags & IFA_F_OPTIMISTIC))
4141 send_na = true;
4142 bump_id = ifp->flags & IFA_F_TENTATIVE;
4143 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4144 spin_unlock(&ifp->lock);
4145 write_unlock_bh(&idev->lock);
4146
4147 addrconf_dad_completed(ifp, bump_id, send_na);
4148
4149 goto out;
4150 }
4151
4152 ifp->dad_probes--;
4153 addrconf_mod_dad_work(ifp,
4154 max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME),
4155 HZ/100));
4156 spin_unlock(&ifp->lock);
4157 write_unlock_bh(&idev->lock);
4158
4159 /* send a neighbour solicitation for our addr */
4160 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
4161 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
4162 ifp->dad_nonce);
4163 out:
4164 in6_ifa_put(ifp);
4165 rtnl_unlock();
4166 }
4167
4168 /* ifp->idev must be at least read locked */
4169 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
4170 {
4171 struct inet6_ifaddr *ifpiter;
4172 struct inet6_dev *idev = ifp->idev;
4173
4174 list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4175 if (ifpiter->scope > IFA_LINK)
4176 break;
4177 if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4178 (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4179 IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4180 IFA_F_PERMANENT)
4181 return false;
4182 }
4183 return true;
4184 }
4185
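/* DAD finished for this address: announce it via netlink, resend the
 * MLD report with the now-valid link-local source, optionally send an
 * unsolicited NA, and start router solicitations for a lone link-local
 * address on an RA-accepting interface.
 */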
4186 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4187 bool send_na)
4188 {
4189 struct net_device *dev = ifp->idev->dev;
4190 struct in6_addr lladdr;
4191 bool send_rs, send_mld;
4192
4193 addrconf_del_dad_work(ifp);
4194
4195 /*
4196 * Configure the address for reception. Now it is valid.
4197 */
4198
4199 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4200
4201 /* If the added prefix is link-local and we are prepared to process
4202 router advertisements, start sending router solicitations.
4203 */
4204
4205 read_lock_bh(&ifp->idev->lock);
4206 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4207 send_rs = send_mld &&
4208 ipv6_accept_ra(ifp->idev) &&
4209 ifp->idev->cnf.rtr_solicits != 0 &&
4210 (dev->flags & IFF_LOOPBACK) == 0 &&
4211 (dev->type != ARPHRD_TUNNEL);
4212 read_unlock_bh(&ifp->idev->lock);
4213
4214 /* While DAD is in progress, the MLD report's source address is
4215 * in6addr_any. Resend it with the proper link-local address now.
4216 */
4217 if (send_mld)
4218 ipv6_mc_dad_complete(ifp->idev);
4219
4220 /* send unsolicited NA if enabled */
4221 if (send_na &&
4222 (ifp->idev->cnf.ndisc_notify ||
4223 dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
4224 ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
4225 /*router=*/ !!ifp->idev->cnf.forwarding,
4226 /*solicited=*/ false, /*override=*/ true,
4227 /*inc_opt=*/ true);
4228 }
4229
4230 if (send_rs) {
4231 /*
4232 * If a host has already performed a random delay
4233 * [...] as part of DAD [...] there is no need
4234 * to delay again before sending the first RS
4235 */
4236 if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4237 return;
4238 ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4239
4240 write_lock_bh(&ifp->idev->lock);
4241 spin_lock(&ifp->lock);
4242 ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4243 ifp->idev->cnf.rtr_solicit_interval);
4244 ifp->idev->rs_probes = 1;
4245 ifp->idev->if_flags |= IF_RS_SENT;
4246 addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4247 spin_unlock(&ifp->lock);
4248 write_unlock_bh(&ifp->idev->lock);
4249 }
4250
4251 if (bump_id)
4252 rt_genid_bump_ipv6(dev_net(dev));
4253
4254 /* Make sure that a new temporary address will be created
4255 * before this temporary address becomes deprecated.
4256 */
4257 if (ifp->flags & IFA_F_TEMPORARY)
4258 addrconf_verify_rtnl();
4259 }
4260
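/* Kick DAD for every address that is tentative and already in the DAD
 * state; with @restart every address is first moved back to PREDAD and
 * kicked unconditionally.
 */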
4261 static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
4262 {
4263 struct inet6_ifaddr *ifp;
4264
4265 read_lock_bh(&idev->lock);
4266 list_for_each_entry(ifp, &idev->addr_list, if_list) {
4267 spin_lock(&ifp->lock);
4268 if ((ifp->flags & IFA_F_TENTATIVE &&
4269 ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
4270 if (restart)
4271 ifp->state = INET6_IFADDR_STATE_PREDAD;
4272 addrconf_dad_kick(ifp);
4273 }
4274 spin_unlock(&ifp->lock);
4275 }
4276 read_unlock_bh(&idev->lock);
4277 }
4278
4279 #ifdef CONFIG_PROC_FS
4280 struct if6_iter_state {
4281 struct seq_net_private p;
4282 int bucket;
4283 int offset;
4284 };
4285
4286 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4287 {
4288 struct if6_iter_state *state = seq->private;
4289 struct net *net = seq_file_net(seq);
4290 struct inet6_ifaddr *ifa = NULL;
4291 int p = 0;
4292
4293 /* initial bucket if pos is 0 */
4294 if (pos == 0) {
4295 state->bucket = 0;
4296 state->offset = 0;
4297 }
4298
4299 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4300 hlist_for_each_entry_rcu(ifa, &inet6_addr_lst[state->bucket],
4301 addr_lst) {
4302 if (!net_eq(dev_net(ifa->idev->dev), net))
4303 continue;
4304 /* sync with offset */
4305 if (p < state->offset) {
4306 p++;
4307 continue;
4308 }
4309 return ifa;
4310 }
4311
4312 /* prepare for next bucket */
4313 state->offset = 0;
4314 p = 0;
4315 }
4316 return NULL;
4317 }
4318
4319 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4320 struct inet6_ifaddr *ifa)
4321 {
4322 struct if6_iter_state *state = seq->private;
4323 struct net *net = seq_file_net(seq);
4324
4325 hlist_for_each_entry_continue_rcu(ifa, addr_lst) {
4326 if (!net_eq(dev_net(ifa->idev->dev), net))
4327 continue;
4328 state->offset++;
4329 return ifa;
4330 }
4331
4332 state->offset = 0;
4333 while (++state->bucket < IN6_ADDR_HSIZE) {
4334 hlist_for_each_entry_rcu(ifa,
4335 &inet6_addr_lst[state->bucket], addr_lst) {
4336 if (!net_eq(dev_net(ifa->idev->dev), net))
4337 continue;
4338 return ifa;
4339 }
4340 }
4341
4342 return NULL;
4343 }
4344
4345 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4346 __acquires(rcu)
4347 {
4348 rcu_read_lock();
4349 return if6_get_first(seq, *pos);
4350 }
4351
4352 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4353 {
4354 struct inet6_ifaddr *ifa;
4355
4356 ifa = if6_get_next(seq, v);
4357 ++*pos;
4358 return ifa;
4359 }
4360
4361 static void if6_seq_stop(struct seq_file *seq, void *v)
4362 __releases(rcu)
4363 {
4364 rcu_read_unlock();
4365 }
4366
4367 static int if6_seq_show(struct seq_file *seq, void *v)
4368 {
4369 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4370 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4371 &ifp->addr,
4372 ifp->idev->dev->ifindex,
4373 ifp->prefix_len,
4374 ifp->scope,
4375 (u8) ifp->flags,
4376 ifp->idev->dev->name);
4377 return 0;
4378 }
4379
4380 static const struct seq_operations if6_seq_ops = {
4381 .start = if6_seq_start,
4382 .next = if6_seq_next,
4383 .show = if6_seq_show,
4384 .stop = if6_seq_stop,
4385 };
4386
4387 static int __net_init if6_proc_net_init(struct net *net)
4388 {
4389 if (!proc_create_net("if_inet6", 0444, net->proc_net, &if6_seq_ops,
4390 sizeof(struct if6_iter_state)))
4391 return -ENOMEM;
4392 return 0;
4393 }
4394
4395 static void __net_exit if6_proc_net_exit(struct net *net)
4396 {
4397 remove_proc_entry("if_inet6", net->proc_net);
4398 }
4399
4400 static struct pernet_operations if6_proc_net_ops = {
4401 .init = if6_proc_net_init,
4402 .exit = if6_proc_net_exit,
4403 };
4404
4405 int __init if6_proc_init(void)
4406 {
4407 return register_pernet_subsys(&if6_proc_net_ops);
4408 }
4409
4410 void if6_proc_exit(void)
4411 {
4412 unregister_pernet_subsys(&if6_proc_net_ops);
4413 }
4414 #endif /* CONFIG_PROC_FS */
4415
4416 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4417 /* Check if address is a home address configured on any interface. */
4418 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4419 {
4420 unsigned int hash = inet6_addr_hash(net, addr);
4421 struct inet6_ifaddr *ifp = NULL;
4422 int ret = 0;
4423
4424 rcu_read_lock();
4425 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
4426 if (!net_eq(dev_net(ifp->idev->dev), net))
4427 continue;
4428 if (ipv6_addr_equal(&ifp->addr, addr) &&
4429 (ifp->flags & IFA_F_HOMEADDRESS)) {
4430 ret = 1;
4431 break;
4432 }
4433 }
4434 rcu_read_unlock();
4435 return ret;
4436 }
4437 #endif
4438
4439 /* RFC 6554 defines an algorithm to avoid loops in segment routing by
4440 * checking whether the segment list contains any local interface address.
4441 *
4442 * Quote:
4443 *
4444 * To detect loops in the SRH, a router MUST determine if the SRH
4445 * includes multiple addresses assigned to any interface on that router.
4446 * If such addresses appear more than once and are separated by at least
4447 * one address not assigned to that router.
4448 */
4449 int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
4450 unsigned char nsegs)
4451 {
4452 const struct in6_addr *addr;
4453 int i, ret = 0, found = 0;
4454 struct inet6_ifaddr *ifp;
4455 bool separated = false;
4456 unsigned int hash;
4457 bool hash_found;
4458
4459 rcu_read_lock();
4460 for (i = 0; i < nsegs; i++) {
4461 addr = &segs[i];
4462 hash = inet6_addr_hash(net, addr);
4463
4464 hash_found = false;
4465 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
4466 if (!net_eq(dev_net(ifp->idev->dev), net))
4467 continue;
4468
4469 if (ipv6_addr_equal(&ifp->addr, addr)) {
4470 hash_found = true;
4471 break;
4472 }
4473 }
4474
4475 if (hash_found) {
4476 if (found > 1 && separated) {
4477 ret = 1;
4478 break;
4479 }
4480
4481 separated = false;
4482 found++;
4483 } else {
4484 separated = true;
4485 }
4486 }
4487 rcu_read_unlock();
4488
4489 return ret;
4490 }
4491
4492 /*
4493 * Periodic address status verification
4494 */
4495
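/* Walk the address hash table once: delete addresses whose valid
 * lifetime has expired, deprecate those past their preferred lifetime,
 * regenerate temporary addresses shortly before they would be
 * deprecated, and schedule the next run for the earliest upcoming
 * event (clamped between ADDRCONF_TIMER_FUZZ_MAX and
 * ADDR_CHECK_FREQUENCY).
 */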
4496 static void addrconf_verify_rtnl(void)
4497 {
4498 unsigned long now, next, next_sec, next_sched;
4499 struct inet6_ifaddr *ifp;
4500 int i;
4501
4502 ASSERT_RTNL();
4503
4504 rcu_read_lock_bh();
4505 now = jiffies;
4506 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4507
4508 cancel_delayed_work(&addr_chk_work);
4509
4510 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4511 restart:
4512 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
4513 unsigned long age;
4514
4515 /* An IFA_F_PERMANENT address can still have a finite preferred
4516 * lifetime: preferred_lft may be set to a value other than zero
4517 * or infinity while valid_lft is infinity.
4518 */
4519 if ((ifp->flags & IFA_F_PERMANENT) &&
4520 (ifp->prefered_lft == INFINITY_LIFE_TIME))
4521 continue;
4522
4523 spin_lock(&ifp->lock);
4524 /* We try to batch several events at once. */
4525 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4526
4527 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4528 age >= ifp->valid_lft) {
4529 spin_unlock(&ifp->lock);
4530 in6_ifa_hold(ifp);
4531 ipv6_del_addr(ifp);
4532 goto restart;
4533 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4534 spin_unlock(&ifp->lock);
4535 continue;
4536 } else if (age >= ifp->prefered_lft) {
4537 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4538 int deprecate = 0;
4539
4540 if (!(ifp->flags&IFA_F_DEPRECATED)) {
4541 deprecate = 1;
4542 ifp->flags |= IFA_F_DEPRECATED;
4543 }
4544
4545 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4546 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4547 next = ifp->tstamp + ifp->valid_lft * HZ;
4548
4549 spin_unlock(&ifp->lock);
4550
4551 if (deprecate) {
4552 in6_ifa_hold(ifp);
4553
4554 ipv6_ifa_notify(0, ifp);
4555 in6_ifa_put(ifp);
4556 goto restart;
4557 }
4558 } else if ((ifp->flags&IFA_F_TEMPORARY) &&
4559 !(ifp->flags&IFA_F_TENTATIVE)) {
4560 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4561 ifp->idev->cnf.dad_transmits *
4562 max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
4563
4564 if (age >= ifp->prefered_lft - regen_advance) {
4565 struct inet6_ifaddr *ifpub = ifp->ifpub;
4566 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4567 next = ifp->tstamp + ifp->prefered_lft * HZ;
4568 if (!ifp->regen_count && ifpub) {
4569 ifp->regen_count++;
4570 in6_ifa_hold(ifp);
4571 in6_ifa_hold(ifpub);
4572 spin_unlock(&ifp->lock);
4573
4574 spin_lock(&ifpub->lock);
4575 ifpub->regen_count = 0;
4576 spin_unlock(&ifpub->lock);
4577 rcu_read_unlock_bh();
4578 ipv6_create_tempaddr(ifpub, true);
4579 in6_ifa_put(ifpub);
4580 in6_ifa_put(ifp);
4581 rcu_read_lock_bh();
4582 goto restart;
4583 }
4584 } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4585 next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4586 spin_unlock(&ifp->lock);
4587 } else {
4588 /* ifp->prefered_lft <= ifp->valid_lft */
4589 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4590 next = ifp->tstamp + ifp->prefered_lft * HZ;
4591 spin_unlock(&ifp->lock);
4592 }
4593 }
4594 }
4595
4596 next_sec = round_jiffies_up(next);
4597 next_sched = next;
4598
4599 /* If rounded timeout is accurate enough, accept it. */
4600 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4601 next_sched = next_sec;
4602
4603 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4604 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4605 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4606
4607 pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4608 now, next, next_sec, next_sched);
4609 mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
4610 rcu_read_unlock_bh();
4611 }
4612
4613 static void addrconf_verify_work(struct work_struct *w)
4614 {
4615 rtnl_lock();
4616 addrconf_verify_rtnl();
4617 rtnl_unlock();
4618 }
4619
4620 static void addrconf_verify(void)
4621 {
4622 mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
4623 }
4624
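/* Pick the address to operate on from IFA_ADDRESS/IFA_LOCAL: when both
 * are supplied and differ, IFA_LOCAL is the local address and
 * IFA_ADDRESS becomes the peer prefix returned via @peer_pfx.
 */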
4625 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4626 struct in6_addr **peer_pfx)
4627 {
4628 struct in6_addr *pfx = NULL;
4629
4630 *peer_pfx = NULL;
4631
4632 if (addr)
4633 pfx = nla_data(addr);
4634
4635 if (local) {
4636 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4637 *peer_pfx = pfx;
4638 pfx = nla_data(local);
4639 }
4640
4641 return pfx;
4642 }
4643
4644 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4645 [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
4646 [IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
4647 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
4648 [IFA_FLAGS] = { .len = sizeof(u32) },
4649 [IFA_RT_PRIORITY] = { .len = sizeof(u32) },
4650 [IFA_TARGET_NETNSID] = { .type = NLA_S32 },
4651 };
4652
4653 static int
4654 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4655 struct netlink_ext_ack *extack)
4656 {
4657 struct net *net = sock_net(skb->sk);
4658 struct ifaddrmsg *ifm;
4659 struct nlattr *tb[IFA_MAX+1];
4660 struct in6_addr *pfx, *peer_pfx;
4661 u32 ifa_flags;
4662 int err;
4663
4664 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4665 ifa_ipv6_policy, extack);
4666 if (err < 0)
4667 return err;
4668
4669 ifm = nlmsg_data(nlh);
4670 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4671 if (!pfx)
4672 return -EINVAL;
4673
4674 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4675
4676 /* We ignore other flags so far. */
4677 ifa_flags &= IFA_F_MANAGETEMPADDR;
4678
4679 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4680 ifm->ifa_prefixlen);
4681 }
4682
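/* Update the prefix route that belongs to @ifp (or to its peer when
 * @modify_peer is set): if the metric changed, the route is replaced,
 * otherwise only its expiry is updated. Returns -ENOENT when no such
 * route exists.
 */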
4683 static int modify_prefix_route(struct inet6_ifaddr *ifp,
4684 unsigned long expires, u32 flags,
4685 bool modify_peer)
4686 {
4687 struct fib6_info *f6i;
4688 u32 prio;
4689
4690 f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4691 ifp->prefix_len,
4692 ifp->idev->dev, 0, RTF_DEFAULT, true);
4693 if (!f6i)
4694 return -ENOENT;
4695
4696 prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
4697 if (f6i->fib6_metric != prio) {
4698 /* delete old one */
4699 ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
4700
4701 /* add new one */
4702 addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4703 ifp->prefix_len,
4704 ifp->rt_priority, ifp->idev->dev,
4705 expires, flags, GFP_KERNEL);
4706 } else {
4707 if (!expires)
4708 fib6_clean_expires(f6i);
4709 else
4710 fib6_set_expires(f6i, expires);
4711
4712 fib6_info_release(f6i);
4713 }
4714
4715 return 0;
4716 }
4717
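/* Apply a netlink RTM_NEWADDR update to an existing address: validate
 * the lifetimes, refresh flags and lifetimes, rebuild or clean up the
 * prefix route as needed and, for IFA_F_MANAGETEMPADDR addresses,
 * adjust the matching temporary addresses.
 */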
4718 static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
4719 {
4720 u32 flags;
4721 clock_t expires;
4722 unsigned long timeout;
4723 bool was_managetempaddr;
4724 bool had_prefixroute;
4725 bool new_peer = false;
4726
4727 ASSERT_RTNL();
4728
4729 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
4730 return -EINVAL;
4731
4732 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR &&
4733 (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4734 return -EINVAL;
4735
4736 if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED)
4737 cfg->ifa_flags &= ~IFA_F_OPTIMISTIC;
4738
4739 timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
4740 if (addrconf_finite_timeout(timeout)) {
4741 expires = jiffies_to_clock_t(timeout * HZ);
4742 cfg->valid_lft = timeout;
4743 flags = RTF_EXPIRES;
4744 } else {
4745 expires = 0;
4746 flags = 0;
4747 cfg->ifa_flags |= IFA_F_PERMANENT;
4748 }
4749
4750 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
4751 if (addrconf_finite_timeout(timeout)) {
4752 if (timeout == 0)
4753 cfg->ifa_flags |= IFA_F_DEPRECATED;
4754 cfg->preferred_lft = timeout;
4755 }
4756
4757 if (cfg->peer_pfx &&
4758 memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
4759 if (!ipv6_addr_any(&ifp->peer_addr))
4760 cleanup_prefix_route(ifp, expires, true, true);
4761 new_peer = true;
4762 }
4763
4764 spin_lock_bh(&ifp->lock);
4765 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4766 had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4767 !(ifp->flags & IFA_F_NOPREFIXROUTE);
4768 ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4769 IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4770 IFA_F_NOPREFIXROUTE);
4771 ifp->flags |= cfg->ifa_flags;
4772 ifp->tstamp = jiffies;
4773 ifp->valid_lft = cfg->valid_lft;
4774 ifp->prefered_lft = cfg->preferred_lft;
4775
4776 if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
4777 ifp->rt_priority = cfg->rt_priority;
4778
4779 if (new_peer)
4780 ifp->peer_addr = *cfg->peer_pfx;
4781
4782 spin_unlock_bh(&ifp->lock);
4783 if (!(ifp->flags&IFA_F_TENTATIVE))
4784 ipv6_ifa_notify(0, ifp);
4785
4786 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
4787 int rc = -ENOENT;
4788
4789 if (had_prefixroute)
4790 rc = modify_prefix_route(ifp, expires, flags, false);
4791
4792 /* prefix route could have been deleted; if so restore it */
4793 if (rc == -ENOENT) {
4794 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
4795 ifp->rt_priority, ifp->idev->dev,
4796 expires, flags, GFP_KERNEL);
4797 }
4798
4799 if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
4800 rc = modify_prefix_route(ifp, expires, flags, true);
4801
4802 if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
4803 addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
4804 ifp->rt_priority, ifp->idev->dev,
4805 expires, flags, GFP_KERNEL);
4806 }
4807 } else if (had_prefixroute) {
4808 enum cleanup_prefix_rt_t action;
4809 unsigned long rt_expires;
4810
4811 write_lock_bh(&ifp->idev->lock);
4812 action = check_cleanup_prefix_route(ifp, &rt_expires);
4813 write_unlock_bh(&ifp->idev->lock);
4814
4815 if (action != CLEANUP_PREFIX_RT_NOP) {
4816 cleanup_prefix_route(ifp, rt_expires,
4817 action == CLEANUP_PREFIX_RT_DEL, false);
4818 }
4819 }
4820
4821 if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4822 if (was_managetempaddr &&
4823 !(ifp->flags & IFA_F_MANAGETEMPADDR)) {
4824 cfg->valid_lft = 0;
4825 cfg->preferred_lft = 0;
4826 }
4827 manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
4828 cfg->preferred_lft, !was_managetempaddr,
4829 jiffies);
4830 }
4831
4832 addrconf_verify_rtnl();
4833
4834 return 0;
4835 }
4836
4837 static int
4838 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4839 struct netlink_ext_ack *extack)
4840 {
4841 struct net *net = sock_net(skb->sk);
4842 struct ifaddrmsg *ifm;
4843 struct nlattr *tb[IFA_MAX+1];
4844 struct in6_addr *peer_pfx;
4845 struct inet6_ifaddr *ifa;
4846 struct net_device *dev;
4847 struct inet6_dev *idev;
4848 struct ifa6_config cfg;
4849 int err;
4850
4851 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4852 ifa_ipv6_policy, extack);
4853 if (err < 0)
4854 return err;
4855
4856 memset(&cfg, 0, sizeof(cfg));
4857
4858 ifm = nlmsg_data(nlh);
4859 cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4860 if (!cfg.pfx)
4861 return -EINVAL;
4862
4863 cfg.peer_pfx = peer_pfx;
4864 cfg.plen = ifm->ifa_prefixlen;
4865 if (tb[IFA_RT_PRIORITY])
4866 cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
4867
4868 cfg.valid_lft = INFINITY_LIFE_TIME;
4869 cfg.preferred_lft = INFINITY_LIFE_TIME;
4870
4871 if (tb[IFA_CACHEINFO]) {
4872 struct ifa_cacheinfo *ci;
4873
4874 ci = nla_data(tb[IFA_CACHEINFO]);
4875 cfg.valid_lft = ci->ifa_valid;
4876 cfg.preferred_lft = ci->ifa_prefered;
4877 }
4878
4879 dev = __dev_get_by_index(net, ifm->ifa_index);
4880 if (!dev)
4881 return -ENODEV;
4882
4883 if (tb[IFA_FLAGS])
4884 cfg.ifa_flags = nla_get_u32(tb[IFA_FLAGS]);
4885 else
4886 cfg.ifa_flags = ifm->ifa_flags;
4887
4888 /* We ignore other flags so far. */
4889 cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS |
4890 IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE |
4891 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
4892
4893 idev = ipv6_find_idev(dev);
4894 if (IS_ERR(idev))
4895 return PTR_ERR(idev);
4896
4897 if (!ipv6_allow_optimistic_dad(net, idev))
4898 cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
4899
4900 if (cfg.ifa_flags & IFA_F_NODAD &&
4901 cfg.ifa_flags & IFA_F_OPTIMISTIC) {
4902 NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive");
4903 return -EINVAL;
4904 }
4905
4906 ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1);
4907 if (!ifa) {
4908 /*
4909 * It would be best to check for !NLM_F_CREATE here but
4910 * userspace already relies on not having to provide this.
4911 */
4912 return inet6_addr_add(net, ifm->ifa_index, &cfg, extack);
4913 }
4914
4915 if (nlh->nlmsg_flags & NLM_F_EXCL ||
4916 !(nlh->nlmsg_flags & NLM_F_REPLACE))
4917 err = -EEXIST;
4918 else
4919 err = inet6_addr_modify(ifa, &cfg);
4920
4921 in6_ifa_put(ifa);
4922
4923 return err;
4924 }
4925
4926 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4927 u8 scope, int ifindex)
4928 {
4929 struct ifaddrmsg *ifm;
4930
4931 ifm = nlmsg_data(nlh);
4932 ifm->ifa_family = AF_INET6;
4933 ifm->ifa_prefixlen = prefixlen;
4934 ifm->ifa_flags = flags;
4935 ifm->ifa_scope = scope;
4936 ifm->ifa_index = ifindex;
4937 }
4938
4939 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4940 unsigned long tstamp, u32 preferred, u32 valid)
4941 {
4942 struct ifa_cacheinfo ci;
4943
4944 ci.cstamp = cstamp_delta(cstamp);
4945 ci.tstamp = cstamp_delta(tstamp);
4946 ci.ifa_prefered = preferred;
4947 ci.ifa_valid = valid;
4948
4949 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4950 }
4951
4952 static inline int rt_scope(int ifa_scope)
4953 {
4954 if (ifa_scope & IFA_HOST)
4955 return RT_SCOPE_HOST;
4956 else if (ifa_scope & IFA_LINK)
4957 return RT_SCOPE_LINK;
4958 else if (ifa_scope & IFA_SITE)
4959 return RT_SCOPE_SITE;
4960 else
4961 return RT_SCOPE_UNIVERSE;
4962 }
4963
4964 static inline int inet6_ifaddr_msgsize(void)
4965 {
4966 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4967 + nla_total_size(16) /* IFA_LOCAL */
4968 + nla_total_size(16) /* IFA_ADDRESS */
4969 + nla_total_size(sizeof(struct ifa_cacheinfo))
4970 + nla_total_size(4) /* IFA_FLAGS */
4971 + nla_total_size(4) /* IFA_RT_PRIORITY */;
4972 }
4973
4974 enum addr_type_t {
4975 UNICAST_ADDR,
4976 MULTICAST_ADDR,
4977 ANYCAST_ADDR,
4978 };
4979
4980 struct inet6_fill_args {
4981 u32 portid;
4982 u32 seq;
4983 int event;
4984 unsigned int flags;
4985 int netnsid;
4986 int ifindex;
4987 enum addr_type_t type;
4988 };
4989
4990 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
4991 struct inet6_fill_args *args)
4992 {
4993 struct nlmsghdr *nlh;
4994 u32 preferred, valid;
4995
4996 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
4997 sizeof(struct ifaddrmsg), args->flags);
4998 if (!nlh)
4999 return -EMSGSIZE;
5000
5001 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
5002 ifa->idev->dev->ifindex);
5003
5004 if (args->netnsid >= 0 &&
5005 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
5006 goto error;
5007
5008 spin_lock_bh(&ifa->lock);
5009 if (!((ifa->flags&IFA_F_PERMANENT) &&
5010 (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
5011 preferred = ifa->prefered_lft;
5012 valid = ifa->valid_lft;
5013 if (preferred != INFINITY_LIFE_TIME) {
5014 long tval = (jiffies - ifa->tstamp)/HZ;
5015 if (preferred > tval)
5016 preferred -= tval;
5017 else
5018 preferred = 0;
5019 if (valid != INFINITY_LIFE_TIME) {
5020 if (valid > tval)
5021 valid -= tval;
5022 else
5023 valid = 0;
5024 }
5025 }
5026 } else {
5027 preferred = INFINITY_LIFE_TIME;
5028 valid = INFINITY_LIFE_TIME;
5029 }
5030 spin_unlock_bh(&ifa->lock);
5031
5032 if (!ipv6_addr_any(&ifa->peer_addr)) {
5033 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
5034 nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
5035 goto error;
5036 } else
5037 if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
5038 goto error;
5039
5040 if (ifa->rt_priority &&
5041 nla_put_u32(skb, IFA_RT_PRIORITY, ifa->rt_priority))
5042 goto error;
5043
5044 if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
5045 goto error;
5046
5047 if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
5048 goto error;
5049
5050 nlmsg_end(skb, nlh);
5051 return 0;
5052
5053 error:
5054 nlmsg_cancel(skb, nlh);
5055 return -EMSGSIZE;
5056 }
5057
5058 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
5059 struct inet6_fill_args *args)
5060 {
5061 struct nlmsghdr *nlh;
5062 u8 scope = RT_SCOPE_UNIVERSE;
5063 int ifindex = ifmca->idev->dev->ifindex;
5064
5065 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
5066 scope = RT_SCOPE_SITE;
5067
5068 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5069 sizeof(struct ifaddrmsg), args->flags);
5070 if (!nlh)
5071 return -EMSGSIZE;
5072
5073 if (args->netnsid >= 0 &&
5074 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5075 nlmsg_cancel(skb, nlh);
5076 return -EMSGSIZE;
5077 }
5078
5079 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5080 if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
5081 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
5082 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5083 nlmsg_cancel(skb, nlh);
5084 return -EMSGSIZE;
5085 }
5086
5087 nlmsg_end(skb, nlh);
5088 return 0;
5089 }
5090
5091 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
5092 struct inet6_fill_args *args)
5093 {
5094 struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
5095 int ifindex = dev ? dev->ifindex : 1;
5096 struct nlmsghdr *nlh;
5097 u8 scope = RT_SCOPE_UNIVERSE;
5098
5099 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
5100 scope = RT_SCOPE_SITE;
5101
5102 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5103 sizeof(struct ifaddrmsg), args->flags);
5104 if (!nlh)
5105 return -EMSGSIZE;
5106
5107 if (args->netnsid >= 0 &&
5108 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5109 nlmsg_cancel(skb, nlh);
5110 return -EMSGSIZE;
5111 }
5112
5113 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5114 if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
5115 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
5116 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5117 nlmsg_cancel(skb, nlh);
5118 return -EMSGSIZE;
5119 }
5120
5121 nlmsg_end(skb, nlh);
5122 return 0;
5123 }
5124
5125 /* called with rcu_read_lock() */
5126 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
5127 struct netlink_callback *cb, int s_ip_idx,
5128 struct inet6_fill_args *fillargs)
5129 {
5130 struct ifmcaddr6 *ifmca;
5131 struct ifacaddr6 *ifaca;
5132 int ip_idx = 0;
5133 int err = 1;
5134
5135 read_lock_bh(&idev->lock);
5136 switch (fillargs->type) {
5137 case UNICAST_ADDR: {
5138 struct inet6_ifaddr *ifa;
5139 fillargs->event = RTM_NEWADDR;
5140
5141 /* unicast address incl. temp addr */
5142 list_for_each_entry(ifa, &idev->addr_list, if_list) {
5143 if (ip_idx < s_ip_idx)
5144 goto next;
5145 err = inet6_fill_ifaddr(skb, ifa, fillargs);
5146 if (err < 0)
5147 break;
5148 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5149 next:
5150 ip_idx++;
5151 }
5152 break;
5153 }
5154 case MULTICAST_ADDR:
5155 fillargs->event = RTM_GETMULTICAST;
5156
5157 /* multicast address */
5158 for (ifmca = idev->mc_list; ifmca;
5159 ifmca = ifmca->next, ip_idx++) {
5160 if (ip_idx < s_ip_idx)
5161 continue;
5162 err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
5163 if (err < 0)
5164 break;
5165 }
5166 break;
5167 case ANYCAST_ADDR:
5168 fillargs->event = RTM_GETANYCAST;
5169 /* anycast address */
5170 for (ifaca = idev->ac_list; ifaca;
5171 ifaca = ifaca->aca_next, ip_idx++) {
5172 if (ip_idx < s_ip_idx)
5173 continue;
5174 err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
5175 if (err < 0)
5176 break;
5177 }
5178 break;
5179 default:
5180 break;
5181 }
5182 read_unlock_bh(&idev->lock);
5183 cb->args[2] = ip_idx;
5184 return err;
5185 }
5186
5187 static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
5188 struct inet6_fill_args *fillargs,
5189 struct net **tgt_net, struct sock *sk,
5190 struct netlink_callback *cb)
5191 {
5192 struct netlink_ext_ack *extack = cb->extack;
5193 struct nlattr *tb[IFA_MAX+1];
5194 struct ifaddrmsg *ifm;
5195 int err, i;
5196
5197 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5198 NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request");
5199 return -EINVAL;
5200 }
5201
5202 ifm = nlmsg_data(nlh);
5203 if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5204 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request");
5205 return -EINVAL;
5206 }
5207
5208 fillargs->ifindex = ifm->ifa_index;
5209 if (fillargs->ifindex) {
5210 cb->answer_flags |= NLM_F_DUMP_FILTERED;
5211 fillargs->flags |= NLM_F_DUMP_FILTERED;
5212 }
5213
5214 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5215 ifa_ipv6_policy, extack);
5216 if (err < 0)
5217 return err;
5218
5219 for (i = 0; i <= IFA_MAX; ++i) {
5220 if (!tb[i])
5221 continue;
5222
5223 if (i == IFA_TARGET_NETNSID) {
5224 struct net *net;
5225
5226 fillargs->netnsid = nla_get_s32(tb[i]);
5227 net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
5228 if (IS_ERR(net)) {
5229 fillargs->netnsid = -1;
5230 NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id");
5231 return PTR_ERR(net);
5232 }
5233 *tgt_net = net;
5234 } else {
5235 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
5236 return -EINVAL;
5237 }
5238 }
5239
5240 return 0;
5241 }
5242
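/* Dump walk shared by the unicast, multicast and anycast dumpers.
 * Progress is kept in cb->args[]: [0] device hash bucket, [1] device
 * index within the bucket, [2] address index within the device.
 */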
5243 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
5244 enum addr_type_t type)
5245 {
5246 const struct nlmsghdr *nlh = cb->nlh;
5247 struct inet6_fill_args fillargs = {
5248 .portid = NETLINK_CB(cb->skb).portid,
5249 .seq = cb->nlh->nlmsg_seq,
5250 .flags = NLM_F_MULTI,
5251 .netnsid = -1,
5252 .type = type,
5253 };
5254 struct net *net = sock_net(skb->sk);
5255 struct net *tgt_net = net;
5256 int idx, s_idx, s_ip_idx;
5257 int h, s_h;
5258 struct net_device *dev;
5259 struct inet6_dev *idev;
5260 struct hlist_head *head;
5261 int err = 0;
5262
5263 s_h = cb->args[0];
5264 s_idx = idx = cb->args[1];
5265 s_ip_idx = cb->args[2];
5266
5267 if (cb->strict_check) {
5268 err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
5269 skb->sk, cb);
5270 if (err < 0)
5271 goto put_tgt_net;
5272
5273 err = 0;
5274 if (fillargs.ifindex) {
5275 dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
5276 if (!dev) {
5277 err = -ENODEV;
5278 goto put_tgt_net;
5279 }
5280 idev = __in6_dev_get(dev);
5281 if (idev) {
5282 err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
5283 &fillargs);
5284 if (err > 0)
5285 err = 0;
5286 }
5287 goto put_tgt_net;
5288 }
5289 }
5290
5291 rcu_read_lock();
5292 cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
5293 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5294 idx = 0;
5295 head = &tgt_net->dev_index_head[h];
5296 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5297 if (idx < s_idx)
5298 goto cont;
5299 if (h > s_h || idx > s_idx)
5300 s_ip_idx = 0;
5301 idev = __in6_dev_get(dev);
5302 if (!idev)
5303 goto cont;
5304
5305 if (in6_dump_addrs(idev, skb, cb, s_ip_idx,
5306 &fillargs) < 0)
5307 goto done;
5308 cont:
5309 idx++;
5310 }
5311 }
5312 done:
5313 rcu_read_unlock();
5314 cb->args[0] = h;
5315 cb->args[1] = idx;
5316 put_tgt_net:
5317 if (fillargs.netnsid >= 0)
5318 put_net(tgt_net);
5319
5320 return skb->len ? : err;
5321 }
5322
5323 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
5324 {
5325 enum addr_type_t type = UNICAST_ADDR;
5326
5327 return inet6_dump_addr(skb, cb, type);
5328 }
5329
5330 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
5331 {
5332 enum addr_type_t type = MULTICAST_ADDR;
5333
5334 return inet6_dump_addr(skb, cb, type);
5335 }
5336
5337
5338 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
5339 {
5340 enum addr_type_t type = ANYCAST_ADDR;
5341
5342 return inet6_dump_addr(skb, cb, type);
5343 }
5344
5345 static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
5346 const struct nlmsghdr *nlh,
5347 struct nlattr **tb,
5348 struct netlink_ext_ack *extack)
5349 {
5350 struct ifaddrmsg *ifm;
5351 int i, err;
5352
5353 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5354 NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request");
5355 return -EINVAL;
5356 }
5357
5358 if (!netlink_strict_get_check(skb))
5359 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
5360 ifa_ipv6_policy, extack);
5361
5362 ifm = nlmsg_data(nlh);
5363 if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5364 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
5365 return -EINVAL;
5366 }
5367
5368 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5369 ifa_ipv6_policy, extack);
5370 if (err)
5371 return err;
5372
5373 for (i = 0; i <= IFA_MAX; i++) {
5374 if (!tb[i])
5375 continue;
5376
5377 switch (i) {
5378 case IFA_TARGET_NETNSID:
5379 case IFA_ADDRESS:
5380 case IFA_LOCAL:
5381 break;
5382 default:
5383 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request");
5384 return -EINVAL;
5385 }
5386 }
5387
5388 return 0;
5389 }
5390
5391 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5392 struct netlink_ext_ack *extack)
5393 {
5394 struct net *net = sock_net(in_skb->sk);
5395 struct inet6_fill_args fillargs = {
5396 .portid = NETLINK_CB(in_skb).portid,
5397 .seq = nlh->nlmsg_seq,
5398 .event = RTM_NEWADDR,
5399 .flags = 0,
5400 .netnsid = -1,
5401 };
5402 struct net *tgt_net = net;
5403 struct ifaddrmsg *ifm;
5404 struct nlattr *tb[IFA_MAX+1];
5405 struct in6_addr *addr = NULL, *peer;
5406 struct net_device *dev = NULL;
5407 struct inet6_ifaddr *ifa;
5408 struct sk_buff *skb;
5409 int err;
5410
5411 err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack);
5412 if (err < 0)
5413 return err;
5414
5415 if (tb[IFA_TARGET_NETNSID]) {
5416 fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
5417
5418 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
5419 fillargs.netnsid);
5420 if (IS_ERR(tgt_net))
5421 return PTR_ERR(tgt_net);
5422 }
5423
5424 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
5425 if (!addr) {
5426 err = -EINVAL;
5427 goto errout;
5428 }
5429 ifm = nlmsg_data(nlh);
5430 if (ifm->ifa_index)
5431 dev = dev_get_by_index(tgt_net, ifm->ifa_index);
5432
5433 ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1);
5434 if (!ifa) {
5435 err = -EADDRNOTAVAIL;
5436 goto errout;
5437 }
5438
5439 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
5440 if (!skb) {
5441 err = -ENOBUFS;
5442 goto errout_ifa;
5443 }
5444
5445 err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5446 if (err < 0) {
5447 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5448 WARN_ON(err == -EMSGSIZE);
5449 kfree_skb(skb);
5450 goto errout_ifa;
5451 }
5452 err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid);
5453 errout_ifa:
5454 in6_ifa_put(ifa);
5455 errout:
5456 if (dev)
5457 dev_put(dev);
5458 if (fillargs.netnsid >= 0)
5459 put_net(tgt_net);
5460
5461 return err;
5462 }
5463
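/* Notify address changes to the RTNLGRP_IPV6_IFADDR netlink group; the
 * skb is allocated with GFP_ATOMIC because callers may run in atomic
 * context.
 */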
5464 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
5465 {
5466 struct sk_buff *skb;
5467 struct net *net = dev_net(ifa->idev->dev);
5468 struct inet6_fill_args fillargs = {
5469 .portid = 0,
5470 .seq = 0,
5471 .event = event,
5472 .flags = 0,
5473 .netnsid = -1,
5474 };
5475 int err = -ENOBUFS;
5476
5477 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
5478 if (!skb)
5479 goto errout;
5480
5481 err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5482 if (err < 0) {
5483 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5484 WARN_ON(err == -EMSGSIZE);
5485 kfree_skb(skb);
5486 goto errout;
5487 }
5488 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
5489 return;
5490 errout:
5491 if (err < 0)
5492 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
5493 }
5494
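/* Flatten the per-device configuration into the s32 array exported as
 * IFLA_INET6_CONF; interval values are converted from jiffies to
 * milliseconds and DEVCONF_STABLE_SECRET is deliberately left zero.
 */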
5495 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
5496 __s32 *array, int bytes)
5497 {
5498 BUG_ON(bytes < (DEVCONF_MAX * 4));
5499
5500 memset(array, 0, bytes);
5501 array[DEVCONF_FORWARDING] = cnf->forwarding;
5502 array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
5503 array[DEVCONF_MTU6] = cnf->mtu6;
5504 array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
5505 array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
5506 array[DEVCONF_AUTOCONF] = cnf->autoconf;
5507 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
5508 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
5509 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
5510 jiffies_to_msecs(cnf->rtr_solicit_interval);
5511 array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
5512 jiffies_to_msecs(cnf->rtr_solicit_max_interval);
5513 array[DEVCONF_RTR_SOLICIT_DELAY] =
5514 jiffies_to_msecs(cnf->rtr_solicit_delay);
5515 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
5516 array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
5517 jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
5518 array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
5519 jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
5520 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
5521 array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
5522 array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
5523 array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
5524 array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
5525 array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
5526 array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
5527 array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
5528 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
5529 #ifdef CONFIG_IPV6_ROUTER_PREF
5530 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
5531 array[DEVCONF_RTR_PROBE_INTERVAL] =
5532 jiffies_to_msecs(cnf->rtr_probe_interval);
5533 #ifdef CONFIG_IPV6_ROUTE_INFO
5534 array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
5535 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
5536 #endif
5537 #endif
5538 array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
5539 array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
5540 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5541 array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
5542 array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
5543 #endif
5544 #ifdef CONFIG_IPV6_MROUTE
5545 array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
5546 #endif
5547 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
5548 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
5549 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
5550 array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
5551 array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
5552 array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
5553 array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
5554 array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
5555 /* we omit DEVCONF_STABLE_SECRET for now */
5556 array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
5557 array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
5558 array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
5559 array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
5560 array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
5561 #ifdef CONFIG_IPV6_SEG6_HMAC
5562 array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
5563 #endif
5564 array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5565 array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5566 array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5567 array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
5568 array[DEVCONF_RPL_SEG_ENABLED] = cnf->rpl_seg_enabled;
5569 array[DEVCONF_ACCEPT_RA_MIN_LFT] = cnf->accept_ra_min_lft;
5570 }
5571
5572 static inline size_t inet6_ifla6_size(void)
5573 {
5574 return nla_total_size(4) /* IFLA_INET6_FLAGS */
5575 + nla_total_size(sizeof(struct ifla_cacheinfo))
5576 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5577 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5578 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5579 + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */
5580 + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */
5581 + 0;
5582 }
5583
5584 static inline size_t inet6_if_nlmsg_size(void)
5585 {
5586 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5587 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5588 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5589 + nla_total_size(4) /* IFLA_MTU */
5590 + nla_total_size(4) /* IFLA_LINK */
5591 + nla_total_size(1) /* IFLA_OPERSTATE */
5592 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5593 }
5594
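/* Copy the ICMPv6 MIB counters into the netlink attribute buffer;
 * slot 0 carries the number of counters and any tail space beyond
 * ICMP6_MIB_MAX is zeroed.
 */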
5595 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5596 int bytes)
5597 {
5598 int i;
5599 int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5600 BUG_ON(pad < 0);
5601
5602 /* Use put_unaligned() because stats may not be aligned for u64. */
5603 put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5604 for (i = 1; i < ICMP6_MIB_MAX; i++)
5605 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5606
5607 memset(&stats[ICMP6_MIB_MAX], 0, pad);
5608 }
5609
5610 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5611 int bytes, size_t syncpoff)
5612 {
5613 int i, c;
5614 u64 buff[IPSTATS_MIB_MAX];
5615 int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5616
5617 BUG_ON(pad < 0);
5618
5619 memset(buff, 0, sizeof(buff));
5620 buff[0] = IPSTATS_MIB_MAX;
5621
5622 for_each_possible_cpu(c) {
5623 for (i = 1; i < IPSTATS_MIB_MAX; i++)
5624 buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5625 }
5626
5627 memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5628 memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5629 }
5630
5631 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5632 int bytes)
5633 {
5634 switch (attrtype) {
5635 case IFLA_INET6_STATS:
5636 __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5637 offsetof(struct ipstats_mib, syncp));
5638 break;
5639 case IFLA_INET6_ICMP6STATS:
5640 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5641 break;
5642 }
5643 }
5644
5645 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5646 u32 ext_filter_mask)
5647 {
5648 struct nlattr *nla;
5649 struct ifla_cacheinfo ci;
5650
5651 if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5652 goto nla_put_failure;
5653 ci.max_reasm_len = IPV6_MAXPLEN;
5654 ci.tstamp = cstamp_delta(idev->tstamp);
5655 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5656 ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5657 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5658 goto nla_put_failure;
5659 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5660 if (!nla)
5661 goto nla_put_failure;
5662 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5663
5664 /* XXX - MC not implemented */
5665
5666 if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5667 return 0;
5668
5669 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5670 if (!nla)
5671 goto nla_put_failure;
5672 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5673
5674 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5675 if (!nla)
5676 goto nla_put_failure;
5677 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5678
5679 nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5680 if (!nla)
5681 goto nla_put_failure;
5682 read_lock_bh(&idev->lock);
5683 memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5684 read_unlock_bh(&idev->lock);
5685
5686 if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
5687 goto nla_put_failure;
5688
5689 return 0;
5690
5691 nla_put_failure:
5692 return -EMSGSIZE;
5693 }
5694
5695 static size_t inet6_get_link_af_size(const struct net_device *dev,
5696 u32 ext_filter_mask)
5697 {
5698 if (!__in6_dev_get(dev))
5699 return 0;
5700
5701 return inet6_ifla6_size();
5702 }
5703
5704 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5705 u32 ext_filter_mask)
5706 {
5707 struct inet6_dev *idev = __in6_dev_get(dev);
5708
5709 if (!idev)
5710 return -ENODATA;
5711
5712 if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5713 return -EMSGSIZE;
5714
5715 return 0;
5716 }
5717
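/* Install a new interface token (the lower 64 bits of @token) used to form
 * tokenized addresses from RA prefixes. Unless the token is being cleared,
 * a router solicitation is sent once the device is ready, and existing
 * tokenized addresses have their lifetimes zeroed so addrconf_verify_rtnl()
 * can expire them.
 */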
5718 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
5719 {
5720 struct inet6_ifaddr *ifp;
5721 struct net_device *dev = idev->dev;
5722 bool clear_token, update_rs = false;
5723 struct in6_addr ll_addr;
5724
5725 ASSERT_RTNL();
5726
5727 if (!token)
5728 return -EINVAL;
5729 if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
5730 return -EINVAL;
5731 if (!ipv6_accept_ra(idev))
5732 return -EINVAL;
5733 if (idev->cnf.rtr_solicits == 0)
5734 return -EINVAL;
5735
5736 write_lock_bh(&idev->lock);
5737
5738 BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5739 memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5740
5741 write_unlock_bh(&idev->lock);
5742
5743 clear_token = ipv6_addr_any(token);
5744 if (clear_token)
5745 goto update_lft;
5746
5747 if (!idev->dead && (idev->if_flags & IF_READY) &&
5748 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5749 IFA_F_OPTIMISTIC)) {
5750 /* If we're not ready, then normal ifup will take care
5751 * of this. Otherwise, we need to request our rs here.
5752 */
5753 ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5754 update_rs = true;
5755 }
5756
5757 update_lft:
5758 write_lock_bh(&idev->lock);
5759
5760 if (update_rs) {
5761 idev->if_flags |= IF_RS_SENT;
5762 idev->rs_interval = rfc3315_s14_backoff_init(
5763 idev->cnf.rtr_solicit_interval);
5764 idev->rs_probes = 1;
5765 addrconf_mod_rs_timer(idev, idev->rs_interval);
5766 }
5767
5768 /* Well, that's kinda nasty ... */
5769 list_for_each_entry(ifp, &idev->addr_list, if_list) {
5770 spin_lock(&ifp->lock);
5771 if (ifp->tokenized) {
5772 ifp->valid_lft = 0;
5773 ifp->prefered_lft = 0;
5774 }
5775 spin_unlock(&ifp->lock);
5776 }
5777
5778 write_unlock_bh(&idev->lock);
5779 inet6_ifinfo_notify(RTM_NEWLINK, idev);
5780 addrconf_verify_rtnl();
5781 return 0;
5782 }
5783
5784 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5785 [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
5786 [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
5787 };
5788
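/* Returns 1 if @mode is a valid IN6_ADDR_GEN_MODE_* value, -EINVAL otherwise. */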
5789 static int check_addr_gen_mode(int mode)
5790 {
5791 if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5792 mode != IN6_ADDR_GEN_MODE_NONE &&
5793 mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5794 mode != IN6_ADDR_GEN_MODE_RANDOM)
5795 return -EINVAL;
5796 return 1;
5797 }
5798
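/* Stable-privacy mode requires a stable secret on the device or in the
 * per-netns default devconf; returns -EINVAL if neither is initialized,
 * 1 otherwise.
 */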
5799 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5800 int mode)
5801 {
5802 if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5803 !idev->cnf.stable_secret.initialized &&
5804 !net->ipv6.devconf_dflt->stable_secret.initialized)
5805 return -EINVAL;
5806 return 1;
5807 }
5808
5809 static int inet6_validate_link_af(const struct net_device *dev,
5810 const struct nlattr *nla)
5811 {
5812 struct nlattr *tb[IFLA_INET6_MAX + 1];
5813 struct inet6_dev *idev = NULL;
5814 int err;
5815
5816 if (dev) {
5817 idev = __in6_dev_get(dev);
5818 if (!idev)
5819 return -EAFNOSUPPORT;
5820 }
5821
5822 err = nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla,
5823 inet6_af_policy, NULL);
5824 if (err)
5825 return err;
5826
5827 if (!tb[IFLA_INET6_TOKEN] && !tb[IFLA_INET6_ADDR_GEN_MODE])
5828 return -EINVAL;
5829
5830 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5831 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5832
5833 if (check_addr_gen_mode(mode) < 0)
5834 return -EINVAL;
5835 if (dev && check_stable_privacy(idev, dev_net(dev), mode) < 0)
5836 return -EINVAL;
5837 }
5838
5839 return 0;
5840 }
5841
5842 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
5843 {
5844 struct inet6_dev *idev = __in6_dev_get(dev);
5845 struct nlattr *tb[IFLA_INET6_MAX + 1];
5846 int err;
5847
5848 if (!idev)
5849 return -EAFNOSUPPORT;
5850
5851 if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
5852 return -EINVAL;
5853
5854 if (tb[IFLA_INET6_TOKEN]) {
5855 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
5856 if (err)
5857 return err;
5858 }
5859
5860 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5861 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5862
5863 idev->cnf.addr_gen_mode = mode;
5864 }
5865
5866 return 0;
5867 }
5868
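/* Build an AF_INET6 ifinfomsg (RTM_NEWLINK) for @idev, including the
 * nested IFLA_PROTINFO block filled by inet6_fill_ifla6_attrs().
 */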
5869 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5870 u32 portid, u32 seq, int event, unsigned int flags)
5871 {
5872 struct net_device *dev = idev->dev;
5873 struct ifinfomsg *hdr;
5874 struct nlmsghdr *nlh;
5875 void *protoinfo;
5876
5877 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5878 if (!nlh)
5879 return -EMSGSIZE;
5880
5881 hdr = nlmsg_data(nlh);
5882 hdr->ifi_family = AF_INET6;
5883 hdr->__ifi_pad = 0;
5884 hdr->ifi_type = dev->type;
5885 hdr->ifi_index = dev->ifindex;
5886 hdr->ifi_flags = dev_get_flags(dev);
5887 hdr->ifi_change = 0;
5888
5889 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5890 (dev->addr_len &&
5891 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5892 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5893 (dev->ifindex != dev_get_iflink(dev) &&
5894 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5895 nla_put_u8(skb, IFLA_OPERSTATE,
5896 netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5897 goto nla_put_failure;
5898 protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
5899 if (!protoinfo)
5900 goto nla_put_failure;
5901
5902 if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5903 goto nla_put_failure;
5904
5905 nla_nest_end(skb, protoinfo);
5906 nlmsg_end(skb, nlh);
5907 return 0;
5908
5909 nla_put_failure:
5910 nlmsg_cancel(skb, nlh);
5911 return -EMSGSIZE;
5912 }
5913
5914 static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
5915 struct netlink_ext_ack *extack)
5916 {
5917 struct ifinfomsg *ifm;
5918
5919 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5920 NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request");
5921 return -EINVAL;
5922 }
5923
5924 if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
5925 NL_SET_ERR_MSG_MOD(extack, "Invalid data after header");
5926 return -EINVAL;
5927 }
5928
5929 ifm = nlmsg_data(nlh);
5930 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
5931 ifm->ifi_change || ifm->ifi_index) {
5932 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request");
5933 return -EINVAL;
5934 }
5935
5936 return 0;
5937 }
5938
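/* Netlink dump handler for AF_INET6 RTM_GETLINK: walks the per-netns
 * device index hash and emits one RTM_NEWLINK message per device that
 * has IPv6 enabled, resuming from cb->args[] across partial dumps.
 */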
5939 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5940 {
5941 struct net *net = sock_net(skb->sk);
5942 int h, s_h;
5943 int idx = 0, s_idx;
5944 struct net_device *dev;
5945 struct inet6_dev *idev;
5946 struct hlist_head *head;
5947
5948 /* only requests using strict checking can pass data to
5949 * influence the dump
5950 */
5951 if (cb->strict_check) {
5952 int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
5953
5954 if (err < 0)
5955 return err;
5956 }
5957
5958 s_h = cb->args[0];
5959 s_idx = cb->args[1];
5960
5961 rcu_read_lock();
5962 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5963 idx = 0;
5964 head = &net->dev_index_head[h];
5965 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5966 if (idx < s_idx)
5967 goto cont;
5968 idev = __in6_dev_get(dev);
5969 if (!idev)
5970 goto cont;
5971 if (inet6_fill_ifinfo(skb, idev,
5972 NETLINK_CB(cb->skb).portid,
5973 cb->nlh->nlmsg_seq,
5974 RTM_NEWLINK, NLM_F_MULTI) < 0)
5975 goto out;
5976 cont:
5977 idx++;
5978 }
5979 }
5980 out:
5981 rcu_read_unlock();
5982 cb->args[1] = idx;
5983 cb->args[0] = h;
5984
5985 return skb->len;
5986 }
5987
5988 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
5989 {
5990 struct sk_buff *skb;
5991 struct net *net = dev_net(idev->dev);
5992 int err = -ENOBUFS;
5993
5994 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
5995 if (!skb)
5996 goto errout;
5997
5998 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
5999 if (err < 0) {
6000 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
6001 WARN_ON(err == -EMSGSIZE);
6002 kfree_skb(skb);
6003 goto errout;
6004 }
6005 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
6006 return;
6007 errout:
6008 if (err < 0)
6009 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
6010 }
6011
6012 static inline size_t inet6_prefix_nlmsg_size(void)
6013 {
6014 return NLMSG_ALIGN(sizeof(struct prefixmsg))
6015 + nla_total_size(sizeof(struct in6_addr))
6016 + nla_total_size(sizeof(struct prefix_cacheinfo));
6017 }
6018
6019 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
6020 struct prefix_info *pinfo, u32 portid, u32 seq,
6021 int event, unsigned int flags)
6022 {
6023 struct prefixmsg *pmsg;
6024 struct nlmsghdr *nlh;
6025 struct prefix_cacheinfo ci;
6026
6027 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
6028 if (!nlh)
6029 return -EMSGSIZE;
6030
6031 pmsg = nlmsg_data(nlh);
6032 pmsg->prefix_family = AF_INET6;
6033 pmsg->prefix_pad1 = 0;
6034 pmsg->prefix_pad2 = 0;
6035 pmsg->prefix_ifindex = idev->dev->ifindex;
6036 pmsg->prefix_len = pinfo->prefix_len;
6037 pmsg->prefix_type = pinfo->type;
6038 pmsg->prefix_pad3 = 0;
6039 pmsg->prefix_flags = pinfo->flags;
6040
6041 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
6042 goto nla_put_failure;
6043 ci.preferred_time = ntohl(pinfo->prefered);
6044 ci.valid_time = ntohl(pinfo->valid);
6045 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
6046 goto nla_put_failure;
6047 nlmsg_end(skb, nlh);
6048 return 0;
6049
6050 nla_put_failure:
6051 nlmsg_cancel(skb, nlh);
6052 return -EMSGSIZE;
6053 }
6054
6055 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
6056 struct prefix_info *pinfo)
6057 {
6058 struct sk_buff *skb;
6059 struct net *net = dev_net(idev->dev);
6060 int err = -ENOBUFS;
6061
6062 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
6063 if (!skb)
6064 goto errout;
6065
6066 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
6067 if (err < 0) {
6068 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
6069 WARN_ON(err == -EMSGSIZE);
6070 kfree_skb(skb);
6071 goto errout;
6072 }
6073 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
6074 return;
6075 errout:
6076 if (err < 0)
6077 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
6078 }
6079
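/* Propagate an address add/delete: send the RTM_NEWADDR/RTM_DELADDR
 * notification, insert or remove the host route and the peer prefix
 * route, join/leave anycast when forwarding, and on delete also leave
 * the solicited-node multicast group and bump the route cache genid.
 */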
6080 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6081 {
6082 struct net *net = dev_net(ifp->idev->dev);
6083
6084 if (event)
6085 ASSERT_RTNL();
6086
6087 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
6088
6089 switch (event) {
6090 case RTM_NEWADDR:
6091 /*
6092 * If the address was optimistic we inserted the route at the
6093 * start of our DAD process, so we don't need to do it again.
6094 * If the device was taken down in the middle of the DAD
6095 * cycle there is a race where we could get here without a
6096 * host route, so nothing to insert. That will be fixed when
6097 * the device is brought up.
6098 */
6099 if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
6100 ip6_ins_rt(net, ifp->rt);
6101 } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
6102 pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
6103 &ifp->addr, ifp->idev->dev->name);
6104 }
6105
6106 if (ifp->idev->cnf.forwarding)
6107 addrconf_join_anycast(ifp);
6108 if (!ipv6_addr_any(&ifp->peer_addr))
6109 addrconf_prefix_route(&ifp->peer_addr, 128,
6110 ifp->rt_priority, ifp->idev->dev,
6111 0, 0, GFP_ATOMIC);
6112 break;
6113 case RTM_DELADDR:
6114 if (ifp->idev->cnf.forwarding)
6115 addrconf_leave_anycast(ifp);
6116 addrconf_leave_solict(ifp->idev, &ifp->addr);
6117 if (!ipv6_addr_any(&ifp->peer_addr)) {
6118 struct fib6_info *rt;
6119
6120 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
6121 ifp->idev->dev, 0, 0,
6122 false);
6123 if (rt)
6124 ip6_del_rt(net, rt, false);
6125 }
6126 if (ifp->rt) {
6127 ip6_del_rt(net, ifp->rt, false);
6128 ifp->rt = NULL;
6129 }
6130 rt_genid_bump_ipv6(net);
6131 break;
6132 }
6133 atomic_inc(&net->ipv6.dev_addr_genid);
6134 }
6135
6136 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6137 {
6138 rcu_read_lock_bh();
6139 if (likely(ifp->idev->dead == 0))
6140 __ipv6_ifa_notify(event, ifp);
6141 rcu_read_unlock_bh();
6142 }
6143
6144 #ifdef CONFIG_SYSCTL
6145
6146 static int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
6147 void *buffer, size_t *lenp, loff_t *ppos)
6148 {
6149 int *valp = ctl->data;
6150 int val = *valp;
6151 loff_t pos = *ppos;
6152 struct ctl_table lctl;
6153 int ret;
6154
6155 /*
6156 * ctl->data points to idev->cnf.forwarding, we should
6157 * not modify it until we get the rtnl lock.
6158 */
6159 lctl = *ctl;
6160 lctl.data = &val;
6161
6162 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6163
6164 if (write)
6165 ret = addrconf_fixup_forwarding(ctl, valp, val);
6166 if (ret)
6167 *ppos = pos;
6168 return ret;
6169 }
6170
6171 static int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
6172 void *buffer, size_t *lenp, loff_t *ppos)
6173 {
6174 struct inet6_dev *idev = ctl->extra1;
6175 int min_mtu = IPV6_MIN_MTU;
6176 struct ctl_table lctl;
6177
6178 lctl = *ctl;
6179 lctl.extra1 = &min_mtu;
6180 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
6181
6182 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
6183 }
6184
6185 static void dev_disable_change(struct inet6_dev *idev)
6186 {
6187 struct netdev_notifier_info info;
6188
6189 if (!idev || !idev->dev)
6190 return;
6191
6192 netdev_notifier_info_init(&info, idev->dev);
6193 if (idev->cnf.disable_ipv6)
6194 addrconf_notify(NULL, NETDEV_DOWN, &info);
6195 else
6196 addrconf_notify(NULL, NETDEV_UP, &info);
6197 }
6198
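/* Apply a new disable_ipv6 setting to every device in @net; devices whose
 * effective state changes get a synthetic NETDEV_DOWN or NETDEV_UP event
 * so their addresses are torn down or re-created.
 */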
6199 static void addrconf_disable_change(struct net *net, __s32 newf)
6200 {
6201 struct net_device *dev;
6202 struct inet6_dev *idev;
6203
6204 for_each_netdev(net, dev) {
6205 idev = __in6_dev_get(dev);
6206 if (idev) {
6207 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
6208 idev->cnf.disable_ipv6 = newf;
6209 if (changed)
6210 dev_disable_change(idev);
6211 }
6212 }
6213 }
6214
6215 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
6216 {
6217 struct net *net;
6218 int old;
6219
6220 if (!rtnl_trylock())
6221 return restart_syscall();
6222
6223 net = (struct net *)table->extra2;
6224 old = *p;
6225 *p = newf;
6226
6227 if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
6228 rtnl_unlock();
6229 return 0;
6230 }
6231
6232 if (p == &net->ipv6.devconf_all->disable_ipv6) {
6233 net->ipv6.devconf_dflt->disable_ipv6 = newf;
6234 addrconf_disable_change(net, newf);
6235 } else if ((!newf) ^ (!old))
6236 dev_disable_change((struct inet6_dev *)table->extra1);
6237
6238 rtnl_unlock();
6239 return 0;
6240 }
6241
6242 static int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
6243 void *buffer, size_t *lenp, loff_t *ppos)
6244 {
6245 int *valp = ctl->data;
6246 int val = *valp;
6247 loff_t pos = *ppos;
6248 struct ctl_table lctl;
6249 int ret;
6250
6251 /*
6252 * ctl->data points to idev->cnf.disable_ipv6, we should
6253 * not modify it until we get the rtnl lock.
6254 */
6255 lctl = *ctl;
6256 lctl.data = &val;
6257
6258 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6259
6260 if (write)
6261 ret = addrconf_disable_ipv6(ctl, valp, val);
6262 if (ret)
6263 *ppos = pos;
6264 return ret;
6265 }
6266
6267 static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
6268 void *buffer, size_t *lenp, loff_t *ppos)
6269 {
6270 int *valp = ctl->data;
6271 int ret;
6272 int old, new;
6273
6274 old = *valp;
6275 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6276 new = *valp;
6277
6278 if (write && old != new) {
6279 struct net *net = ctl->extra2;
6280
6281 if (!rtnl_trylock())
6282 return restart_syscall();
6283
6284 if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
6285 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6286 NETCONFA_PROXY_NEIGH,
6287 NETCONFA_IFINDEX_DEFAULT,
6288 net->ipv6.devconf_dflt);
6289 else if (valp == &net->ipv6.devconf_all->proxy_ndp)
6290 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6291 NETCONFA_PROXY_NEIGH,
6292 NETCONFA_IFINDEX_ALL,
6293 net->ipv6.devconf_all);
6294 else {
6295 struct inet6_dev *idev = ctl->extra1;
6296
6297 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6298 NETCONFA_PROXY_NEIGH,
6299 idev->dev->ifindex,
6300 &idev->cnf);
6301 }
6302 rtnl_unlock();
6303 }
6304
6305 return ret;
6306 }
6307
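/* proc handler for conf/<dev>/addr_gen_mode: validate the new mode under
 * RTNL and re-run address configuration on the affected device(s); writing
 * to "all" also updates the default and every existing device.
 */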
6308 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
6309 void *buffer, size_t *lenp,
6310 loff_t *ppos)
6311 {
6312 int ret = 0;
6313 u32 new_val;
6314 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
6315 struct net *net = (struct net *)ctl->extra2;
6316 struct ctl_table tmp = {
6317 .data = &new_val,
6318 .maxlen = sizeof(new_val),
6319 .mode = ctl->mode,
6320 };
6321
6322 if (!rtnl_trylock())
6323 return restart_syscall();
6324
6325 new_val = *((u32 *)ctl->data);
6326
6327 ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
6328 if (ret != 0)
6329 goto out;
6330
6331 if (write) {
6332 if (check_addr_gen_mode(new_val) < 0) {
6333 ret = -EINVAL;
6334 goto out;
6335 }
6336
6337 if (idev) {
6338 if (check_stable_privacy(idev, net, new_val) < 0) {
6339 ret = -EINVAL;
6340 goto out;
6341 }
6342
6343 if (idev->cnf.addr_gen_mode != new_val) {
6344 idev->cnf.addr_gen_mode = new_val;
6345 addrconf_dev_config(idev->dev);
6346 }
6347 } else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
6348 struct net_device *dev;
6349
6350 net->ipv6.devconf_dflt->addr_gen_mode = new_val;
6351 for_each_netdev(net, dev) {
6352 idev = __in6_dev_get(dev);
6353 if (idev &&
6354 idev->cnf.addr_gen_mode != new_val) {
6355 idev->cnf.addr_gen_mode = new_val;
6356 addrconf_dev_config(idev->dev);
6357 }
6358 }
6359 }
6360
6361 *((u32 *)ctl->data) = new_val;
6362 }
6363
6364 out:
6365 rtnl_unlock();
6366
6367 return ret;
6368 }
6369
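/* proc handler for conf/<dev>/stable_secret: the secret is read and written
 * as an IPv6 address string; a successful write switches the device (or,
 * for "default", every device) to IN6_ADDR_GEN_MODE_STABLE_PRIVACY. The
 * "all" entry rejects access with -EIO.
 */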
6370 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
6371 void *buffer, size_t *lenp,
6372 loff_t *ppos)
6373 {
6374 int err;
6375 struct in6_addr addr;
6376 char str[IPV6_MAX_STRLEN];
6377 struct ctl_table lctl = *ctl;
6378 struct net *net = ctl->extra2;
6379 struct ipv6_stable_secret *secret = ctl->data;
6380
6381 if (&net->ipv6.devconf_all->stable_secret == ctl->data)
6382 return -EIO;
6383
6384 lctl.maxlen = IPV6_MAX_STRLEN;
6385 lctl.data = str;
6386
6387 if (!rtnl_trylock())
6388 return restart_syscall();
6389
6390 if (!write && !secret->initialized) {
6391 err = -EIO;
6392 goto out;
6393 }
6394
6395 err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
6396 if (err >= sizeof(str)) {
6397 err = -EIO;
6398 goto out;
6399 }
6400
6401 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
6402 if (err || !write)
6403 goto out;
6404
6405 if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
6406 err = -EIO;
6407 goto out;
6408 }
6409
6410 secret->initialized = true;
6411 secret->secret = addr;
6412
6413 if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
6414 struct net_device *dev;
6415
6416 for_each_netdev(net, dev) {
6417 struct inet6_dev *idev = __in6_dev_get(dev);
6418
6419 if (idev) {
6420 idev->cnf.addr_gen_mode =
6421 IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6422 }
6423 }
6424 } else {
6425 struct inet6_dev *idev = ctl->extra1;
6426
6427 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6428 }
6429
6430 out:
6431 rtnl_unlock();
6432
6433 return err;
6434 }
6435
6436 static
6437 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
6438 int write, void *buffer,
6439 size_t *lenp,
6440 loff_t *ppos)
6441 {
6442 int *valp = ctl->data;
6443 int val = *valp;
6444 loff_t pos = *ppos;
6445 struct ctl_table lctl;
6446 int ret;
6447
6448 /* ctl->data points to idev->cnf.ignore_routes_when_linkdown
6449 * we should not modify it until we get the rtnl lock.
6450 */
6451 lctl = *ctl;
6452 lctl.data = &val;
6453
6454 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6455
6456 if (write)
6457 ret = addrconf_fixup_linkdown(ctl, valp, val);
6458 if (ret)
6459 *ppos = pos;
6460 return ret;
6461 }
6462
6463 static
6464 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
6465 {
6466 if (rt) {
6467 if (action)
6468 rt->dst.flags |= DST_NOPOLICY;
6469 else
6470 rt->dst.flags &= ~DST_NOPOLICY;
6471 }
6472 }
6473
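/* Set or clear the no-policy flag on the host route of every address on
 * @idev, including the per-CPU cached rt6_info copies, so IPsec policy
 * lookups are skipped (or re-enabled) for traffic to those addresses.
 */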
6474 static
6475 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
6476 {
6477 struct inet6_ifaddr *ifa;
6478
6479 read_lock_bh(&idev->lock);
6480 list_for_each_entry(ifa, &idev->addr_list, if_list) {
6481 spin_lock(&ifa->lock);
6482 if (ifa->rt) {
6483 /* host routes only use builtin fib6_nh */
6484 struct fib6_nh *nh = ifa->rt->fib6_nh;
6485 int cpu;
6486
6487 rcu_read_lock();
6488 ifa->rt->dst_nopolicy = val ? true : false;
6489 if (nh->rt6i_pcpu) {
6490 for_each_possible_cpu(cpu) {
6491 struct rt6_info **rtp;
6492
6493 rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu);
6494 addrconf_set_nopolicy(*rtp, val);
6495 }
6496 }
6497 rcu_read_unlock();
6498 }
6499 spin_unlock(&ifa->lock);
6500 }
6501 read_unlock_bh(&idev->lock);
6502 }
6503
6504 static
6505 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
6506 {
6507 struct inet6_dev *idev;
6508 struct net *net;
6509
6510 if (!rtnl_trylock())
6511 return restart_syscall();
6512
6513 *valp = val;
6514
6515 net = (struct net *)ctl->extra2;
6516 if (valp == &net->ipv6.devconf_dflt->disable_policy) {
6517 rtnl_unlock();
6518 return 0;
6519 }
6520
6521 if (valp == &net->ipv6.devconf_all->disable_policy) {
6522 struct net_device *dev;
6523
6524 for_each_netdev(net, dev) {
6525 idev = __in6_dev_get(dev);
6526 if (idev)
6527 addrconf_disable_policy_idev(idev, val);
6528 }
6529 } else {
6530 idev = (struct inet6_dev *)ctl->extra1;
6531 addrconf_disable_policy_idev(idev, val);
6532 }
6533
6534 rtnl_unlock();
6535 return 0;
6536 }
6537
6538 static int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
6539 void *buffer, size_t *lenp, loff_t *ppos)
6540 {
6541 int *valp = ctl->data;
6542 int val = *valp;
6543 loff_t pos = *ppos;
6544 struct ctl_table lctl;
6545 int ret;
6546
6547 lctl = *ctl;
6548 lctl.data = &val;
6549 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6550
6551 if (write && (*valp != val))
6552 ret = addrconf_disable_policy(ctl, valp, val);
6553
6554 if (ret)
6555 *ppos = pos;
6556
6557 return ret;
6558 }
6559
6560 static int minus_one = -1;
6561 static const int two_five_five = 255;
6562
6563 static const struct ctl_table addrconf_sysctl[] = {
6564 {
6565 .procname = "forwarding",
6566 .data = &ipv6_devconf.forwarding,
6567 .maxlen = sizeof(int),
6568 .mode = 0644,
6569 .proc_handler = addrconf_sysctl_forward,
6570 },
6571 {
6572 .procname = "hop_limit",
6573 .data = &ipv6_devconf.hop_limit,
6574 .maxlen = sizeof(int),
6575 .mode = 0644,
6576 .proc_handler = proc_dointvec_minmax,
6577 .extra1 = (void *)SYSCTL_ONE,
6578 .extra2 = (void *)&two_five_five,
6579 },
6580 {
6581 .procname = "mtu",
6582 .data = &ipv6_devconf.mtu6,
6583 .maxlen = sizeof(int),
6584 .mode = 0644,
6585 .proc_handler = addrconf_sysctl_mtu,
6586 },
6587 {
6588 .procname = "accept_ra",
6589 .data = &ipv6_devconf.accept_ra,
6590 .maxlen = sizeof(int),
6591 .mode = 0644,
6592 .proc_handler = proc_dointvec,
6593 },
6594 {
6595 .procname = "accept_redirects",
6596 .data = &ipv6_devconf.accept_redirects,
6597 .maxlen = sizeof(int),
6598 .mode = 0644,
6599 .proc_handler = proc_dointvec,
6600 },
6601 {
6602 .procname = "autoconf",
6603 .data = &ipv6_devconf.autoconf,
6604 .maxlen = sizeof(int),
6605 .mode = 0644,
6606 .proc_handler = proc_dointvec,
6607 },
6608 {
6609 .procname = "dad_transmits",
6610 .data = &ipv6_devconf.dad_transmits,
6611 .maxlen = sizeof(int),
6612 .mode = 0644,
6613 .proc_handler = proc_dointvec,
6614 },
6615 {
6616 .procname = "router_solicitations",
6617 .data = &ipv6_devconf.rtr_solicits,
6618 .maxlen = sizeof(int),
6619 .mode = 0644,
6620 .proc_handler = proc_dointvec_minmax,
6621 .extra1 = &minus_one,
6622 },
6623 {
6624 .procname = "router_solicitation_interval",
6625 .data = &ipv6_devconf.rtr_solicit_interval,
6626 .maxlen = sizeof(int),
6627 .mode = 0644,
6628 .proc_handler = proc_dointvec_jiffies,
6629 },
6630 {
6631 .procname = "router_solicitation_max_interval",
6632 .data = &ipv6_devconf.rtr_solicit_max_interval,
6633 .maxlen = sizeof(int),
6634 .mode = 0644,
6635 .proc_handler = proc_dointvec_jiffies,
6636 },
6637 {
6638 .procname = "router_solicitation_delay",
6639 .data = &ipv6_devconf.rtr_solicit_delay,
6640 .maxlen = sizeof(int),
6641 .mode = 0644,
6642 .proc_handler = proc_dointvec_jiffies,
6643 },
6644 {
6645 .procname = "force_mld_version",
6646 .data = &ipv6_devconf.force_mld_version,
6647 .maxlen = sizeof(int),
6648 .mode = 0644,
6649 .proc_handler = proc_dointvec,
6650 },
6651 {
6652 .procname = "mldv1_unsolicited_report_interval",
6653 .data =
6654 &ipv6_devconf.mldv1_unsolicited_report_interval,
6655 .maxlen = sizeof(int),
6656 .mode = 0644,
6657 .proc_handler = proc_dointvec_ms_jiffies,
6658 },
6659 {
6660 .procname = "mldv2_unsolicited_report_interval",
6661 .data =
6662 &ipv6_devconf.mldv2_unsolicited_report_interval,
6663 .maxlen = sizeof(int),
6664 .mode = 0644,
6665 .proc_handler = proc_dointvec_ms_jiffies,
6666 },
6667 {
6668 .procname = "use_tempaddr",
6669 .data = &ipv6_devconf.use_tempaddr,
6670 .maxlen = sizeof(int),
6671 .mode = 0644,
6672 .proc_handler = proc_dointvec,
6673 },
6674 {
6675 .procname = "temp_valid_lft",
6676 .data = &ipv6_devconf.temp_valid_lft,
6677 .maxlen = sizeof(int),
6678 .mode = 0644,
6679 .proc_handler = proc_dointvec,
6680 },
6681 {
6682 .procname = "temp_prefered_lft",
6683 .data = &ipv6_devconf.temp_prefered_lft,
6684 .maxlen = sizeof(int),
6685 .mode = 0644,
6686 .proc_handler = proc_dointvec,
6687 },
6688 {
6689 .procname = "regen_max_retry",
6690 .data = &ipv6_devconf.regen_max_retry,
6691 .maxlen = sizeof(int),
6692 .mode = 0644,
6693 .proc_handler = proc_dointvec,
6694 },
6695 {
6696 .procname = "max_desync_factor",
6697 .data = &ipv6_devconf.max_desync_factor,
6698 .maxlen = sizeof(int),
6699 .mode = 0644,
6700 .proc_handler = proc_dointvec,
6701 },
6702 {
6703 .procname = "max_addresses",
6704 .data = &ipv6_devconf.max_addresses,
6705 .maxlen = sizeof(int),
6706 .mode = 0644,
6707 .proc_handler = proc_dointvec,
6708 },
6709 {
6710 .procname = "accept_ra_defrtr",
6711 .data = &ipv6_devconf.accept_ra_defrtr,
6712 .maxlen = sizeof(int),
6713 .mode = 0644,
6714 .proc_handler = proc_dointvec,
6715 },
6716 {
6717 .procname = "accept_ra_min_hop_limit",
6718 .data = &ipv6_devconf.accept_ra_min_hop_limit,
6719 .maxlen = sizeof(int),
6720 .mode = 0644,
6721 .proc_handler = proc_dointvec,
6722 },
6723 {
6724 .procname = "accept_ra_min_lft",
6725 .data = &ipv6_devconf.accept_ra_min_lft,
6726 .maxlen = sizeof(int),
6727 .mode = 0644,
6728 .proc_handler = proc_dointvec,
6729 },
6730 {
6731 .procname = "accept_ra_pinfo",
6732 .data = &ipv6_devconf.accept_ra_pinfo,
6733 .maxlen = sizeof(int),
6734 .mode = 0644,
6735 .proc_handler = proc_dointvec,
6736 },
6737 #ifdef CONFIG_IPV6_ROUTER_PREF
6738 {
6739 .procname = "accept_ra_rtr_pref",
6740 .data = &ipv6_devconf.accept_ra_rtr_pref,
6741 .maxlen = sizeof(int),
6742 .mode = 0644,
6743 .proc_handler = proc_dointvec,
6744 },
6745 {
6746 .procname = "router_probe_interval",
6747 .data = &ipv6_devconf.rtr_probe_interval,
6748 .maxlen = sizeof(int),
6749 .mode = 0644,
6750 .proc_handler = proc_dointvec_jiffies,
6751 },
6752 #ifdef CONFIG_IPV6_ROUTE_INFO
6753 {
6754 .procname = "accept_ra_rt_info_min_plen",
6755 .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
6756 .maxlen = sizeof(int),
6757 .mode = 0644,
6758 .proc_handler = proc_dointvec,
6759 },
6760 {
6761 .procname = "accept_ra_rt_info_max_plen",
6762 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
6763 .maxlen = sizeof(int),
6764 .mode = 0644,
6765 .proc_handler = proc_dointvec,
6766 },
6767 #endif
6768 #endif
6769 {
6770 .procname = "proxy_ndp",
6771 .data = &ipv6_devconf.proxy_ndp,
6772 .maxlen = sizeof(int),
6773 .mode = 0644,
6774 .proc_handler = addrconf_sysctl_proxy_ndp,
6775 },
6776 {
6777 .procname = "accept_source_route",
6778 .data = &ipv6_devconf.accept_source_route,
6779 .maxlen = sizeof(int),
6780 .mode = 0644,
6781 .proc_handler = proc_dointvec,
6782 },
6783 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
6784 {
6785 .procname = "optimistic_dad",
6786 .data = &ipv6_devconf.optimistic_dad,
6787 .maxlen = sizeof(int),
6788 .mode = 0644,
6789 .proc_handler = proc_dointvec,
6790 },
6791 {
6792 .procname = "use_optimistic",
6793 .data = &ipv6_devconf.use_optimistic,
6794 .maxlen = sizeof(int),
6795 .mode = 0644,
6796 .proc_handler = proc_dointvec,
6797 },
6798 #endif
6799 #ifdef CONFIG_IPV6_MROUTE
6800 {
6801 .procname = "mc_forwarding",
6802 .data = &ipv6_devconf.mc_forwarding,
6803 .maxlen = sizeof(int),
6804 .mode = 0444,
6805 .proc_handler = proc_dointvec,
6806 },
6807 #endif
6808 {
6809 .procname = "disable_ipv6",
6810 .data = &ipv6_devconf.disable_ipv6,
6811 .maxlen = sizeof(int),
6812 .mode = 0644,
6813 .proc_handler = addrconf_sysctl_disable,
6814 },
6815 {
6816 .procname = "accept_dad",
6817 .data = &ipv6_devconf.accept_dad,
6818 .maxlen = sizeof(int),
6819 .mode = 0644,
6820 .proc_handler = proc_dointvec,
6821 },
6822 {
6823 .procname = "force_tllao",
6824 .data = &ipv6_devconf.force_tllao,
6825 .maxlen = sizeof(int),
6826 .mode = 0644,
6827 .proc_handler = proc_dointvec
6828 },
6829 {
6830 .procname = "ndisc_notify",
6831 .data = &ipv6_devconf.ndisc_notify,
6832 .maxlen = sizeof(int),
6833 .mode = 0644,
6834 .proc_handler = proc_dointvec
6835 },
6836 {
6837 .procname = "suppress_frag_ndisc",
6838 .data = &ipv6_devconf.suppress_frag_ndisc,
6839 .maxlen = sizeof(int),
6840 .mode = 0644,
6841 .proc_handler = proc_dointvec
6842 },
6843 {
6844 .procname = "accept_ra_from_local",
6845 .data = &ipv6_devconf.accept_ra_from_local,
6846 .maxlen = sizeof(int),
6847 .mode = 0644,
6848 .proc_handler = proc_dointvec,
6849 },
6850 {
6851 .procname = "accept_ra_mtu",
6852 .data = &ipv6_devconf.accept_ra_mtu,
6853 .maxlen = sizeof(int),
6854 .mode = 0644,
6855 .proc_handler = proc_dointvec,
6856 },
6857 {
6858 .procname = "stable_secret",
6859 .data = &ipv6_devconf.stable_secret,
6860 .maxlen = IPV6_MAX_STRLEN,
6861 .mode = 0600,
6862 .proc_handler = addrconf_sysctl_stable_secret,
6863 },
6864 {
6865 .procname = "use_oif_addrs_only",
6866 .data = &ipv6_devconf.use_oif_addrs_only,
6867 .maxlen = sizeof(int),
6868 .mode = 0644,
6869 .proc_handler = proc_dointvec,
6870 },
6871 {
6872 .procname = "ignore_routes_with_linkdown",
6873 .data = &ipv6_devconf.ignore_routes_with_linkdown,
6874 .maxlen = sizeof(int),
6875 .mode = 0644,
6876 .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
6877 },
6878 {
6879 .procname = "drop_unicast_in_l2_multicast",
6880 .data = &ipv6_devconf.drop_unicast_in_l2_multicast,
6881 .maxlen = sizeof(int),
6882 .mode = 0644,
6883 .proc_handler = proc_dointvec,
6884 },
6885 {
6886 .procname = "drop_unsolicited_na",
6887 .data = &ipv6_devconf.drop_unsolicited_na,
6888 .maxlen = sizeof(int),
6889 .mode = 0644,
6890 .proc_handler = proc_dointvec,
6891 },
6892 {
6893 .procname = "keep_addr_on_down",
6894 .data = &ipv6_devconf.keep_addr_on_down,
6895 .maxlen = sizeof(int),
6896 .mode = 0644,
6897 .proc_handler = proc_dointvec,
6898
6899 },
6900 {
6901 .procname = "seg6_enabled",
6902 .data = &ipv6_devconf.seg6_enabled,
6903 .maxlen = sizeof(int),
6904 .mode = 0644,
6905 .proc_handler = proc_dointvec,
6906 },
6907 #ifdef CONFIG_IPV6_SEG6_HMAC
6908 {
6909 .procname = "seg6_require_hmac",
6910 .data = &ipv6_devconf.seg6_require_hmac,
6911 .maxlen = sizeof(int),
6912 .mode = 0644,
6913 .proc_handler = proc_dointvec,
6914 },
6915 #endif
6916 {
6917 .procname = "enhanced_dad",
6918 .data = &ipv6_devconf.enhanced_dad,
6919 .maxlen = sizeof(int),
6920 .mode = 0644,
6921 .proc_handler = proc_dointvec,
6922 },
6923 {
6924 .procname = "addr_gen_mode",
6925 .data = &ipv6_devconf.addr_gen_mode,
6926 .maxlen = sizeof(int),
6927 .mode = 0644,
6928 .proc_handler = addrconf_sysctl_addr_gen_mode,
6929 },
6930 {
6931 .procname = "disable_policy",
6932 .data = &ipv6_devconf.disable_policy,
6933 .maxlen = sizeof(int),
6934 .mode = 0644,
6935 .proc_handler = addrconf_sysctl_disable_policy,
6936 },
6937 {
6938 .procname = "ndisc_tclass",
6939 .data = &ipv6_devconf.ndisc_tclass,
6940 .maxlen = sizeof(int),
6941 .mode = 0644,
6942 .proc_handler = proc_dointvec_minmax,
6943 .extra1 = (void *)SYSCTL_ZERO,
6944 .extra2 = (void *)&two_five_five,
6945 },
6946 {
6947 .procname = "rpl_seg_enabled",
6948 .data = &ipv6_devconf.rpl_seg_enabled,
6949 .maxlen = sizeof(int),
6950 .mode = 0644,
6951 .proc_handler = proc_dointvec,
6952 },
6953 {
6954 /* sentinel */
6955 }
6956 };
6957
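/* Register the per-device (or "all"/"default") net/ipv6/conf/<name> sysctl
 * table: duplicate the template, point each entry at @p's fields, and
 * announce the new configuration via an RTM_NEWNETCONF notification.
 */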
6958 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
6959 struct inet6_dev *idev, struct ipv6_devconf *p)
6960 {
6961 int i, ifindex;
6962 struct ctl_table *table;
6963 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
6964
6965 table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
6966 if (!table)
6967 goto out;
6968
6969 for (i = 0; table[i].data; i++) {
6970 table[i].data += (char *)p - (char *)&ipv6_devconf;
6971 /* If one of these is already set, then it is not safe to
6972 * overwrite either of them: this makes proc_dointvec_minmax
6973 * usable.
6974 */
6975 if (!table[i].extra1 && !table[i].extra2) {
6976 table[i].extra1 = idev; /* embedded; no ref */
6977 table[i].extra2 = net;
6978 }
6979 }
6980
6981 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
6982
6983 p->sysctl_header = register_net_sysctl(net, path, table);
6984 if (!p->sysctl_header)
6985 goto free;
6986
6987 if (!strcmp(dev_name, "all"))
6988 ifindex = NETCONFA_IFINDEX_ALL;
6989 else if (!strcmp(dev_name, "default"))
6990 ifindex = NETCONFA_IFINDEX_DEFAULT;
6991 else
6992 ifindex = idev->dev->ifindex;
6993 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
6994 ifindex, p);
6995 return 0;
6996
6997 free:
6998 kfree(table);
6999 out:
7000 return -ENOBUFS;
7001 }
7002
7003 static void __addrconf_sysctl_unregister(struct net *net,
7004 struct ipv6_devconf *p, int ifindex)
7005 {
7006 struct ctl_table *table;
7007
7008 if (!p->sysctl_header)
7009 return;
7010
7011 table = p->sysctl_header->ctl_table_arg;
7012 unregister_net_sysctl_table(p->sysctl_header);
7013 p->sysctl_header = NULL;
7014 kfree(table);
7015
7016 inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
7017 }
7018
7019 static int addrconf_sysctl_register(struct inet6_dev *idev)
7020 {
7021 int err;
7022
7023 if (!sysctl_dev_name_is_allowed(idev->dev->name))
7024 return -EINVAL;
7025
7026 err = neigh_sysctl_register(idev->dev, idev->nd_parms,
7027 &ndisc_ifinfo_sysctl_change);
7028 if (err)
7029 return err;
7030 err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
7031 idev, &idev->cnf);
7032 if (err)
7033 neigh_sysctl_unregister(idev->nd_parms);
7034
7035 return err;
7036 }
7037
7038 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
7039 {
7040 __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
7041 idev->dev->ifindex);
7042 neigh_sysctl_unregister(idev->nd_parms);
7043 }
7044
7045
7046 #endif
7047
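/* Per-netns init: allocate the "all" and "default" devconf copies
 * (optionally inherited from init_net or the creating netns, depending on
 * net_inherit_devconf()) and register their sysctl tables.
 */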
7048 static int __net_init addrconf_init_net(struct net *net)
7049 {
7050 int err = -ENOMEM;
7051 struct ipv6_devconf *all, *dflt;
7052
7053 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
7054 if (!all)
7055 goto err_alloc_all;
7056
7057 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
7058 if (!dflt)
7059 goto err_alloc_dflt;
7060
7061 if (!net_eq(net, &init_net)) {
7062 switch (net_inherit_devconf()) {
7063 case 1: /* copy from init_net */
7064 memcpy(all, init_net.ipv6.devconf_all,
7065 sizeof(ipv6_devconf));
7066 memcpy(dflt, init_net.ipv6.devconf_dflt,
7067 sizeof(ipv6_devconf_dflt));
7068 break;
7069 case 3: /* copy from the current netns */
7070 memcpy(all, current->nsproxy->net_ns->ipv6.devconf_all,
7071 sizeof(ipv6_devconf));
7072 memcpy(dflt,
7073 current->nsproxy->net_ns->ipv6.devconf_dflt,
7074 sizeof(ipv6_devconf_dflt));
7075 break;
7076 case 0:
7077 case 2:
7078 /* use compiled values */
7079 break;
7080 }
7081 }
7082
7083 /* these will be inherited by all namespaces */
7084 dflt->autoconf = ipv6_defaults.autoconf;
7085 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
7086
7087 dflt->stable_secret.initialized = false;
7088 all->stable_secret.initialized = false;
7089
7090 net->ipv6.devconf_all = all;
7091 net->ipv6.devconf_dflt = dflt;
7092
7093 #ifdef CONFIG_SYSCTL
7094 err = __addrconf_sysctl_register(net, "all", NULL, all);
7095 if (err < 0)
7096 goto err_reg_all;
7097
7098 err = __addrconf_sysctl_register(net, "default", NULL, dflt);
7099 if (err < 0)
7100 goto err_reg_dflt;
7101 #endif
7102 return 0;
7103
7104 #ifdef CONFIG_SYSCTL
7105 err_reg_dflt:
7106 __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
7107 err_reg_all:
7108 kfree(dflt);
7109 #endif
7110 err_alloc_dflt:
7111 kfree(all);
7112 err_alloc_all:
7113 return err;
7114 }
7115
7116 static void __net_exit addrconf_exit_net(struct net *net)
7117 {
7118 #ifdef CONFIG_SYSCTL
7119 __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
7120 NETCONFA_IFINDEX_DEFAULT);
7121 __addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
7122 NETCONFA_IFINDEX_ALL);
7123 #endif
7124 kfree(net->ipv6.devconf_dflt);
7125 kfree(net->ipv6.devconf_all);
7126 }
7127
7128 static struct pernet_operations addrconf_ops = {
7129 .init = addrconf_init_net,
7130 .exit = addrconf_exit_net,
7131 };
7132
7133 static struct rtnl_af_ops inet6_ops __read_mostly = {
7134 .family = AF_INET6,
7135 .fill_link_af = inet6_fill_link_af,
7136 .get_link_af_size = inet6_get_link_af_size,
7137 .validate_link_af = inet6_validate_link_af,
7138 .set_link_af = inet6_set_link_af,
7139 };
7140
7141 /*
7142 * Init / cleanup code
7143 */
7144
7145 int __init addrconf_init(void)
7146 {
7147 struct inet6_dev *idev;
7148 int i, err;
7149
7150 err = ipv6_addr_label_init();
7151 if (err < 0) {
7152 pr_crit("%s: cannot initialize default policy table: %d\n",
7153 __func__, err);
7154 goto out;
7155 }
7156
7157 err = register_pernet_subsys(&addrconf_ops);
7158 if (err < 0)
7159 goto out_addrlabel;
7160
7161 addrconf_wq = create_workqueue("ipv6_addrconf");
7162 if (!addrconf_wq) {
7163 err = -ENOMEM;
7164 goto out_nowq;
7165 }
7166
7167 /* The addrconf netdev notifier requires that loopback_dev
7168 * has its ipv6 private information allocated and setup
7169 * before it can bring up and give link-local addresses
7170 * to other devices which are up.
7171 *
7172 * Unfortunately, loopback_dev is not necessarily the first
7173 * entry in the global dev_base list of net devices. In fact,
7174 * it is likely to be the very last entry on that list.
7175 * So this causes the notifier registry below to try and
7176 * give link-local addresses to all devices besides loopback_dev
7177 * first, then loopback_dev, which causes all the non-loopback_dev
7178 * devices to fail to get a link-local address.
7179 *
7180 * So, as a temporary fix, allocate the ipv6 structure for
7181 * loopback_dev first by hand.
7182 * Longer term, all of the dependencies ipv6 has upon the loopback
7183 * device and it being up should be removed.
7184 */
7185 rtnl_lock();
7186 idev = ipv6_add_dev(init_net.loopback_dev);
7187 rtnl_unlock();
7188 if (IS_ERR(idev)) {
7189 err = PTR_ERR(idev);
7190 goto errlo;
7191 }
7192
7193 ip6_route_init_special_entries();
7194
7195 for (i = 0; i < IN6_ADDR_HSIZE; i++)
7196 INIT_HLIST_HEAD(&inet6_addr_lst[i]);
7197
7198 register_netdevice_notifier(&ipv6_dev_notf);
7199
7200 addrconf_verify();
7201
7202 rtnl_af_register(&inet6_ops);
7203
7204 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
7205 NULL, inet6_dump_ifinfo, 0);
7206 if (err < 0)
7207 goto errout;
7208
7209 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
7210 inet6_rtm_newaddr, NULL, 0);
7211 if (err < 0)
7212 goto errout;
7213 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR,
7214 inet6_rtm_deladdr, NULL, 0);
7215 if (err < 0)
7216 goto errout;
7217 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
7218 inet6_rtm_getaddr, inet6_dump_ifaddr,
7219 RTNL_FLAG_DOIT_UNLOCKED);
7220 if (err < 0)
7221 goto errout;
7222 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
7223 NULL, inet6_dump_ifmcaddr, 0);
7224 if (err < 0)
7225 goto errout;
7226 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
7227 NULL, inet6_dump_ifacaddr, 0);
7228 if (err < 0)
7229 goto errout;
7230 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
7231 inet6_netconf_get_devconf,
7232 inet6_netconf_dump_devconf,
7233 RTNL_FLAG_DOIT_UNLOCKED);
7234 if (err < 0)
7235 goto errout;
7236 err = ipv6_addr_label_rtnl_register();
7237 if (err < 0)
7238 goto errout;
7239
7240 return 0;
7241 errout:
7242 rtnl_unregister_all(PF_INET6);
7243 rtnl_af_unregister(&inet6_ops);
7244 unregister_netdevice_notifier(&ipv6_dev_notf);
7245 errlo:
7246 destroy_workqueue(addrconf_wq);
7247 out_nowq:
7248 unregister_pernet_subsys(&addrconf_ops);
7249 out_addrlabel:
7250 ipv6_addr_label_cleanup();
7251 out:
7252 return err;
7253 }
7254
7255 void addrconf_cleanup(void)
7256 {
7257 struct net_device *dev;
7258 int i;
7259
7260 unregister_netdevice_notifier(&ipv6_dev_notf);
7261 unregister_pernet_subsys(&addrconf_ops);
7262 ipv6_addr_label_cleanup();
7263
7264 rtnl_af_unregister(&inet6_ops);
7265
7266 rtnl_lock();
7267
7268 /* clean dev list */
7269 for_each_netdev(&init_net, dev) {
7270 if (__in6_dev_get(dev) == NULL)
7271 continue;
7272 addrconf_ifdown(dev, true);
7273 }
7274 addrconf_ifdown(init_net.loopback_dev, true);
7275
7276 /*
7277 * Check hash table.
7278 */
7279 spin_lock_bh(&addrconf_hash_lock);
7280 for (i = 0; i < IN6_ADDR_HSIZE; i++)
7281 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
7282 spin_unlock_bh(&addrconf_hash_lock);
7283 cancel_delayed_work(&addr_chk_work);
7284 rtnl_unlock();
7285
7286 destroy_workqueue(addrconf_wq);
7287 }
7288