// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/rhashtable.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/rtnh.h>

#include <linux/nospec.h>

struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __ro_after_init;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net)					\
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list,	\
				lockdep_rtnl_is_held() ||		\
				list_empty(&net->ipv4.mr_tables))

static struct mr_table *ipmr_mr_table_iter(struct net *net,
					   struct mr_table *mrt)
{
	struct mr_table *ret;

	if (!mrt)
		ret = list_entry_rcu(net->ipv4.mr_tables.next,
				     struct mr_table, list);
	else
		ret = list_entry_rcu(mrt->list.next,
				     struct mr_table, list);

	if (&ret->list == &net->ipv4.mr_tables)
		return NULL;
	return ret;
}

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi4_to_flowi(flp4));

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ipmr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb,
			       struct netlink_ext_ack *extack)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	rtnl_lock();
	ipmr_free_table(mrt);
	rtnl_unlock();
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
	rtnl_unlock();
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
			   struct netlink_ext_ack *extack)
{
	return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR, extack);
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
	return fib_rules_seq_read(net, RTNL_FAMILY_IPMR);
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
	return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT;
}
EXPORT_SYMBOL(ipmr_rule_default);
#else
#define ipmr_for_each_table(mrt, net)					\
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_mr_table_iter(struct net *net,
					   struct mr_table *mrt)
{
	if (!mrt)
		return net->ipv4.mrt;
	return NULL;
}

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv4.mrt = mrt;
	return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	rtnl_lock();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
	rtnl_unlock();
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
			   struct netlink_ext_ack *extack)
{
	return 0;
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
	return 0;
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
	return true;
}
EXPORT_SYMBOL(ipmr_rule_default);
#endif

static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
				const void *ptr)
{
	const struct mfc_cache_cmp_arg *cmparg = arg->key;
	struct mfc_cache *c = (struct mfc_cache *)ptr;

	return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
	       cmparg->mfc_origin != c->mfc_origin;
}

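/* Resolved entries live in an rhltable keyed on the (mfc_origin,
 * mfc_mcastgrp) pair embedded in struct mfc_cache; ipmr_hash_cmp()
 * above returns 0 only when both addresses match.
 */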
static const struct rhashtable_params ipmr_rht_params = {
	.head_offset = offsetof(struct mr_mfc, mnode),
	.key_offset = offsetof(struct mfc_cache, cmparg),
	.key_len = sizeof(struct mfc_cache_cmp_arg),
	.nelem_hint = 3,
	.obj_cmpfn = ipmr_hash_cmp,
	.automatic_shrinking = true,
};

static void ipmr_new_table_set(struct mr_table *mrt,
			       struct net *net)
{
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
}

static struct mfc_cache_cmp_arg ipmr_mr_table_ops_cmparg_any = {
	.mfc_mcastgrp = htonl(INADDR_ANY),
	.mfc_origin = htonl(INADDR_ANY),
};

static struct mr_table_ops ipmr_mr_table_ops = {
	.rht_params = &ipmr_rht_params,
	.cmparg_any = &ipmr_mr_table_ops_cmparg_any,
};

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);

	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	return mr_table_alloc(net, id, &ipmr_mr_table_ops,
			      ipmr_expire_process, ipmr_new_table_set);
}

static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC |
				 MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return false;
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	return true;
}

static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *tunnel_dev, *new_dev;
	struct ip_tunnel_parm p = { };
	int err;

	tunnel_dev = __dev_get_by_name(net, "tunl0");
	if (!tunnel_dev)
		goto out;

	p.iph.daddr = v->vifc_rmt_addr.s_addr;
	p.iph.saddr = v->vifc_lcl_addr.s_addr;
	p.iph.version = 4;
	p.iph.ihl = 5;
	p.iph.protocol = IPPROTO_IPIP;
	sprintf(p.name, "dvmrp%d", v->vifc_vifi);

	if (!tunnel_dev->netdev_ops->ndo_tunnel_ctl)
		goto out;
	err = tunnel_dev->netdev_ops->ndo_tunnel_ctl(tunnel_dev, &p,
						     SIOCADDTUNNEL);
	if (err)
		goto out;

	new_dev = __dev_get_by_name(net, p.name);
	if (!new_dev)
		goto out;

	new_dev->flags |= IFF_MULTICAST;
	if (!ipmr_init_vif_indev(new_dev))
		goto out_unregister;
	if (dev_open(new_dev, NULL))
		goto out_unregister;
	dev_hold(new_dev);
	err = dev_set_allmulti(new_dev, 1);
	if (err) {
		dev_close(new_dev);
		tunnel_dev->netdev_ops->ndo_tunnel_ctl(tunnel_dev, &p,
						       SIOCDELTUNNEL);
		dev_put(new_dev);
		new_dev = ERR_PTR(err);
	}
	return new_dev;

out_unregister:
	unregister_netdevice(new_dev);
out:
	return ERR_PTR(-ENOBUFS);
}

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (!ipmr_init_vif_indev(dev))
		goto failure;
	if (dev_open(dev, NULL))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}

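/* Common PIMv1/PIMv2 REGISTER handling: validate the inner IP header,
 * then strip the outer headers and re-inject the encapsulated multicast
 * packet on the pimreg device so it passes through normal forwarding.
 */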
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif

static int call_ipmr_vif_entry_notifiers(struct net *net,
					 enum fib_event_type event_type,
					 struct vif_device *vif,
					 vifi_t vif_index, u32 tb_id)
{
	return mr_call_vif_notifiers(net, RTNL_FAMILY_IPMR, event_type,
				     vif, vif_index, tb_id,
				     &net->ipv4.ipmr_seq);
}

static int call_ipmr_mfc_entry_notifiers(struct net *net,
					 enum fib_event_type event_type,
					 struct mfc_cache *mfc, u32 tb_id)
{
	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IPMR, event_type,
				     &mfc->_c, tb_id, &net->ipv4.ipmr_seq);
}

/**
 * vif_delete - Delete a VIF entry
 * @mrt: Table to delete from
 * @vifi: VIF identifier to delete
 * @notify: Set to 1, if the caller is a notifier_call
 * @head: if unregistering the VIF, place it on this queue
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct net *net = read_pnet(&mrt->net);
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	if (VIF_EXISTS(mrt, vifi))
		call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_DEL, v, vifi,
					      mrt->id);

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

	kmem_cache_free(mrt_cachep, (struct mfc_cache *)c);
}

static void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->_c.rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}

/* Timer process for the unresolved queue. */
static void ipmr_expire_process(struct timer_list *t)
{
	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
	struct mr_mfc *c, *next;
	unsigned long expires;
	unsigned long now;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10*HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, (struct mfc_cache *)c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, (struct mfc_cache *)c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}

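/* Add a virtual interface: a pimreg device (VIFF_REGISTER), a DVMRP
 * tunnel (VIFF_TUNNEL), or a physical device looked up by ifindex
 * (VIFF_USE_IFINDEX) or by its local address.
 */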
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	struct netdev_phys_item_id ppid = { };
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
	case VIFF_REGISTER:
		if (!ipmr_pimsm_enabled())
			return -EINVAL;
		/* Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
		break;
	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
				    dev->ifindex, &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */
	vif_device_init(v, dev, vifc->vifc_rate_limit,
			vifc->vifc_threshold,
			vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
			(VIFF_TUNNEL | VIFF_REGISTER));

	err = dev_get_port_parent_id(dev, &ppid, true);
	if (err == 0) {
		memcpy(v->dev_parent_id.id, ppid.id, ppid.id_len);
		v->dev_parent_id.id_len = ppid.id_len;
	} else {
		v->dev_parent_id.id_len = 0;
	}

	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, vifi, mrt->id);
	return 0;
}

/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = origin
	};

	return mr_mfc_find(mrt, &arg);
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = htonl(INADDR_ANY)
	};

	if (mcastgrp == htonl(INADDR_ANY))
		return mr_mfc_find_any_parent(mrt, vifi);
	return mr_mfc_find_any(mrt, vifi, &arg);
}

/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
						__be32 origin, __be32 mcastgrp,
						int parent)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = origin,
	};

	return mr_mfc_find_parent(mrt, &arg, parent);
}

/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c) {
		c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
		c->_c.mfc_un.res.minvif = MAXVIFS;
		c->_c.free = ipmr_cache_free_rcu;
		refcount_set(&c->_c.mfc_un.res.refcount, 1);
	}
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
		c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
	}
	return c;
}

/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));

			if (mr_fill_mroute(mrt, skb, &c->_c,
					   nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
		}
	}
}

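/* The upcall to the daemon is a struct igmpmsg overlaid on an IP header:
 * for IGMPMSG_WHOLEPKT/IGMPMSG_WRVIFWHOLE the original packet is kept
 * behind a duplicated header, while IGMPMSG_NOCACHE/IGMPMSG_WRONGVIF
 * carry only the offending IP header plus a fake IGMP header.
 */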
/* Bounce a cache query up to mrouted and netlink.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	const int ihl = ip_hdrlen(pkt);
	struct sock *mroute_sk;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	int ret;

	if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = assert;
		msg->im_mbz = 0;
		if (assert == IGMPMSG_WRVIFWHOLE) {
			msg->im_vif = vifi;
			msg->im_vif_hi = vifi >> 8;
		} else {
			msg->im_vif = mrt->mroute_reg_vif_num;
			msg->im_vif_hi = mrt->mroute_reg_vif_num >> 8;
		}
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else {
		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* Flag to the kernel this is a route add */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		msg->im_vif_hi = vifi >> 8;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		/* Add our header */
		igmp = skb_put(skb, sizeof(struct igmphdr));
		igmp->type = assert;
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	igmpmsg_netlink_event(mrt, skb);

	/* Deliver to mrouted */
	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}

/* Queue a packet for resolution. It gets locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct mfc_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		c = ipmr_cache_alloc_unres();
		if (!c) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->_c.mfc_parent = -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/* Reflect first query at mrouted. */
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);

		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->_c.list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer,
				  c->_c.mfc_un.unres.expires);
	}

	/* See if we can append the packet */
	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		if (dev) {
			skb->dev = dev;
			skb->skb_iif = dev->ifindex;
		}
		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	struct net *net = read_pnet(&mrt->net);
	struct mfc_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ipmr_rht_params);
	list_del_rcu(&c->_c.list);
	call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
	mroute_netlink_event(mrt, c, RTM_DELROUTE);
	mr_cache_put(&c->_c);

	return 0;
}

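/* Install or update an (S,G) (or (*,G) proxy) entry from a struct mfcctl:
 * an existing entry just gets new parent/TTLs, otherwise a new cache
 * entry is allocated and hashed, and any packets queued as unresolved
 * for the same flow are replayed through ipmr_cache_resolve().
 */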
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	struct mfc_cache *uc, *c;
	struct mr_mfc *_uc;
	bool found;
	int ret;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->_c.mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->_c.mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
					      mrt->id);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->_c.mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->_c.mfc_flags |= MFC_STATIC;

	ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
				  ipmr_rht_params);
	if (ret) {
		pr_err("ipmr: rhtable insert error %d\n", ret);
		ipmr_cache_free(c);
		return ret;
	}
	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
		uc = (struct mfc_cache *)_uc;
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&_uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id);
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}

/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, int flags)
{
	struct net *net = read_pnet(&mrt->net);
	struct mr_mfc *c, *tmp;
	struct mfc_cache *cache;
	LIST_HEAD(list);
	int i;

	/* Shut down all active vif entries */
	if (flags & (MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC)) {
		for (i = 0; i < mrt->maxvif; i++) {
			if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
			     !(flags & MRT_FLUSH_VIFS_STATIC)) ||
			    (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT_FLUSH_VIFS)))
				continue;
			vif_delete(mrt, i, 0, &list);
		}
		unregister_netdevice_many(&list);
	}

	/* Wipe the cache */
	if (flags & (MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC)) {
		list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
			if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC_STATIC)) ||
			    (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC)))
				continue;
			rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
			list_del_rcu(&c->list);
			cache = (struct mfc_cache *)c;
			call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache,
						      mrt->id);
			mroute_netlink_event(mrt, cache, RTM_DELROUTE);
			mr_cache_put(c);
		}
	}

	if (flags & MRT_FLUSH_MFC) {
		if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
			spin_lock_bh(&mfc_unres_lock);
			list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
				list_del(&c->list);
				cache = (struct mfc_cache *)c;
				mroute_netlink_event(mrt, cache, RTM_DELROUTE);
				ipmr_destroy_unres(mrt, cache);
			}
			spin_unlock_bh(&mfc_unres_lock);
		}
	}
}

/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	rtnl_lock();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_MFC);
		}
	}
	rtnl_unlock();
}

/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

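/* For context: the MRT_* options below are exercised from user space by a
 * multicast routing daemon on a raw IGMP socket.  A minimal sketch (purely
 * illustrative, not part of this file; error handling and real addresses
 * omitted) looks like:
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *
 *	struct vifctl vc = { .vifc_vifi = 0, .vifc_threshold = 1 };
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *
 *	struct mfcctl mc = { .mfcc_parent = 0 };
 *	mc.mfcc_origin.s_addr = inet_addr("198.51.100.7");
 *	mc.mfcc_mcastgrp.s_addr = inet_addr("239.1.1.1");
 *	mc.mfcc_ttls[1] = 1;
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *
 * The kernel side of each of these options is handled in
 * ip_mroute_setsockopt() below.
 */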
int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
			 unsigned int optlen)
{
	struct net *net = sock_net(sk);
	int val, ret = 0, parent = 0;
	struct mr_table *mrt;
	struct vifctl vif;
	struct mfcctl mfc;
	bool do_wrvifwhole;
	u32 uval;

	/* There's one exception to the lock - MRT_DONE which needs to unlock */
	rtnl_lock();
	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt) {
		ret = -ENOENT;
		goto out_unlock;
	}
	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			ret = -EACCES;
			goto out_unlock;
		}
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		if (rtnl_dereference(mrt->mroute_sk)) {
			ret = -EADDRINUSE;
			break;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		break;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
			ret = -EACCES;
		} else {
			/* We need to unlock here because mrtsock_destruct takes
			 * care of rtnl itself and we can't change that due to
			 * the IP_ROUTER_ALERT setsockopt which runs without it.
			 */
			rtnl_unlock();
			ret = ip_ra_control(sk, 0, NULL);
			goto out;
		}
		break;
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&vif, optval, sizeof(vif))) {
			ret = -EFAULT;
			break;
		}
		if (vif.vifc_vifi >= MAXVIFS) {
			ret = -ENFILE;
			break;
		}
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		break;
	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
		fallthrough;
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&mfc, optval, sizeof(mfc))) {
			ret = -EFAULT;
			break;
		}
		if (parent == 0)
			parent = mfc.mfcc_parent;
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		break;
	case MRT_FLUSH:
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&val, optval, sizeof(val))) {
			ret = -EFAULT;
			break;
		}
		mroute_clean_tables(mrt, val);
		break;
	/* Control PIM assert. */
	case MRT_ASSERT:
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&val, optval, sizeof(val))) {
			ret = -EFAULT;
			break;
		}
		mrt->mroute_do_assert = val;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled()) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&val, optval, sizeof(val))) {
			ret = -EFAULT;
			break;
		}

		do_wrvifwhole = (val == IGMPMSG_WRVIFWHOLE);
		val = !!val;
		if (val != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = val;
			mrt->mroute_do_assert = val;
			mrt->mroute_do_wrvifwhole = do_wrvifwhole;
		}
		break;
	case MRT_TABLE:
		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(uval)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_sockptr(&uval, optval, sizeof(uval))) {
			ret = -EFAULT;
			break;
		}

		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			mrt = ipmr_new_table(net, uval);
			if (IS_ERR(mrt))
				ret = PTR_ERR(mrt);
			else
				raw_sk(sk)->ipmr_table = uval;
		}
		break;
	/* Spurious command, or MRT_VERSION which you cannot set. */
	default:
		ret = -ENOPROTOOPT;
	}
out_unlock:
	rtnl_unlock();
out:
	return ret;
}

/* Getsock opt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT_VERSION:
		val = 0x0305;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled())
			return -ENOPROTOOPT;
		val = mrt->mroute_do_pim;
		break;
	case MRT_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;
	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;
	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

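/* Compat (32-bit user space) counterpart of ipmr_ioctl(): same
 * SIOCGETVIFCNT/SIOCGETSGCNT semantics, but using the compat_ulong_t
 * request layouts defined above.
 */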
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset_ct(skb);
}

static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}

#ifdef CONFIG_NET_SWITCHDEV
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
				   int in_vifi, int out_vifi)
{
	struct vif_device *out_vif = &mrt->vif_table[out_vifi];
	struct vif_device *in_vif = &mrt->vif_table[in_vifi];

	if (!skb->offload_l3_fwd_mark)
		return false;
	if (!out_vif->dev_parent_id.id_len || !in_vif->dev_parent_id.id_len)
		return false;
	return netdev_phys_item_id_same(&out_vif->dev_parent_id,
					&in_vif->dev_parent_id);
}
#else
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
				   int in_vifi, int out_vifi)
{
	return false;
}
#endif

/* Processing handlers for ipmr_forward */

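/* Transmit one copy of the packet on a single vif: register vifs bounce
 * the packet to the daemon as IGMPMSG_WHOLEPKT, hardware-offloaded flows
 * are skipped, tunnel vifs get an extra IPIP header, and everything else
 * is routed out with the TTL decremented via the NF_INET_FORWARD hook.
 */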
static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    int in_vifi, struct sk_buff *skb, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int    encap = 0;

	if (!vif->dev)
		goto out_free;

	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}

	if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
		goto out_free;

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow to send ICMP, so that packets will disappear
		 * to blackhole.
		 */
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(net, skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/* RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but also after forwarding on all output
	 * interfaces. Clearly, if the mrouter runs a multicasting program,
	 * that program should receive packets regardless of which interface
	 * it has joined on.
	 * If we do not do this, the program will have to join on all
	 * interfaces. On the other hand, a multihomed host (or router, but
	 * not mrouter) cannot join on more than one interface - it would
	 * result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
		net, NULL, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *c, int local)
{
	int true_vifi = ipmr_find_vif(mrt, dev);
	int psend = -1;
	int vif, ct;

	vif = c->_c.mfc_parent;
	c->_c.mfc_un.res.pkt++;
	c->_c.mfc_un.res.bytes += skb->len;
	c->_c.mfc_un.res.lastuse = jiffies;

	if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
		struct mfc_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = mr_mfc_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/* Wrong interface: drop packet and (maybe) send PIM assert. */
	if (mrt->vif_table[vif].dev != dev) {
		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons will be
			 * fixed is not to redistribute packet, if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for
			 * (S,G), which have default multicast route pointing
			 * to the wrong oif. In any case, it is not a good
			 * idea to use multicasting applications on a router.
			 */
			goto dont_forward;
		}

		c->_c.mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		     * so that we cannot check that packet arrived on an oif.
		     * It is bad, but otherwise we would need to move pretty
		     * large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       c->_c.mfc_un.res.last_assert +
			       MFC_ASSERT_THRESH)) {
			c->_c.mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
			if (mrt->mroute_do_wrvifwhole)
				ipmr_cache_report(mrt, skb, true_vifi,
						  IGMPMSG_WRVIFWHOLE);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/* Forward the frame */
	if (c->mfc_origin == htonl(INADDR_ANY) &&
	    c->mfc_mcastgrp == htonl(INADDR_ANY)) {
		if (true_vifi >= 0 &&
		    true_vifi != c->_c.mfc_parent &&
		    ip_hdr(skb)->ttl >
				c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = c->_c.mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = c->_c.mfc_un.res.maxvif - 1;
	     ct >= c->_c.mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((c->mfc_origin != htonl(INADDR_ANY) ||
		     ct != true_vifi) &&
		    ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					ipmr_queue_xmit(net, mrt, true_vifi,
							skb2, psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				ipmr_queue_xmit(net, mrt, true_vifi, skb2,
						psend);
		} else {
			ipmr_queue_xmit(net, mrt, true_vifi, skb, psend);
			return;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
}

static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}

2096 */ 2097 if (IPCB(skb)->flags & IPSKB_FORWARDED) 2098 goto dont_forward; 2099 2100 mrt = ipmr_rt_fib_lookup(net, skb); 2101 if (IS_ERR(mrt)) { 2102 kfree_skb(skb); 2103 return PTR_ERR(mrt); 2104 } 2105 if (!local) { 2106 if (IPCB(skb)->opt.router_alert) { 2107 if (ip_call_ra_chain(skb)) 2108 return 0; 2109 } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) { 2110 /* IGMPv1 (and broken IGMPv2 implementations sort of 2111 * Cisco IOS <= 11.2(8)) do not put router alert 2112 * option to IGMP packets destined to routable 2113 * groups. It is very bad, because it means 2114 * that we can forward NO IGMP messages. 2115 */ 2116 struct sock *mroute_sk; 2117 2118 mroute_sk = rcu_dereference(mrt->mroute_sk); 2119 if (mroute_sk) { 2120 nf_reset_ct(skb); 2121 raw_rcv(mroute_sk, skb); 2122 return 0; 2123 } 2124 } 2125 } 2126 2127 /* already under rcu_read_lock() */ 2128 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); 2129 if (!cache) { 2130 int vif = ipmr_find_vif(mrt, dev); 2131 2132 if (vif >= 0) 2133 cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr, 2134 vif); 2135 } 2136 2137 /* No usable cache entry */ 2138 if (!cache) { 2139 int vif; 2140 2141 if (local) { 2142 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 2143 ip_local_deliver(skb); 2144 if (!skb2) 2145 return -ENOBUFS; 2146 skb = skb2; 2147 } 2148 2149 read_lock(&mrt_lock); 2150 vif = ipmr_find_vif(mrt, dev); 2151 if (vif >= 0) { 2152 int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev); 2153 read_unlock(&mrt_lock); 2154 2155 return err2; 2156 } 2157 read_unlock(&mrt_lock); 2158 kfree_skb(skb); 2159 return -ENODEV; 2160 } 2161 2162 read_lock(&mrt_lock); 2163 ip_mr_forward(net, mrt, dev, skb, cache, local); 2164 read_unlock(&mrt_lock); 2165 2166 if (local) 2167 return ip_local_deliver(skb); 2168 2169 return 0; 2170 2171dont_forward: 2172 if (local) 2173 return ip_local_deliver(skb); 2174 kfree_skb(skb); 2175 return 0; 2176} 2177 2178#ifdef CONFIG_IP_PIMSM_V1 2179/* Handle IGMP messages of PIMv1 */ 2180int pim_rcv_v1(struct sk_buff *skb) 2181{ 2182 struct igmphdr *pim; 2183 struct net *net = dev_net(skb->dev); 2184 struct mr_table *mrt; 2185 2186 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) 2187 goto drop; 2188 2189 pim = igmp_hdr(skb); 2190 2191 mrt = ipmr_rt_fib_lookup(net, skb); 2192 if (IS_ERR(mrt)) 2193 goto drop; 2194 if (!mrt->mroute_do_pim || 2195 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) 2196 goto drop; 2197 2198 if (__pim_rcv(mrt, skb, sizeof(*pim))) { 2199drop: 2200 kfree_skb(skb); 2201 } 2202 return 0; 2203} 2204#endif 2205 2206#ifdef CONFIG_IP_PIMSM_V2 2207static int pim_rcv(struct sk_buff *skb) 2208{ 2209 struct pimreghdr *pim; 2210 struct net *net = dev_net(skb->dev); 2211 struct mr_table *mrt; 2212 2213 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) 2214 goto drop; 2215 2216 pim = (struct pimreghdr *)skb_transport_header(skb); 2217 if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) || 2218 (pim->flags & PIM_NULL_REGISTER) || 2219 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && 2220 csum_fold(skb_checksum(skb, 0, skb->len, 0)))) 2221 goto drop; 2222 2223 mrt = ipmr_rt_fib_lookup(net, skb); 2224 if (IS_ERR(mrt)) 2225 goto drop; 2226 if (__pim_rcv(mrt, skb, sizeof(*pim))) { 2227drop: 2228 kfree_skb(skb); 2229 } 2230 return 0; 2231} 2232#endif 2233 2234int ipmr_get_route(struct net *net, struct sk_buff *skb, 2235 __be32 saddr, __be32 daddr, 2236 struct rtmsg *rtm, u32 portid) 2237{ 2238 struct mfc_cache *cache; 2239 struct mr_table *mrt; 
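	/* Note on the code below: ipmr_get_route() fills @rtm for a cached
	 * (saddr, daddr) multicast route. If no resolved MFC entry exists,
	 * a copy of the skb gets a skeleton IP header pushed on it and is
	 * queued via ipmr_cache_unresolved(), which for a new entry raises
	 * an IGMPMSG_NOCACHE upcall to the multicast routing daemon.
	 */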
2240 int err; 2241 2242 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2243 if (!mrt) 2244 return -ENOENT; 2245 2246 rcu_read_lock(); 2247 cache = ipmr_cache_find(mrt, saddr, daddr); 2248 if (!cache && skb->dev) { 2249 int vif = ipmr_find_vif(mrt, skb->dev); 2250 2251 if (vif >= 0) 2252 cache = ipmr_cache_find_any(mrt, daddr, vif); 2253 } 2254 if (!cache) { 2255 struct sk_buff *skb2; 2256 struct iphdr *iph; 2257 struct net_device *dev; 2258 int vif = -1; 2259 2260 dev = skb->dev; 2261 read_lock(&mrt_lock); 2262 if (dev) 2263 vif = ipmr_find_vif(mrt, dev); 2264 if (vif < 0) { 2265 read_unlock(&mrt_lock); 2266 rcu_read_unlock(); 2267 return -ENODEV; 2268 } 2269 2270 skb2 = skb_realloc_headroom(skb, sizeof(struct iphdr)); 2271 if (!skb2) { 2272 read_unlock(&mrt_lock); 2273 rcu_read_unlock(); 2274 return -ENOMEM; 2275 } 2276 2277 NETLINK_CB(skb2).portid = portid; 2278 skb_push(skb2, sizeof(struct iphdr)); 2279 skb_reset_network_header(skb2); 2280 iph = ip_hdr(skb2); 2281 iph->ihl = sizeof(struct iphdr) >> 2; 2282 iph->saddr = saddr; 2283 iph->daddr = daddr; 2284 iph->version = 0; 2285 err = ipmr_cache_unresolved(mrt, vif, skb2, dev); 2286 read_unlock(&mrt_lock); 2287 rcu_read_unlock(); 2288 return err; 2289 } 2290 2291 read_lock(&mrt_lock); 2292 err = mr_fill_mroute(mrt, skb, &cache->_c, rtm); 2293 read_unlock(&mrt_lock); 2294 rcu_read_unlock(); 2295 return err; 2296} 2297 2298static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 2299 u32 portid, u32 seq, struct mfc_cache *c, int cmd, 2300 int flags) 2301{ 2302 struct nlmsghdr *nlh; 2303 struct rtmsg *rtm; 2304 int err; 2305 2306 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); 2307 if (!nlh) 2308 return -EMSGSIZE; 2309 2310 rtm = nlmsg_data(nlh); 2311 rtm->rtm_family = RTNL_FAMILY_IPMR; 2312 rtm->rtm_dst_len = 32; 2313 rtm->rtm_src_len = 32; 2314 rtm->rtm_tos = 0; 2315 rtm->rtm_table = mrt->id; 2316 if (nla_put_u32(skb, RTA_TABLE, mrt->id)) 2317 goto nla_put_failure; 2318 rtm->rtm_type = RTN_MULTICAST; 2319 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 2320 if (c->_c.mfc_flags & MFC_STATIC) 2321 rtm->rtm_protocol = RTPROT_STATIC; 2322 else 2323 rtm->rtm_protocol = RTPROT_MROUTED; 2324 rtm->rtm_flags = 0; 2325 2326 if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) || 2327 nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp)) 2328 goto nla_put_failure; 2329 err = mr_fill_mroute(mrt, skb, &c->_c, rtm); 2330 /* do not break the dump if cache is unresolved */ 2331 if (err < 0 && err != -ENOENT) 2332 goto nla_put_failure; 2333 2334 nlmsg_end(skb, nlh); 2335 return 0; 2336 2337nla_put_failure: 2338 nlmsg_cancel(skb, nlh); 2339 return -EMSGSIZE; 2340} 2341 2342static int _ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 2343 u32 portid, u32 seq, struct mr_mfc *c, int cmd, 2344 int flags) 2345{ 2346 return ipmr_fill_mroute(mrt, skb, portid, seq, (struct mfc_cache *)c, 2347 cmd, flags); 2348} 2349 2350static size_t mroute_msgsize(bool unresolved, int maxvif) 2351{ 2352 size_t len = 2353 NLMSG_ALIGN(sizeof(struct rtmsg)) 2354 + nla_total_size(4) /* RTA_TABLE */ 2355 + nla_total_size(4) /* RTA_SRC */ 2356 + nla_total_size(4) /* RTA_DST */ 2357 ; 2358 2359 if (!unresolved) 2360 len = len 2361 + nla_total_size(4) /* RTA_IIF */ 2362 + nla_total_size(0) /* RTA_MULTIPATH */ 2363 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop)) 2364 /* RTA_MFC_STATS */ 2365 + nla_total_size_64bit(sizeof(struct rta_mfc_stats)) 2366 ; 2367 2368 return len; 2369} 2370 2371static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, 2372 int 
cmd) 2373{ 2374 struct net *net = read_pnet(&mrt->net); 2375 struct sk_buff *skb; 2376 int err = -ENOBUFS; 2377 2378 skb = nlmsg_new(mroute_msgsize(mfc->_c.mfc_parent >= MAXVIFS, 2379 mrt->maxvif), 2380 GFP_ATOMIC); 2381 if (!skb) 2382 goto errout; 2383 2384 err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); 2385 if (err < 0) 2386 goto errout; 2387 2388 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC); 2389 return; 2390 2391errout: 2392 kfree_skb(skb); 2393 if (err < 0) 2394 rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err); 2395} 2396 2397static size_t igmpmsg_netlink_msgsize(size_t payloadlen) 2398{ 2399 size_t len = 2400 NLMSG_ALIGN(sizeof(struct rtgenmsg)) 2401 + nla_total_size(1) /* IPMRA_CREPORT_MSGTYPE */ 2402 + nla_total_size(4) /* IPMRA_CREPORT_VIF_ID */ 2403 + nla_total_size(4) /* IPMRA_CREPORT_SRC_ADDR */ 2404 + nla_total_size(4) /* IPMRA_CREPORT_DST_ADDR */ 2405 + nla_total_size(4) /* IPMRA_CREPORT_TABLE */ 2406 /* IPMRA_CREPORT_PKT */ 2407 + nla_total_size(payloadlen) 2408 ; 2409 2410 return len; 2411} 2412 2413static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt) 2414{ 2415 struct net *net = read_pnet(&mrt->net); 2416 struct nlmsghdr *nlh; 2417 struct rtgenmsg *rtgenm; 2418 struct igmpmsg *msg; 2419 struct sk_buff *skb; 2420 struct nlattr *nla; 2421 int payloadlen; 2422 2423 payloadlen = pkt->len - sizeof(struct igmpmsg); 2424 msg = (struct igmpmsg *)skb_network_header(pkt); 2425 2426 skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC); 2427 if (!skb) 2428 goto errout; 2429 2430 nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT, 2431 sizeof(struct rtgenmsg), 0); 2432 if (!nlh) 2433 goto errout; 2434 rtgenm = nlmsg_data(nlh); 2435 rtgenm->rtgen_family = RTNL_FAMILY_IPMR; 2436 if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) || 2437 nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif | (msg->im_vif_hi << 8)) || 2438 nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR, 2439 msg->im_src.s_addr) || 2440 nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR, 2441 msg->im_dst.s_addr) || 2442 nla_put_u32(skb, IPMRA_CREPORT_TABLE, mrt->id)) 2443 goto nla_put_failure; 2444 2445 nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen); 2446 if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg), 2447 nla_data(nla), payloadlen)) 2448 goto nla_put_failure; 2449 2450 nlmsg_end(skb, nlh); 2451 2452 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC); 2453 return; 2454 2455nla_put_failure: 2456 nlmsg_cancel(skb, nlh); 2457errout: 2458 kfree_skb(skb); 2459 rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS); 2460} 2461 2462static int ipmr_rtm_valid_getroute_req(struct sk_buff *skb, 2463 const struct nlmsghdr *nlh, 2464 struct nlattr **tb, 2465 struct netlink_ext_ack *extack) 2466{ 2467 struct rtmsg *rtm; 2468 int i, err; 2469 2470 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) { 2471 NL_SET_ERR_MSG(extack, "ipv4: Invalid header for multicast route get request"); 2472 return -EINVAL; 2473 } 2474 2475 if (!netlink_strict_get_check(skb)) 2476 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX, 2477 rtm_ipv4_policy, extack); 2478 2479 rtm = nlmsg_data(nlh); 2480 if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) || 2481 (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) || 2482 rtm->rtm_tos || rtm->rtm_table || rtm->rtm_protocol || 2483 rtm->rtm_scope || rtm->rtm_type || rtm->rtm_flags) { 2484 NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for multicast route get request"); 2485 return -EINVAL; 2486 } 2487 2488 err = 
nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX, 2489 rtm_ipv4_policy, extack); 2490 if (err) 2491 return err; 2492 2493 if ((tb[RTA_SRC] && !rtm->rtm_src_len) || 2494 (tb[RTA_DST] && !rtm->rtm_dst_len)) { 2495 NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4"); 2496 return -EINVAL; 2497 } 2498 2499 for (i = 0; i <= RTA_MAX; i++) { 2500 if (!tb[i]) 2501 continue; 2502 2503 switch (i) { 2504 case RTA_SRC: 2505 case RTA_DST: 2506 case RTA_TABLE: 2507 break; 2508 default: 2509 NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in multicast route get request"); 2510 return -EINVAL; 2511 } 2512 } 2513 2514 return 0; 2515} 2516 2517static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, 2518 struct netlink_ext_ack *extack) 2519{ 2520 struct net *net = sock_net(in_skb->sk); 2521 struct nlattr *tb[RTA_MAX + 1]; 2522 struct sk_buff *skb = NULL; 2523 struct mfc_cache *cache; 2524 struct mr_table *mrt; 2525 __be32 src, grp; 2526 u32 tableid; 2527 int err; 2528 2529 err = ipmr_rtm_valid_getroute_req(in_skb, nlh, tb, extack); 2530 if (err < 0) 2531 goto errout; 2532 2533 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0; 2534 grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0; 2535 tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0; 2536 2537 mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT); 2538 if (!mrt) { 2539 err = -ENOENT; 2540 goto errout_free; 2541 } 2542 2543 /* entries are added/deleted only under RTNL */ 2544 rcu_read_lock(); 2545 cache = ipmr_cache_find(mrt, src, grp); 2546 rcu_read_unlock(); 2547 if (!cache) { 2548 err = -ENOENT; 2549 goto errout_free; 2550 } 2551 2552 skb = nlmsg_new(mroute_msgsize(false, mrt->maxvif), GFP_KERNEL); 2553 if (!skb) { 2554 err = -ENOBUFS; 2555 goto errout_free; 2556 } 2557 2558 err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid, 2559 nlh->nlmsg_seq, cache, 2560 RTM_NEWROUTE, 0); 2561 if (err < 0) 2562 goto errout_free; 2563 2564 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); 2565 2566errout: 2567 return err; 2568 2569errout_free: 2570 kfree_skb(skb); 2571 goto errout; 2572} 2573 2574static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) 2575{ 2576 struct fib_dump_filter filter = {}; 2577 int err; 2578 2579 if (cb->strict_check) { 2580 err = ip_valid_fib_dump_req(sock_net(skb->sk), cb->nlh, 2581 &filter, cb); 2582 if (err < 0) 2583 return err; 2584 } 2585 2586 if (filter.table_id) { 2587 struct mr_table *mrt; 2588 2589 mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id); 2590 if (!mrt) { 2591 if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR) 2592 return skb->len; 2593 2594 NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist"); 2595 return -ENOENT; 2596 } 2597 err = mr_table_dump(mrt, skb, cb, _ipmr_fill_mroute, 2598 &mfc_unres_lock, &filter); 2599 return skb->len ? 
: err; 2600 } 2601 2602 return mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter, 2603 _ipmr_fill_mroute, &mfc_unres_lock, &filter); 2604} 2605 2606static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = { 2607 [RTA_SRC] = { .type = NLA_U32 }, 2608 [RTA_DST] = { .type = NLA_U32 }, 2609 [RTA_IIF] = { .type = NLA_U32 }, 2610 [RTA_TABLE] = { .type = NLA_U32 }, 2611 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, 2612}; 2613 2614static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol) 2615{ 2616 switch (rtm_protocol) { 2617 case RTPROT_STATIC: 2618 case RTPROT_MROUTED: 2619 return true; 2620 } 2621 return false; 2622} 2623 2624static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc) 2625{ 2626 struct rtnexthop *rtnh = nla_data(nla); 2627 int remaining = nla_len(nla), vifi = 0; 2628 2629 while (rtnh_ok(rtnh, remaining)) { 2630 mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops; 2631 if (++vifi == MAXVIFS) 2632 break; 2633 rtnh = rtnh_next(rtnh, &remaining); 2634 } 2635 2636 return remaining > 0 ? -EINVAL : vifi; 2637} 2638 2639/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */ 2640static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh, 2641 struct mfcctl *mfcc, int *mrtsock, 2642 struct mr_table **mrtret, 2643 struct netlink_ext_ack *extack) 2644{ 2645 struct net_device *dev = NULL; 2646 u32 tblid = RT_TABLE_DEFAULT; 2647 struct mr_table *mrt; 2648 struct nlattr *attr; 2649 struct rtmsg *rtm; 2650 int ret, rem; 2651 2652 ret = nlmsg_validate_deprecated(nlh, sizeof(*rtm), RTA_MAX, 2653 rtm_ipmr_policy, extack); 2654 if (ret < 0) 2655 goto out; 2656 rtm = nlmsg_data(nlh); 2657 2658 ret = -EINVAL; 2659 if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 || 2660 rtm->rtm_type != RTN_MULTICAST || 2661 rtm->rtm_scope != RT_SCOPE_UNIVERSE || 2662 !ipmr_rtm_validate_proto(rtm->rtm_protocol)) 2663 goto out; 2664 2665 memset(mfcc, 0, sizeof(*mfcc)); 2666 mfcc->mfcc_parent = -1; 2667 ret = 0; 2668 nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) { 2669 switch (nla_type(attr)) { 2670 case RTA_SRC: 2671 mfcc->mfcc_origin.s_addr = nla_get_be32(attr); 2672 break; 2673 case RTA_DST: 2674 mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr); 2675 break; 2676 case RTA_IIF: 2677 dev = __dev_get_by_index(net, nla_get_u32(attr)); 2678 if (!dev) { 2679 ret = -ENODEV; 2680 goto out; 2681 } 2682 break; 2683 case RTA_MULTIPATH: 2684 if (ipmr_nla_get_ttls(attr, mfcc) < 0) { 2685 ret = -EINVAL; 2686 goto out; 2687 } 2688 break; 2689 case RTA_PREFSRC: 2690 ret = 1; 2691 break; 2692 case RTA_TABLE: 2693 tblid = nla_get_u32(attr); 2694 break; 2695 } 2696 } 2697 mrt = ipmr_get_table(net, tblid); 2698 if (!mrt) { 2699 ret = -ENOENT; 2700 goto out; 2701 } 2702 *mrtret = mrt; 2703 *mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0; 2704 if (dev) 2705 mfcc->mfcc_parent = ipmr_find_vif(mrt, dev); 2706 2707out: 2708 return ret; 2709} 2710 2711/* takes care of both newroute and delroute */ 2712static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh, 2713 struct netlink_ext_ack *extack) 2714{ 2715 struct net *net = sock_net(skb->sk); 2716 int ret, mrtsock, parent; 2717 struct mr_table *tbl; 2718 struct mfcctl mfcc; 2719 2720 mrtsock = 0; 2721 tbl = NULL; 2722 ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack); 2723 if (ret < 0) 2724 return ret; 2725 2726 parent = ret ? 
mfcc.mfcc_parent : -1; 2727 if (nlh->nlmsg_type == RTM_NEWROUTE) 2728 return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent); 2729 else 2730 return ipmr_mfc_delete(tbl, &mfcc, parent); 2731} 2732 2733static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb) 2734{ 2735 u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len); 2736 2737 if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) || 2738 nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) || 2739 nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM, 2740 mrt->mroute_reg_vif_num) || 2741 nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT, 2742 mrt->mroute_do_assert) || 2743 nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim) || 2744 nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE, 2745 mrt->mroute_do_wrvifwhole)) 2746 return false; 2747 2748 return true; 2749} 2750 2751static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb) 2752{ 2753 struct nlattr *vif_nest; 2754 struct vif_device *vif; 2755 2756 /* if the VIF doesn't exist just continue */ 2757 if (!VIF_EXISTS(mrt, vifid)) 2758 return true; 2759 2760 vif = &mrt->vif_table[vifid]; 2761 vif_nest = nla_nest_start_noflag(skb, IPMRA_VIF); 2762 if (!vif_nest) 2763 return false; 2764 if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) || 2765 nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) || 2766 nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) || 2767 nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in, 2768 IPMRA_VIFA_PAD) || 2769 nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out, 2770 IPMRA_VIFA_PAD) || 2771 nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in, 2772 IPMRA_VIFA_PAD) || 2773 nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out, 2774 IPMRA_VIFA_PAD) || 2775 nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) || 2776 nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) { 2777 nla_nest_cancel(skb, vif_nest); 2778 return false; 2779 } 2780 nla_nest_end(skb, vif_nest); 2781 2782 return true; 2783} 2784 2785static int ipmr_valid_dumplink(const struct nlmsghdr *nlh, 2786 struct netlink_ext_ack *extack) 2787{ 2788 struct ifinfomsg *ifm; 2789 2790 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) { 2791 NL_SET_ERR_MSG(extack, "ipv4: Invalid header for ipmr link dump"); 2792 return -EINVAL; 2793 } 2794 2795 if (nlmsg_attrlen(nlh, sizeof(*ifm))) { 2796 NL_SET_ERR_MSG(extack, "Invalid data after header in ipmr link dump"); 2797 return -EINVAL; 2798 } 2799 2800 ifm = nlmsg_data(nlh); 2801 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags || 2802 ifm->ifi_change || ifm->ifi_index) { 2803 NL_SET_ERR_MSG(extack, "Invalid values in header for ipmr link dump request"); 2804 return -EINVAL; 2805 } 2806 2807 return 0; 2808} 2809 2810static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb) 2811{ 2812 struct net *net = sock_net(skb->sk); 2813 struct nlmsghdr *nlh = NULL; 2814 unsigned int t = 0, s_t; 2815 unsigned int e = 0, s_e; 2816 struct mr_table *mrt; 2817 2818 if (cb->strict_check) { 2819 int err = ipmr_valid_dumplink(cb->nlh, cb->extack); 2820 2821 if (err < 0) 2822 return err; 2823 } 2824 2825 s_t = cb->args[0]; 2826 s_e = cb->args[1]; 2827 2828 ipmr_for_each_table(mrt, net) { 2829 struct nlattr *vifs, *af; 2830 struct ifinfomsg *hdr; 2831 u32 i; 2832 2833 if (t < s_t) 2834 goto skip_table; 2835 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, 2836 cb->nlh->nlmsg_seq, RTM_NEWLINK, 2837 sizeof(*hdr), NLM_F_MULTI); 2838 if (!nlh) 2839 break; 2840 2841 hdr = nlmsg_data(nlh); 2842 
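	/* Each multicast routing table is reported as one RTM_NEWLINK
	 * message: the ifinfomsg header carries RTNL_FAMILY_IPMR, and the
	 * table attributes plus the IPMRA_TABLE_VIFS list are nested under
	 * IFLA_AF_SPEC below.
	 */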
memset(hdr, 0, sizeof(*hdr)); 2843 hdr->ifi_family = RTNL_FAMILY_IPMR; 2844 2845 af = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 2846 if (!af) { 2847 nlmsg_cancel(skb, nlh); 2848 goto out; 2849 } 2850 2851 if (!ipmr_fill_table(mrt, skb)) { 2852 nlmsg_cancel(skb, nlh); 2853 goto out; 2854 } 2855 2856 vifs = nla_nest_start_noflag(skb, IPMRA_TABLE_VIFS); 2857 if (!vifs) { 2858 nla_nest_end(skb, af); 2859 nlmsg_end(skb, nlh); 2860 goto out; 2861 } 2862 for (i = 0; i < mrt->maxvif; i++) { 2863 if (e < s_e) 2864 goto skip_entry; 2865 if (!ipmr_fill_vif(mrt, i, skb)) { 2866 nla_nest_end(skb, vifs); 2867 nla_nest_end(skb, af); 2868 nlmsg_end(skb, nlh); 2869 goto out; 2870 } 2871skip_entry: 2872 e++; 2873 } 2874 s_e = 0; 2875 e = 0; 2876 nla_nest_end(skb, vifs); 2877 nla_nest_end(skb, af); 2878 nlmsg_end(skb, nlh); 2879skip_table: 2880 t++; 2881 } 2882 2883out: 2884 cb->args[1] = e; 2885 cb->args[0] = t; 2886 2887 return skb->len; 2888} 2889 2890#ifdef CONFIG_PROC_FS 2891/* The /proc interfaces to multicast routing : 2892 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif 2893 */ 2894 2895static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) 2896 __acquires(mrt_lock) 2897{ 2898 struct mr_vif_iter *iter = seq->private; 2899 struct net *net = seq_file_net(seq); 2900 struct mr_table *mrt; 2901 2902 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2903 if (!mrt) 2904 return ERR_PTR(-ENOENT); 2905 2906 iter->mrt = mrt; 2907 2908 read_lock(&mrt_lock); 2909 return mr_vif_seq_start(seq, pos); 2910} 2911 2912static void ipmr_vif_seq_stop(struct seq_file *seq, void *v) 2913 __releases(mrt_lock) 2914{ 2915 read_unlock(&mrt_lock); 2916} 2917 2918static int ipmr_vif_seq_show(struct seq_file *seq, void *v) 2919{ 2920 struct mr_vif_iter *iter = seq->private; 2921 struct mr_table *mrt = iter->mrt; 2922 2923 if (v == SEQ_START_TOKEN) { 2924 seq_puts(seq, 2925 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n"); 2926 } else { 2927 const struct vif_device *vif = v; 2928 const char *name = vif->dev ? 
2929 vif->dev->name : "none"; 2930 2931 seq_printf(seq, 2932 "%2td %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", 2933 vif - mrt->vif_table, 2934 name, vif->bytes_in, vif->pkt_in, 2935 vif->bytes_out, vif->pkt_out, 2936 vif->flags, vif->local, vif->remote); 2937 } 2938 return 0; 2939} 2940 2941static const struct seq_operations ipmr_vif_seq_ops = { 2942 .start = ipmr_vif_seq_start, 2943 .next = mr_vif_seq_next, 2944 .stop = ipmr_vif_seq_stop, 2945 .show = ipmr_vif_seq_show, 2946}; 2947 2948static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) 2949{ 2950 struct net *net = seq_file_net(seq); 2951 struct mr_table *mrt; 2952 2953 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2954 if (!mrt) 2955 return ERR_PTR(-ENOENT); 2956 2957 return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock); 2958} 2959 2960static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) 2961{ 2962 int n; 2963 2964 if (v == SEQ_START_TOKEN) { 2965 seq_puts(seq, 2966 "Group Origin Iif Pkts Bytes Wrong Oifs\n"); 2967 } else { 2968 const struct mfc_cache *mfc = v; 2969 const struct mr_mfc_iter *it = seq->private; 2970 const struct mr_table *mrt = it->mrt; 2971 2972 seq_printf(seq, "%08X %08X %-3hd", 2973 (__force u32) mfc->mfc_mcastgrp, 2974 (__force u32) mfc->mfc_origin, 2975 mfc->_c.mfc_parent); 2976 2977 if (it->cache != &mrt->mfc_unres_queue) { 2978 seq_printf(seq, " %8lu %8lu %8lu", 2979 mfc->_c.mfc_un.res.pkt, 2980 mfc->_c.mfc_un.res.bytes, 2981 mfc->_c.mfc_un.res.wrong_if); 2982 for (n = mfc->_c.mfc_un.res.minvif; 2983 n < mfc->_c.mfc_un.res.maxvif; n++) { 2984 if (VIF_EXISTS(mrt, n) && 2985 mfc->_c.mfc_un.res.ttls[n] < 255) 2986 seq_printf(seq, 2987 " %2d:%-3d", 2988 n, mfc->_c.mfc_un.res.ttls[n]); 2989 } 2990 } else { 2991 /* unresolved mfc_caches don't contain 2992 * pkt, bytes and wrong_if values 2993 */ 2994 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul); 2995 } 2996 seq_putc(seq, '\n'); 2997 } 2998 return 0; 2999} 3000 3001static const struct seq_operations ipmr_mfc_seq_ops = { 3002 .start = ipmr_mfc_seq_start, 3003 .next = mr_mfc_seq_next, 3004 .stop = mr_mfc_seq_stop, 3005 .show = ipmr_mfc_seq_show, 3006}; 3007#endif 3008 3009#ifdef CONFIG_IP_PIMSM_V2 3010static const struct net_protocol pim_protocol = { 3011 .handler = pim_rcv, 3012 .netns_ok = 1, 3013}; 3014#endif 3015 3016static unsigned int ipmr_seq_read(struct net *net) 3017{ 3018 ASSERT_RTNL(); 3019 3020 return net->ipv4.ipmr_seq + ipmr_rules_seq_read(net); 3021} 3022 3023static int ipmr_dump(struct net *net, struct notifier_block *nb, 3024 struct netlink_ext_ack *extack) 3025{ 3026 return mr_dump(net, nb, RTNL_FAMILY_IPMR, ipmr_rules_dump, 3027 ipmr_mr_table_iter, &mrt_lock, extack); 3028} 3029 3030static const struct fib_notifier_ops ipmr_notifier_ops_template = { 3031 .family = RTNL_FAMILY_IPMR, 3032 .fib_seq_read = ipmr_seq_read, 3033 .fib_dump = ipmr_dump, 3034 .owner = THIS_MODULE, 3035}; 3036 3037static int __net_init ipmr_notifier_init(struct net *net) 3038{ 3039 struct fib_notifier_ops *ops; 3040 3041 net->ipv4.ipmr_seq = 0; 3042 3043 ops = fib_notifier_ops_register(&ipmr_notifier_ops_template, net); 3044 if (IS_ERR(ops)) 3045 return PTR_ERR(ops); 3046 net->ipv4.ipmr_notifier_ops = ops; 3047 3048 return 0; 3049} 3050 3051static void __net_exit ipmr_notifier_exit(struct net *net) 3052{ 3053 fib_notifier_ops_unregister(net->ipv4.ipmr_notifier_ops); 3054 net->ipv4.ipmr_notifier_ops = NULL; 3055} 3056 3057/* Setup for IP multicast routing */ 3058static int __net_init ipmr_net_init(struct net *net) 3059{ 3060 int err; 3061 3062 err = 
ipmr_notifier_init(net); 3063 if (err) 3064 goto ipmr_notifier_fail; 3065 3066 err = ipmr_rules_init(net); 3067 if (err < 0) 3068 goto ipmr_rules_fail; 3069 3070#ifdef CONFIG_PROC_FS 3071 err = -ENOMEM; 3072 if (!proc_create_net("ip_mr_vif", 0, net->proc_net, &ipmr_vif_seq_ops, 3073 sizeof(struct mr_vif_iter))) 3074 goto proc_vif_fail; 3075 if (!proc_create_net("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops, 3076 sizeof(struct mr_mfc_iter))) 3077 goto proc_cache_fail; 3078#endif 3079 return 0; 3080 3081#ifdef CONFIG_PROC_FS 3082proc_cache_fail: 3083 remove_proc_entry("ip_mr_vif", net->proc_net); 3084proc_vif_fail: 3085 ipmr_rules_exit(net); 3086#endif 3087ipmr_rules_fail: 3088 ipmr_notifier_exit(net); 3089ipmr_notifier_fail: 3090 return err; 3091} 3092 3093static void __net_exit ipmr_net_exit(struct net *net) 3094{ 3095#ifdef CONFIG_PROC_FS 3096 remove_proc_entry("ip_mr_cache", net->proc_net); 3097 remove_proc_entry("ip_mr_vif", net->proc_net); 3098#endif 3099 ipmr_notifier_exit(net); 3100 ipmr_rules_exit(net); 3101} 3102 3103static struct pernet_operations ipmr_net_ops = { 3104 .init = ipmr_net_init, 3105 .exit = ipmr_net_exit, 3106}; 3107 3108int __init ip_mr_init(void) 3109{ 3110 int err; 3111 3112 mrt_cachep = kmem_cache_create("ip_mrt_cache", 3113 sizeof(struct mfc_cache), 3114 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, 3115 NULL); 3116 3117 err = register_pernet_subsys(&ipmr_net_ops); 3118 if (err) 3119 goto reg_pernet_fail; 3120 3121 err = register_netdevice_notifier(&ip_mr_notifier); 3122 if (err) 3123 goto reg_notif_fail; 3124#ifdef CONFIG_IP_PIMSM_V2 3125 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) { 3126 pr_err("%s: can't add PIM protocol\n", __func__); 3127 err = -EAGAIN; 3128 goto add_proto_fail; 3129 } 3130#endif 3131 rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, 3132 ipmr_rtm_getroute, ipmr_rtm_dumproute, 0); 3133 rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE, 3134 ipmr_rtm_route, NULL, 0); 3135 rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE, 3136 ipmr_rtm_route, NULL, 0); 3137 3138 rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK, 3139 NULL, ipmr_rtm_dumplink, 0); 3140 return 0; 3141 3142#ifdef CONFIG_IP_PIMSM_V2 3143add_proto_fail: 3144 unregister_netdevice_notifier(&ip_mr_notifier); 3145#endif 3146reg_notif_fail: 3147 unregister_pernet_subsys(&ipmr_net_ops); 3148reg_pernet_fail: 3149 kmem_cache_destroy(mrt_cachep); 3150 return err; 3151} 3152
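
/* Illustrative sketch (not part of the kernel build): how a userspace
 * multicast routing daemon typically drives the code in this file through
 * the MRT_* setsockopt interface on a raw IGMP socket.  Addresses, VIF
 * indices and the TTL threshold below are hypothetical example values, and
 * a second VIF (index 1) is assumed to have been added like VIF 0.
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	struct vifctl vif = { .vifc_vifi = 0, .vifc_threshold = 1 };
 *	struct mfcctl mfc = { .mfcc_parent = 0 };
 *
 *	// Become the mroute socket; upcalls (struct igmpmsg) arrive on it.
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *
 *	// Register virtual interface 0, bound to a local address.
 *	vif.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vif, sizeof(vif));
 *
 *	// Install an (S,G) entry: input on VIF 0, forward on VIF 1 to
 *	// packets whose TTL is greater than 1.
 *	mfc.mfcc_origin.s_addr = inet_addr("198.51.100.7");
 *	mfc.mfcc_mcastgrp.s_addr = inet_addr("239.1.2.3");
 *	mfc.mfcc_ttls[1] = 1;
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mfc, sizeof(mfc));
 *
 * The resulting VIFs and cache entries show up in /proc/net/ip_mr_vif and
 * /proc/net/ip_mr_cache, and via the RTNL_FAMILY_IPMR handlers registered
 * in ip_mr_init() above.
 */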