Lines Matching refs:mrt

73 	struct mr_table		*mrt;
76 /* Big lock, protecting vif table, mrt cache and mroute socket state.
103 static void ipmr_free_table(struct mr_table *mrt);
105 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
108 static int ipmr_cache_report(const struct mr_table *mrt,
110 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
112 static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt);
113 static void mroute_clean_tables(struct mr_table *mrt, int flags);
117 #define ipmr_for_each_table(mrt, net) \
118 list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list, \
123 struct mr_table *mrt)
127 if (!mrt)
131 ret = list_entry_rcu(mrt->list.next,
141 struct mr_table *mrt;
143 ipmr_for_each_table(mrt, net) {
144 if (mrt->id == id)
145 return mrt;
151 struct mr_table **mrt)
167 *mrt = res.mrt;
175 struct mr_table *mrt;
191 mrt = ipmr_get_table(rule->fr_net, arg->table);
192 if (!mrt)
194 res->mrt = mrt;
241 struct mr_table *mrt;
250 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
251 if (IS_ERR(mrt)) {
252 err = PTR_ERR(mrt);
265 ipmr_free_table(mrt);
274 struct mr_table *mrt, *next;
277 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
278 list_del(&mrt->list);
279 ipmr_free_table(mrt);
301 #define ipmr_for_each_table(mrt, net) \
302 for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
305 struct mr_table *mrt)
307 if (!mrt)
308 return net->ipv4.mrt;
314 return net->ipv4.mrt;
318 struct mr_table **mrt)
320 *mrt = net->ipv4.mrt;
326 struct mr_table *mrt;
328 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
329 if (IS_ERR(mrt))
330 return PTR_ERR(mrt);
331 net->ipv4.mrt = mrt;
338 ipmr_free_table(net->ipv4.mrt);
339 net->ipv4.mrt = NULL;
379 static void ipmr_new_table_set(struct mr_table *mrt,
383 list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
399 struct mr_table *mrt;
405 mrt = ipmr_get_table(net, id);
406 if (mrt)
407 return mrt;
413 static void ipmr_free_table(struct mr_table *mrt)
415 timer_shutdown_sync(&mrt->ipmr_expire_timer);
416 mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC |
418 rhltable_destroy(&mrt->mfc_hash);
419 kfree(mrt);
495 struct mr_table *mrt;
503 err = ipmr_fib_lookup(net, &fl4, &mrt);
514 ipmr_cache_report(mrt, skb, READ_ONCE(mrt->mroute_reg_vif_num),
542 static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
547 if (mrt->id == RT_TABLE_DEFAULT)
550 sprintf(name, "pimreg%u", mrt->id);
579 static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
598 vif_num = READ_ONCE(mrt->mroute_reg_vif_num);
600 reg_dev = vif_dev_read(&mrt->vif_table[vif_num]);
617 static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
644 * @mrt: Table to delete from
649 static int vif_delete(struct mr_table *mrt, int vifi, int notify,
652 struct net *net = read_pnet(&mrt->net);
657 if (vifi < 0 || vifi >= mrt->maxvif)
660 v = &mrt->vif_table[vifi];
668 vifi, mrt->id);
671 if (vifi == mrt->mroute_reg_vif_num) {
673 WRITE_ONCE(mrt->mroute_reg_vif_num, -1);
675 if (vifi + 1 == mrt->maxvif) {
679 if (VIF_EXISTS(mrt, tmp))
682 WRITE_ONCE(mrt->maxvif, tmp + 1);
720 static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
722 struct net *net = read_pnet(&mrt->net);
726 atomic_dec(&mrt->cache_resolve_queue_len);
751 struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
757 mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
761 if (list_empty(&mrt->mfc_unres_queue))
767 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
776 mroute_netlink_event(mrt, (struct mfc_cache *)c, RTM_DELROUTE);
777 ipmr_destroy_unres(mrt, (struct mfc_cache *)c);
780 if (!list_empty(&mrt->mfc_unres_queue))
781 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
788 static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
797 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
798 if (VIF_EXISTS(mrt, vifi) &&
810 static int vif_add(struct net *net, struct mr_table *mrt,
815 struct vif_device *v = &mrt->vif_table[vifi];
821 if (VIF_EXISTS(mrt, vifi))
831 if (mrt->mroute_reg_vif_num >= 0)
833 dev = ipmr_reg_vif(net, mrt);
904 WRITE_ONCE(mrt->mroute_reg_vif_num, vifi);
906 if (vifi+1 > mrt->maxvif)
907 WRITE_ONCE(mrt->maxvif, vifi + 1);
910 vifi, mrt->id);
915 static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
924 return mr_mfc_find(mrt, &arg);
928 static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
937 return mr_mfc_find_any_parent(mrt, vifi);
938 return mr_mfc_find_any(mrt, vifi, &arg);
942 static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
951 return mr_mfc_find_parent(mrt, &arg, parent);
980 static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
992 if (mr_fill_mroute(mrt, skb, &c->_c,
1008 ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
1018 static int ipmr_cache_report(const struct mr_table *mrt,
1028 mroute_sk = rcu_dereference(mrt->mroute_sk);
1058 int vif_num = READ_ONCE(mrt->mroute_reg_vif_num);
1087 igmpmsg_netlink_event(mrt, skb);
1102 static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
1111 list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
1135 err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
1148 atomic_inc(&mrt->cache_resolve_queue_len);
1149 list_add(&c->_c.list, &mrt->mfc_unres_queue);
1150 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1152 if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
1153 mod_timer(&mrt->ipmr_expire_timer,
1176 static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
1178 struct net *net = read_pnet(&mrt->net);
1183 c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
1188 rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ipmr_rht_params);
1190 call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
1191 mroute_netlink_event(mrt, c, RTM_DELROUTE);
1197 static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1210 c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
1216 ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
1221 mrt->id);
1222 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1237 ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
1241 ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
1248 list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
1254 list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
1259 atomic_dec(&mrt->cache_resolve_queue_len);
1264 if (list_empty(&mrt->mfc_unres_queue))
1265 del_timer(&mrt->ipmr_expire_timer);
1269 ipmr_cache_resolve(net, mrt, uc, c);
1272 call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id);
1273 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1278 static void mroute_clean_tables(struct mr_table *mrt, int flags)
1280 struct net *net = read_pnet(&mrt->net);
1288 for (i = 0; i < mrt->maxvif; i++) {
1289 if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
1291 (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT_FLUSH_VIFS)))
1293 vif_delete(mrt, i, 0, &list);
1300 list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
1304 rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
1308 mrt->id);
1309 mroute_netlink_event(mrt, cache, RTM_DELROUTE);
1315 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1317 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1320 mroute_netlink_event(mrt, cache, RTM_DELROUTE);
1321 ipmr_destroy_unres(mrt, cache);
1334 struct mr_table *mrt;
1337 ipmr_for_each_table(mrt, net) {
1338 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1344 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1345 mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_MFC);
1362 struct mr_table *mrt;
1376 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1377 if (!mrt) {
1382 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1395 if (rtnl_dereference(mrt->mroute_sk)) {
1402 rcu_assign_pointer(mrt->mroute_sk, sk);
1411 if (sk != rcu_access_pointer(mrt->mroute_sk)) {
1438 ret = vif_add(net, mrt, &vif,
1439 sk == rtnl_dereference(mrt->mroute_sk));
1441 ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
1464 ret = ipmr_mfc_delete(mrt, &mfc, parent);
1466 ret = ipmr_mfc_add(net, mrt, &mfc,
1467 sk == rtnl_dereference(mrt->mroute_sk),
1479 mroute_clean_tables(mrt, val);
1491 mrt->mroute_do_assert = val;
1509 if (val != mrt->mroute_do_pim) {
1510 mrt->mroute_do_pim = val;
1511 mrt->mroute_do_assert = val;
1512 mrt->mroute_do_wrvifwhole = do_wrvifwhole;
1529 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1532 mrt = ipmr_new_table(net, uval);
1533 if (IS_ERR(mrt))
1534 ret = PTR_ERR(mrt);
1578 struct mr_table *mrt;
1584 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1585 if (!mrt)
1595 val = mrt->mroute_do_pim;
1598 val = mrt->mroute_do_assert;
1626 struct mr_table *mrt;
1628 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1629 if (!mrt)
1635 if (vr->vifi >= mrt->maxvif)
1637 vr->vifi = array_index_nospec(vr->vifi, mrt->maxvif);
1639 vif = &mrt->vif_table[vr->vifi];
1640 if (VIF_EXISTS(mrt, vr->vifi)) {
1655 c = ipmr_cache_find(mrt, sr->src.s_addr, sr->grp.s_addr);
1694 struct mr_table *mrt;
1696 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1697 if (!mrt)
1704 if (vr.vifi >= mrt->maxvif)
1706 vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
1708 vif = &mrt->vif_table[vr.vifi];
1709 if (VIF_EXISTS(mrt, vr.vifi)) {
1727 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
1750 struct mr_table *mrt;
1757 ipmr_for_each_table(mrt, net) {
1758 v = &mrt->vif_table[0];
1759 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1761 vif_delete(mrt, ct, 1, NULL);
1816 static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
1819 struct vif_device *out_vif = &mrt->vif_table[out_vifi];
1820 struct vif_device *in_vif = &mrt->vif_table[in_vifi];
1830 static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
1839 static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1843 struct vif_device *vif = &mrt->vif_table[vifi];
1859 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
1863 if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
1942 static int ipmr_find_vif(const struct mr_table *mrt, struct net_device *dev)
1946 for (ct = READ_ONCE(mrt->maxvif) - 1; ct >= 0; ct--) {
1947 if (rcu_access_pointer(mrt->vif_table[ct].dev) == dev)
1955 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1959 int true_vifi = ipmr_find_vif(mrt, dev);
1974 cache_proxy = mr_mfc_find_any_parent(mrt, vif);
1981 if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev) {
1999 if (true_vifi >= 0 && mrt->mroute_do_assert &&
2005 (mrt->mroute_do_pim ||
2011 ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
2012 if (mrt->mroute_do_wrvifwhole)
2013 ipmr_cache_report(mrt, skb, true_vifi,
2020 WRITE_ONCE(mrt->vif_table[vif].pkt_in,
2021 mrt->vif_table[vif].pkt_in + 1);
2022 WRITE_ONCE(mrt->vif_table[vif].bytes_in,
2023 mrt->vif_table[vif].bytes_in + skb->len);
2051 ipmr_queue_xmit(net, mrt, true_vifi,
2063 ipmr_queue_xmit(net, mrt, true_vifi, skb2,
2066 ipmr_queue_xmit(net, mrt, true_vifi, skb, psend);
2091 struct mr_table *mrt;
2094 err = ipmr_fib_lookup(net, &fl4, &mrt);
2097 return mrt;
2108 struct mr_table *mrt;
2130 mrt = ipmr_rt_fib_lookup(net, skb);
2131 if (IS_ERR(mrt)) {
2133 return PTR_ERR(mrt);
2148 mroute_sk = rcu_dereference(mrt->mroute_sk);
2158 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
2160 int vif = ipmr_find_vif(mrt, dev);
2163 cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
2179 vif = ipmr_find_vif(mrt, dev);
2181 return ipmr_cache_unresolved(mrt, vif, skb, dev);
2186 ip_mr_forward(net, mrt, dev, skb, cache, local);
2206 struct mr_table *mrt;
2213 mrt = ipmr_rt_fib_lookup(net, skb);
2214 if (IS_ERR(mrt))
2216 if (!mrt->mroute_do_pim ||
2220 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2233 struct mr_table *mrt;
2245 mrt = ipmr_rt_fib_lookup(net, skb);
2246 if (IS_ERR(mrt))
2248 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2261 struct mr_table *mrt;
2264 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2265 if (!mrt)
2269 cache = ipmr_cache_find(mrt, saddr, daddr);
2271 int vif = ipmr_find_vif(mrt, skb->dev);
2274 cache = ipmr_cache_find_any(mrt, daddr, vif);
2284 vif = ipmr_find_vif(mrt, dev);
2304 err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
2309 err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
2314 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2331 rtm->rtm_table = mrt->id;
2332 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2345 err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
2358 static int _ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2362 return ipmr_fill_mroute(mrt, skb, portid, seq, (struct mfc_cache *)c,
2387 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2390 struct net *net = read_pnet(&mrt->net);
2395 mrt->maxvif),
2400 err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2429 static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt)
2431 struct net *net = read_pnet(&mrt->net);
2458 nla_put_u32(skb, IPMRA_CREPORT_TABLE, mrt->id))
2540 struct mr_table *mrt;
2553 mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
2554 if (!mrt) {
2561 cache = ipmr_cache_find(mrt, src, grp);
2568 skb = nlmsg_new(mroute_msgsize(false, mrt->maxvif), GFP_KERNEL);
2574 err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
2603 struct mr_table *mrt;
2605 mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
2606 if (!mrt) {
2613 err = mr_table_dump(mrt, skb, cb, _ipmr_fill_mroute,
2663 struct mr_table *mrt;
2713 mrt = ipmr_get_table(net, tblid);
2714 if (!mrt) {
2718 *mrtret = mrt;
2721 mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);
2749 static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
2751 u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);
2753 if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
2756 mrt->mroute_reg_vif_num) ||
2758 mrt->mroute_do_assert) ||
2759 nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim) ||
2761 mrt->mroute_do_wrvifwhole))
2767 static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
2773 vif = &mrt->vif_table[vifid];
2835 struct mr_table *mrt;
2847 ipmr_for_each_table(mrt, net) {
2870 if (!ipmr_fill_table(mrt, skb)) {
2881 for (i = 0; i < mrt->maxvif; i++) {
2884 if (!ipmr_fill_vif(mrt, i, skb)) {
2919 struct mr_table *mrt;
2921 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2922 if (!mrt)
2925 iter->mrt = mrt;
2940 struct mr_table *mrt = iter->mrt;
2954 vif - mrt->vif_table,
2972 struct mr_table *mrt;
2974 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2975 if (!mrt)
2978 return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
2991 const struct mr_table *mrt = it->mrt;
2998 if (it->cache != &mrt->mfc_unres_queue) {
3005 if (VIF_EXISTS(mrt, n) &&
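
Many of the matches above sit in the MRT setsockopt control path (ip_mroute_setsockopt, vif_add, ipmr_mfc_add, mroute_clean_tables, mrtsock_destruct). For orientation only, below is a minimal userspace sketch of that control API as defined in <linux/mroute.h>; it is not code from the listing, and the addresses, vif layout, and TTL values are placeholders chosen for illustration. Running it requires CAP_NET_ADMIN.

	/* Hypothetical example: drive the kernel paths matched above via the
	 * raw-IGMP mroute socket.  All concrete values are placeholders. */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <arpa/inet.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/mroute.h>

	int main(void)
	{
		int one = 1;

		/* The mroute socket is a raw IGMP socket; MRT_INIT attaches it as
		 * mrt->mroute_sk (see ip_mroute_setsockopt in the listing). */
		int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
		if (fd < 0 || setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one)) < 0) {
			perror("MRT_INIT");
			return 1;
		}

		/* Register one virtual interface; serviced by vif_add(). */
		struct vifctl vif = { .vifc_vifi = 0, .vifc_threshold = 1 };
		vif.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");	/* placeholder */
		if (setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vif, sizeof(vif)) < 0)
			perror("MRT_ADD_VIF");

		/* Install one (S,G) forwarding entry; serviced by ipmr_mfc_add(). */
		struct mfcctl mfc;
		memset(&mfc, 0, sizeof(mfc));
		mfc.mfcc_origin.s_addr   = inet_addr("192.0.2.2");	/* placeholder S */
		mfc.mfcc_mcastgrp.s_addr = inet_addr("233.252.0.1");	/* placeholder G */
		mfc.mfcc_parent  = 0;		/* input vif */
		mfc.mfcc_ttls[0] = 1;		/* forward out of vif 0 */
		if (setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mfc, sizeof(mfc)) < 0)
			perror("MRT_ADD_MFC");

		/* Tear down; the socket-release path then runs mroute_clean_tables(). */
		setsockopt(fd, IPPROTO_IP, MRT_DONE, NULL, 0);
		close(fd);
		return 0;
	}

With CONFIG_IP_MROUTE_MULTIPLE_TABLES, the same calls can be pointed at a non-default table by setting the IP_ROUTER_ALERT-independent MRT_TABLE option before MRT_INIT, which is the ipmr_new_table()/raw_sk(sk)->ipmr_table branch visible around lines 1376 and 1529-1534 of the listing.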