Lines matching refs: mrt

Cross-reference listing over the IPv4 multicast routing code (net/ipv4/ipmr.c). Each entry below gives the source line number followed by the line that references mrt, the per-netns multicast routing table (struct mr_table) pointer.

73 	struct mr_table		*mrt;
76 /* Big lock, protecting vif table, mrt cache and mroute socket state.
98 static void ipmr_free_table(struct mr_table *mrt);
100 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
103 static int ipmr_cache_report(struct mr_table *mrt,
105 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
107 static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
108 static void mroute_clean_tables(struct mr_table *mrt, int flags);
112 #define ipmr_for_each_table(mrt, net) \
113 list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list, \
118 struct mr_table *mrt)
122 if (!mrt)
126 ret = list_entry_rcu(mrt->list.next,
136 struct mr_table *mrt;
138 ipmr_for_each_table(mrt, net) {
139 if (mrt->id == id)
140 return mrt;
146 struct mr_table **mrt)
162 *mrt = res.mrt;
170 struct mr_table *mrt;
186 mrt = ipmr_get_table(rule->fr_net, arg->table);
187 if (!mrt)
189 res->mrt = mrt;
241 struct mr_table *mrt;
250 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
251 if (IS_ERR(mrt)) {
252 err = PTR_ERR(mrt);
265 ipmr_free_table(mrt);
274 struct mr_table *mrt, *next;
277 list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
278 list_del(&mrt->list);
279 ipmr_free_table(mrt);
302 #define ipmr_for_each_table(mrt, net) \
303 for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
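The single-table build (line 303) keeps the same iteration macro as the multi-table build (line 112) by degenerating it into a for-loop that runs at most once, so every ipmr_for_each_table() call site compiles unchanged whichever way CONFIG_IP_MROUTE_MULTIPLE_TABLES is set. A standalone sketch of the trick, with struct layouts reduced to the one field that matters:

#include <stdio.h>

struct mr_table { int id; };
struct netns_ipv4 { struct mr_table *mrt; };
struct net { struct netns_ipv4 ipv4; };

/* degenerate iteration: visit the lone table, then set mrt to NULL */
#define ipmr_for_each_table(mrt, net) \
	for (mrt = (net)->ipv4.mrt; mrt; mrt = NULL)

int main(void)
{
	struct mr_table only = { .id = 1 };
	struct net net = { .ipv4 = { .mrt = &only } };
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, &net)
		printf("visiting table %d\n", mrt->id);	/* body runs once */
	return 0;
}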
306 struct mr_table *mrt)
308 if (!mrt)
309 return net->ipv4.mrt;
315 return net->ipv4.mrt;
319 struct mr_table **mrt)
321 *mrt = net->ipv4.mrt;
327 struct mr_table *mrt;
329 mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
330 if (IS_ERR(mrt))
331 return PTR_ERR(mrt);
332 net->ipv4.mrt = mrt;
339 ipmr_free_table(net->ipv4.mrt);
340 net->ipv4.mrt = NULL;
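In both configurations ipmr_new_table() reports failure through the kernel's ERR_PTR convention, which is why the callers above test IS_ERR()/PTR_ERR() (lines 251-252 and 330-331) instead of comparing against NULL: one pointer-sized return value carries either a valid table or a negative errno. A minimal user-space approximation of those helpers (the real ones live in include/linux/err.h; the malloc-backed ipmr_new_table() here is only a stand-in):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* errno-encoding pointers occupy the top page of the address space */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct mr_table { int id; };

static struct mr_table *ipmr_new_table(int id)
{
	struct mr_table *mrt = malloc(sizeof(*mrt));

	if (!mrt)
		return ERR_PTR(-ENOMEM);
	mrt->id = id;
	return mrt;
}

int main(void)
{
	struct mr_table *mrt = ipmr_new_table(1);

	if (IS_ERR(mrt)) {
		printf("failed: %ld\n", PTR_ERR(mrt));
		return 1;
	}
	printf("created table %d\n", mrt->id);
	free(mrt);
	return 0;
}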
381 static void ipmr_new_table_set(struct mr_table *mrt,
385 list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
401 struct mr_table *mrt;
407 mrt = ipmr_get_table(net, id);
408 if (mrt)
409 return mrt;
415 static void ipmr_free_table(struct mr_table *mrt)
417 del_timer_sync(&mrt->ipmr_expire_timer);
418 mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC |
420 rhltable_destroy(&mrt->mfc_hash);
421 kfree(mrt);
497 struct mr_table *mrt;
505 err = ipmr_fib_lookup(net, &fl4, &mrt);
514 ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
540 static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
545 if (mrt->id == RT_TABLE_DEFAULT)
548 sprintf(name, "pimreg%u", mrt->id);
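Lines 545-548 show ipmr_reg_vif() deriving the PIM register device name from its table: the branch at line 545 suggests the default table gets the bare name "pimreg" while every other table gets "pimreg<id>". A small sketch of that naming scheme (reg_vif_name() is my name for the extracted logic; the real function goes on to allocate and register the netdev):

#include <stdio.h>

#define IFNAMSIZ 16
#define RT_TABLE_DEFAULT 253

static void reg_vif_name(char name[IFNAMSIZ], unsigned int table_id)
{
	if (table_id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", table_id);
}

int main(void)
{
	char name[IFNAMSIZ];

	reg_vif_name(name, RT_TABLE_DEFAULT);
	printf("%s\n", name);	/* pimreg */
	reg_vif_name(name, 42);
	printf("%s\n", name);	/* pimreg42 */
	return 0;
}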
577 static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
595 if (mrt->mroute_reg_vif_num >= 0)
596 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
615 static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
641 * @mrt: Table to delete from
646 static int vif_delete(struct mr_table *mrt, int vifi, int notify,
649 struct net *net = read_pnet(&mrt->net);
654 if (vifi < 0 || vifi >= mrt->maxvif)
657 v = &mrt->vif_table[vifi];
659 if (VIF_EXISTS(mrt, vifi))
661 mrt->id);
672 if (vifi == mrt->mroute_reg_vif_num)
673 mrt->mroute_reg_vif_num = -1;
675 if (vifi + 1 == mrt->maxvif) {
679 if (VIF_EXISTS(mrt, tmp))
682 mrt->maxvif = tmp+1;
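When the deleted vif was the highest in-use slot, lines 675-682 shrink maxvif by scanning downward past any holes, keeping it a tight bound for loops such as the threshold update at line 797. A simplified standalone version, with a plain in-use flag standing in for VIF_EXISTS():

#include <stdbool.h>
#include <stdio.h>

#define MAXVIFS 32

struct table {
	bool vif_in_use[MAXVIFS];
	int maxvif;	/* one past the highest in-use index */
};

static void vif_delete(struct table *t, int vifi)
{
	t->vif_in_use[vifi] = false;

	if (vifi + 1 == t->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--)
			if (t->vif_in_use[tmp])
				break;	/* found the new highest vif */
		t->maxvif = tmp + 1;
	}
}

int main(void)
{
	struct table t = { .maxvif = 5 };

	t.vif_in_use[0] = true;
	t.vif_in_use[4] = true;
	vif_delete(&t, 4);
	printf("maxvif is now %d\n", t.maxvif);	/* 1 */
	return 0;
}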
720 static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
722 struct net *net = read_pnet(&mrt->net);
726 atomic_dec(&mrt->cache_resolve_queue_len);
751 struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
757 mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
761 if (list_empty(&mrt->mfc_unres_queue))
767 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
776 mroute_netlink_event(mrt, (struct mfc_cache *)c, RTM_DELROUTE);
777 ipmr_destroy_unres(mrt, (struct mfc_cache *)c);
780 if (!list_empty(&mrt->mfc_unres_queue))
781 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
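The expiry handler around lines 751-781 walks the unresolved-cache queue, destroys entries whose deadline has passed, and re-arms the timer for the earliest deadline still pending. A condensed user-space model of that walk, with an integer clock in place of jiffies and an array in place of the list (entry layout and function name are mine):

#include <stdio.h>
#include <limits.h>

struct unres { int expires; int live; };

/* reap expired entries; return delay to the next expiry, -1 if none */
static int expire_walk(struct unres *q, int n, int now)
{
	int next = INT_MAX, i;

	for (i = 0; i < n; i++) {
		if (!q[i].live)
			continue;
		if (q[i].expires > now) {
			if (q[i].expires - now < next)
				next = q[i].expires - now;
			continue;	/* not due yet */
		}
		q[i].live = 0;		/* due: destroy the entry */
		printf("expired entry %d\n", i);
	}
	return next == INT_MAX ? -1 : next;
}

int main(void)
{
	struct unres q[] = { { 5, 1 }, { 15, 1 }, { 9, 1 } };
	int delay = expire_walk(q, 3, 10);

	if (delay >= 0)
		printf("re-arm timer in %d ticks\n", delay);	/* 5 */
	return 0;
}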
788 static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
797 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
798 if (VIF_EXISTS(mrt, vifi) &&
810 static int vif_add(struct net *net, struct mr_table *mrt,
815 struct vif_device *v = &mrt->vif_table[vifi];
821 if (VIF_EXISTS(mrt, vifi))
831 if (mrt->mroute_reg_vif_num >= 0)
833 dev = ipmr_reg_vif(net, mrt);
902 mrt->mroute_reg_vif_num = vifi;
903 if (vifi+1 > mrt->maxvif)
904 mrt->maxvif = vifi+1;
906 call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, vifi, mrt->id);
911 static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
920 return mr_mfc_find(mrt, &arg);
924 static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
933 return mr_mfc_find_any_parent(mrt, vifi);
934 return mr_mfc_find_any(mrt, vifi, &arg);
938 static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
947 return mr_mfc_find_parent(mrt, &arg, parent);
976 static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
988 if (mr_fill_mroute(mrt, skb, &c->_c,
1003 ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
1012 static int ipmr_cache_report(struct mr_table *mrt,
1047 msg->im_vif = mrt->mroute_reg_vif_num;
1048 msg->im_vif_hi = mrt->mroute_reg_vif_num >> 8;
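Lines 1047-1048 split the vif index across two 8-bit fields: im_vif preserves the original struct igmpmsg layout for existing userspace, and im_vif_hi carries the high byte so indices above 255 survive the trip. The round trip, with a two-field slice of the message standing in for the full struct:

#include <stdio.h>

typedef unsigned short vifi_t;

struct msg_slice {
	unsigned char im_vif;		/* low 8 bits (legacy field) */
	unsigned char im_vif_hi;	/* high 8 bits */
};

int main(void)
{
	vifi_t vifi = 300;
	struct msg_slice msg;
	vifi_t decoded;

	msg.im_vif = vifi & 0xff;
	msg.im_vif_hi = vifi >> 8;
	decoded = msg.im_vif | (msg.im_vif_hi << 8);
	printf("%u -> hi=%u lo=%u -> %u\n",
	       vifi, msg.im_vif_hi, msg.im_vif, decoded);
	return 0;
}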
1074 mroute_sk = rcu_dereference(mrt->mroute_sk);
1081 igmpmsg_netlink_event(mrt, skb);
1095 static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
1104 list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
1128 err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
1141 atomic_inc(&mrt->cache_resolve_queue_len);
1142 list_add(&c->_c.list, &mrt->mfc_unres_queue);
1143 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1145 if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
1146 mod_timer(&mrt->ipmr_expire_timer,
1169 static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
1171 struct net *net = read_pnet(&mrt->net);
1176 c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
1181 rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ipmr_rht_params);
1183 call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
1184 mroute_netlink_event(mrt, c, RTM_DELROUTE);
1190 static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1203 c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
1209 ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
1214 mrt->id);
1215 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1230 ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
1234 ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
1241 list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
1247 list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
1252 atomic_dec(&mrt->cache_resolve_queue_len);
1257 if (list_empty(&mrt->mfc_unres_queue))
1258 del_timer(&mrt->ipmr_expire_timer);
1262 ipmr_cache_resolve(net, mrt, uc, c);
1265 call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id);
1266 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1271 static void mroute_clean_tables(struct mr_table *mrt, int flags)
1273 struct net *net = read_pnet(&mrt->net);
1281 for (i = 0; i < mrt->maxvif; i++) {
1282 if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
1284 (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT_FLUSH_VIFS)))
1286 vif_delete(mrt, i, 0, &list);
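The skip condition at lines 1282-1284 reads awkwardly because it is written as two negated cases; stated positively, a vif is flushed when its VIFF_STATIC bit matches the caller's request: static vifs need MRT_FLUSH_VIFS_STATIC, dynamic vifs need MRT_FLUSH_VIFS. A sketch of the predicate (flag values chosen for the sketch, not copied from the uapi header):

#include <stdbool.h>
#include <stdio.h>

#define MRT_FLUSH_VIFS		0x1
#define MRT_FLUSH_VIFS_STATIC	0x2

static bool should_flush_vif(bool is_static, int flags)
{
	if (is_static)
		return flags & MRT_FLUSH_VIFS_STATIC;
	return flags & MRT_FLUSH_VIFS;
}

int main(void)
{
	/* flushing dynamic vifs leaves static ones in place */
	printf("%d\n", should_flush_vif(true,  MRT_FLUSH_VIFS));	/* 0 */
	printf("%d\n", should_flush_vif(false, MRT_FLUSH_VIFS));	/* 1 */
	printf("%d\n", should_flush_vif(true,  MRT_FLUSH_VIFS_STATIC));	/* 1 */
	return 0;
}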
1293 list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
1297 rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
1301 mrt->id);
1302 mroute_netlink_event(mrt, cache, RTM_DELROUTE);
1308 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1310 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1313 mroute_netlink_event(mrt, cache, RTM_DELROUTE);
1314 ipmr_destroy_unres(mrt, cache);
1327 struct mr_table *mrt;
1330 ipmr_for_each_table(mrt, net) {
1331 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1337 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1338 mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_MFC);
1355 struct mr_table *mrt;
1369 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1370 if (!mrt) {
1375 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1388 if (rtnl_dereference(mrt->mroute_sk)) {
1395 rcu_assign_pointer(mrt->mroute_sk, sk);
1404 if (sk != rcu_access_pointer(mrt->mroute_sk)) {
1431 ret = vif_add(net, mrt, &vif,
1432 sk == rtnl_dereference(mrt->mroute_sk));
1434 ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
1457 ret = ipmr_mfc_delete(mrt, &mfc, parent);
1459 ret = ipmr_mfc_add(net, mrt, &mfc,
1460 sk == rtnl_dereference(mrt->mroute_sk),
1472 mroute_clean_tables(mrt, val);
1484 mrt->mroute_do_assert = val;
1502 if (val != mrt->mroute_do_pim) {
1503 mrt->mroute_do_pim = val;
1504 mrt->mroute_do_assert = val;
1505 mrt->mroute_do_wrvifwhole = do_wrvifwhole;
1522 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1525 mrt = ipmr_new_table(net, uval);
1526 if (IS_ERR(mrt))
1527 ret = PTR_ERR(mrt);
1548 struct mr_table *mrt;
1554 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1555 if (!mrt)
1565 val = mrt->mroute_do_pim;
1568 val = mrt->mroute_do_assert;
1594 struct mr_table *mrt;
1596 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1597 if (!mrt)
1604 if (vr.vifi >= mrt->maxvif)
1606 vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
1608 vif = &mrt->vif_table[vr.vifi];
1609 if (VIF_EXISTS(mrt, vr.vifi)) {
1627 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
1669 struct mr_table *mrt;
1671 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1672 if (!mrt)
1679 if (vr.vifi >= mrt->maxvif)
1681 vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
1683 vif = &mrt->vif_table[vr.vifi];
1684 if (VIF_EXISTS(mrt, vr.vifi)) {
1702 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
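Both ioctl paths clamp vr.vifi with array_index_nospec() right after the bounds check (lines 1604-1606 and 1679-1681), so a mispredicted branch cannot speculatively index vif_table past maxvif. The generic fallback builds an all-ones or all-zero mask without branching; roughly like this user-space rendering (it relies on arithmetic right shift of a negative long, which the kernel assumes of its compilers):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))

/* ~0UL when index < size, 0 otherwise, computed branch-free */
static unsigned long index_mask_nospec(unsigned long index,
				       unsigned long size)
{
	return ~(long)(index | (size - 1 - index)) >> (BITS_PER_LONG - 1);
}

static unsigned long index_nospec(unsigned long index, unsigned long size)
{
	return index & index_mask_nospec(index, size);
}

int main(void)
{
	printf("%lu\n", index_nospec(3, 8));	/* 3: in range, unchanged */
	printf("%lu\n", index_nospec(9, 8));	/* 0: out of range, clamped */
	return 0;
}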
1725 struct mr_table *mrt;
1732 ipmr_for_each_table(mrt, net) {
1733 v = &mrt->vif_table[0];
1734 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1736 vif_delete(mrt, ct, 1, NULL);
1792 static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
1795 struct vif_device *out_vif = &mrt->vif_table[out_vifi];
1796 struct vif_device *in_vif = &mrt->vif_table[in_vifi];
1806 static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
1815 static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1819 struct vif_device *vif = &mrt->vif_table[vifi];
1833 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
1837 if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
1915 static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
1919 for (ct = mrt->maxvif-1; ct >= 0; ct--) {
1920 if (mrt->vif_table[ct].dev == dev)
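ipmr_find_vif() (lines 1915-1920) maps a net_device back to its vif index with a reverse linear scan bounded by maxvif; the miss case is not in this listing, so the negative return below is my assumption of the usual kernel convention. Sketch, with opaque pointers standing in for net_devices:

#include <stdio.h>
#include <errno.h>

struct vif { const void *dev; };

static int find_vif(const struct vif *tbl, int maxvif, const void *dev)
{
	int ct;

	for (ct = maxvif - 1; ct >= 0; ct--)
		if (tbl[ct].dev == dev)
			return ct;
	return -ENODEV;	/* assumed miss convention */
}

int main(void)
{
	int a, b;	/* stand-ins for two devices */
	struct vif tbl[] = { { &a }, { &b } };

	printf("%d\n", find_vif(tbl, 2, &b));	/* 1 */
	printf("%d\n", find_vif(tbl, 2, NULL));	/* -ENODEV (-19) */
	return 0;
}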
1927 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1931 int true_vifi = ipmr_find_vif(mrt, dev);
1946 cache_proxy = mr_mfc_find_any_parent(mrt, vif);
1953 if (mrt->vif_table[vif].dev != dev) {
1971 if (true_vifi >= 0 && mrt->mroute_do_assert &&
1977 (mrt->mroute_do_pim ||
1983 ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
1984 if (mrt->mroute_do_wrvifwhole)
1985 ipmr_cache_report(mrt, skb, true_vifi,
1992 mrt->vif_table[vif].pkt_in++;
1993 mrt->vif_table[vif].bytes_in += skb->len;
2021 ipmr_queue_xmit(net, mrt, true_vifi,
2033 ipmr_queue_xmit(net, mrt, true_vifi, skb2,
2036 ipmr_queue_xmit(net, mrt, true_vifi, skb, psend);
2061 struct mr_table *mrt;
2064 err = ipmr_fib_lookup(net, &fl4, &mrt);
2067 return mrt;
2078 struct mr_table *mrt;
2100 mrt = ipmr_rt_fib_lookup(net, skb);
2101 if (IS_ERR(mrt)) {
2103 return PTR_ERR(mrt);
2118 mroute_sk = rcu_dereference(mrt->mroute_sk);
2128 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
2130 int vif = ipmr_find_vif(mrt, dev);
2133 cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
2150 vif = ipmr_find_vif(mrt, dev);
2152 int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
2163 ip_mr_forward(net, mrt, dev, skb, cache, local);
2184 struct mr_table *mrt;
2191 mrt = ipmr_rt_fib_lookup(net, skb);
2192 if (IS_ERR(mrt))
2194 if (!mrt->mroute_do_pim ||
2198 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2211 struct mr_table *mrt;
2223 mrt = ipmr_rt_fib_lookup(net, skb);
2224 if (IS_ERR(mrt))
2226 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2239 struct mr_table *mrt;
2242 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2243 if (!mrt)
2247 cache = ipmr_cache_find(mrt, saddr, daddr);
2249 int vif = ipmr_find_vif(mrt, skb->dev);
2252 cache = ipmr_cache_find_any(mrt, daddr, vif);
2263 vif = ipmr_find_vif(mrt, dev);
2285 err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
2292 err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
2298 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2315 rtm->rtm_table = mrt->id;
2316 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2329 err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
2342 static int _ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2346 return ipmr_fill_mroute(mrt, skb, portid, seq, (struct mfc_cache *)c,
2371 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2374 struct net *net = read_pnet(&mrt->net);
2379 mrt->maxvif),
2384 err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2413 static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
2415 struct net *net = read_pnet(&mrt->net);
2442 nla_put_u32(skb, IPMRA_CREPORT_TABLE, mrt->id))
2524 struct mr_table *mrt;
2537 mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
2538 if (!mrt) {
2545 cache = ipmr_cache_find(mrt, src, grp);
2552 skb = nlmsg_new(mroute_msgsize(false, mrt->maxvif), GFP_KERNEL);
2558 err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
2587 struct mr_table *mrt;
2589 mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
2590 if (!mrt) {
2597 err = mr_table_dump(mrt, skb, cb, _ipmr_fill_mroute,
2647 struct mr_table *mrt;
2697 mrt = ipmr_get_table(net, tblid);
2698 if (!mrt) {
2702 *mrtret = mrt;
2705 mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);
2733 static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
2735 u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);
2737 if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
2740 mrt->mroute_reg_vif_num) ||
2742 mrt->mroute_do_assert) ||
2743 nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim) ||
2745 mrt->mroute_do_wrvifwhole))
2751 static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
2757 if (!VIF_EXISTS(mrt, vifid))
2760 vif = &mrt->vif_table[vifid];
2816 struct mr_table *mrt;
2828 ipmr_for_each_table(mrt, net) {
2851 if (!ipmr_fill_table(mrt, skb)) {
2862 for (i = 0; i < mrt->maxvif; i++) {
2865 if (!ipmr_fill_vif(mrt, i, skb)) {
2900 struct mr_table *mrt;
2902 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2903 if (!mrt)
2906 iter->mrt = mrt;
2921 struct mr_table *mrt = iter->mrt;
2933 vif - mrt->vif_table,
2951 struct mr_table *mrt;
2953 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2954 if (!mrt)
2957 return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
2970 const struct mr_table *mrt = it->mrt;
2977 if (it->cache != &mrt->mfc_unres_queue) {
2984 if (VIF_EXISTS(mrt, n) &&