Lines matching refs: ign
78 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
124 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
132 for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
157 for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
181 for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
207 for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
235 t = rcu_dereference(ign->collect_md_tun_erspan);
237 t = rcu_dereference(ign->collect_md_tun);
242 ndev = READ_ONCE(ign->fb_tunnel_dev);
249 static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
264 return &ign->tunnels[prio][h];
267 static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
270 rcu_assign_pointer(ign->collect_md_tun, t);
273 static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
276 rcu_assign_pointer(ign->collect_md_tun_erspan, t);
279 static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
282 rcu_assign_pointer(ign->collect_md_tun, NULL);
285 static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
289 rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
292 static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
295 return __ip6gre_bucket(ign, &t->parms);
298 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
300 struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
306 static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
311 for (tp = ip6gre_bucket(ign, t);
331 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
333 for (tp = __ip6gre_bucket(ign, parms);
352 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
385 ip6gre_tunnel_link(ign, nt);
396 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
398 ip6erspan_tunnel_unlink_md(ign, t);
399 ip6gre_tunnel_unlink(ign, t);
407 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
409 ip6gre_tunnel_unlink_md(ign, t);
410 ip6gre_tunnel_unlink(ign, t);
411 if (ign->fb_tunnel_dev == dev)
412 WRITE_ONCE(ign->fb_tunnel_dev, NULL);
1273 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1279 if (dev == ign->fb_tunnel_dev) {
1317 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1326 ip6gre_tunnel_unlink(ign, t);
1329 ip6gre_tunnel_link(ign, t);
1350 if (dev == ign->fb_tunnel_dev) {
1360 if (t == netdev_priv(ign->fb_tunnel_dev))
1566 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1581 t = rtnl_dereference(ign->tunnels[prio][h]);
1598 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1610 ign->fb_tunnel_dev = ndev;
1611 dev_net_set(ign->fb_tunnel_dev, net);
1615 ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
1618 ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
1619 ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
1621 err = register_netdev(ign->fb_tunnel_dev);
1625 rcu_assign_pointer(ign->tunnels_wc[0],
1626 netdev_priv(ign->fb_tunnel_dev));
2015 struct ip6gre_net *ign;
2019 ign = net_generic(net, ip6gre_net_id);
2022 if (rtnl_dereference(ign->collect_md_tun))
2032 ip6gre_tunnel_link_md(ign, nt);
2045 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
2048 if (dev == ign->fb_tunnel_dev)
2077 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
2084 ip6gre_tunnel_unlink_md(ign, t);
2085 ip6gre_tunnel_unlink(ign, t);
2087 ip6gre_tunnel_link_md(ign, t);
2088 ip6gre_tunnel_link(ign, t);
2095 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
2097 if (dev != ign->fb_tunnel_dev)
2248 struct ip6gre_net *ign;
2253 ign = net_generic(net, ip6gre_net_id);
2256 if (rtnl_dereference(ign->collect_md_tun_erspan))
2266 ip6erspan_tunnel_link_md(ign, nt);
2290 struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
2299 ip6gre_tunnel_unlink_md(ign, t);
2300 ip6gre_tunnel_unlink(ign, t);
2302 ip6erspan_tunnel_link_md(ign, t);
2303 ip6gre_tunnel_link(ign, t);
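
The listing above traces one per-netns pattern: struct ip6gre_net ("ign") owns RCU-protected tunnel chains that are written under RTNL (rcu_assign_pointer(), rtnl_dereference()) and read locklessly on the receive path (rcu_dereference(), for_each_ip_tunnel_rcu()), alongside a single collect_md slot and the fb_tunnel_dev fallback device. The sketch below is a minimal, self-contained illustration of that link/unlink/lookup pattern using simplified, hypothetical demo_* types and a flat 16-bucket table; it is not the actual ip6_gre.c layout or hashing.

/*
 * Simplified sketch of the RCU hash-bucket pattern seen in
 * ip6gre_tunnel_link()/ip6gre_tunnel_unlink() above: writers hold RTNL,
 * readers hold rcu_read_lock(). The demo_* names, the flat bucket array
 * and the key-only hash are assumptions, not the real ip6_gre.c code.
 */
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>

#define DEMO_HASH_BITS	4
#define DEMO_HASH_SIZE	(1 << DEMO_HASH_BITS)

struct demo_tnl {
	struct demo_tnl __rcu	*next;	/* chained within one bucket */
	__be32			key;
};

struct demo_net {
	struct demo_tnl __rcu	*buckets[DEMO_HASH_SIZE];
	struct demo_tnl __rcu	*collect_md_tun;	/* at most one per netns */
};

static struct demo_tnl __rcu **demo_bucket(struct demo_net *dn, __be32 key)
{
	return &dn->buckets[hash_32((__force u32)key, DEMO_HASH_BITS)];
}

/* Writer, RTNL held: publish the new tunnel as the bucket head. */
static void demo_tunnel_link(struct demo_net *dn, struct demo_tnl *t)
{
	struct demo_tnl __rcu **tp = demo_bucket(dn, t->key);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/* Writer, RTNL held: splice the tunnel out of its bucket. */
static void demo_tunnel_unlink(struct demo_net *dn, struct demo_tnl *t)
{
	struct demo_tnl __rcu **tp;
	struct demo_tnl *iter;

	for (tp = demo_bucket(dn, t->key);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (iter == t) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

/* Writer, RTNL held: set/clear the single collect_md slot, mirroring what
 * the *_link_md()/*_unlink_md() helpers above do for ign->collect_md_tun. */
static void demo_link_md(struct demo_net *dn, struct demo_tnl *t)
{
	rcu_assign_pointer(dn->collect_md_tun, t);
}

static void demo_unlink_md(struct demo_net *dn)
{
	rcu_assign_pointer(dn->collect_md_tun, NULL);
}

/* Reader, rcu_read_lock() held by the caller (receive path): walk the
 * bucket, then fall back to the collect_md tunnel if nothing matched. */
static struct demo_tnl *demo_tunnel_lookup(struct demo_net *dn, __be32 key)
{
	struct demo_tnl *t;

	for (t = rcu_dereference(*demo_bucket(dn, key)); t;
	     t = rcu_dereference(t->next))
		if (t->key == key)
			return t;
	return rcu_dereference(dn->collect_md_tun);
}

Because every writer already serializes on RTNL, the update side only needs rtnl_dereference(); the rcu_assign_pointer() calls are what make a new chain visible to lockless readers. That is the same split visible in the ip6gre_tunnel_link()/ip6gre_tunnel_unlink() and collect_md lines listed above.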