Lines matching refs: ign
78 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
124 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
132 for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
157 for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
181 for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
207 for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
235 t = rcu_dereference(ign->collect_md_tun_erspan);
237 t = rcu_dereference(ign->collect_md_tun);
242 ndev = READ_ONCE(ign->fb_tunnel_dev);
249 static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
264 return &ign->tunnels[prio][h];
267 static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
270 rcu_assign_pointer(ign->collect_md_tun, t);
273 static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
276 rcu_assign_pointer(ign->collect_md_tun_erspan, t);
279 static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
282 rcu_assign_pointer(ign->collect_md_tun, NULL);
285 static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
289 rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
292 static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
295 return __ip6gre_bucket(ign, &t->parms);
298 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
300 struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
306 static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
311 for (tp = ip6gre_bucket(ign, t);
331 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
333 for (tp = __ip6gre_bucket(ign, parms);
352 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
390 ip6gre_tunnel_link(ign, nt);
401 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
403 ip6erspan_tunnel_unlink_md(ign, t);
404 ip6gre_tunnel_unlink(ign, t);
412 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
414 ip6gre_tunnel_unlink_md(ign, t);
415 ip6gre_tunnel_unlink(ign, t);
416 if (ign->fb_tunnel_dev == dev)
417 WRITE_ONCE(ign->fb_tunnel_dev, NULL);
1273 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1279 if (dev == ign->fb_tunnel_dev) {
1317 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1326 ip6gre_tunnel_unlink(ign, t);
1329 ip6gre_tunnel_link(ign, t);
1350 if (dev == ign->fb_tunnel_dev) {
1360 if (t == netdev_priv(ign->fb_tunnel_dev))
1569 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1584 t = rtnl_dereference(ign->tunnels[prio][h]);
1601 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1613 ign->fb_tunnel_dev = ndev;
1614 dev_net_set(ign->fb_tunnel_dev, net);
1618 ign->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
1621 ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
1622 ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
1624 err = register_netdev(ign->fb_tunnel_dev);
1628 rcu_assign_pointer(ign->tunnels_wc[0],
1629 netdev_priv(ign->fb_tunnel_dev));
2018 struct ip6gre_net *ign;
2022 ign = net_generic(net, ip6gre_net_id);
2025 if (rtnl_dereference(ign->collect_md_tun))
2035 ip6gre_tunnel_link_md(ign, nt);
2048 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
2051 if (dev == ign->fb_tunnel_dev)
2080 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
2087 ip6gre_tunnel_unlink_md(ign, t);
2088 ip6gre_tunnel_unlink(ign, t);
2090 ip6gre_tunnel_link_md(ign, t);
2091 ip6gre_tunnel_link(ign, t);
2098 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
2100 if (dev != ign->fb_tunnel_dev)
2251 struct ip6gre_net *ign;
2256 ign = net_generic(net, ip6gre_net_id);
2259 if (rtnl_dereference(ign->collect_md_tun_erspan))
2269 ip6erspan_tunnel_link_md(ign, nt);
2293 struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
2302 ip6gre_tunnel_unlink_md(ign, t);
2303 ip6gre_tunnel_unlink(ign, t);
2305 ip6erspan_tunnel_link_md(ign, t);
2306 ip6gre_tunnel_link(ign, t);
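
The matched lines all trace one pattern: the per-netns ip6gre state ("ign") keeps tunnels in the ign->tunnels[prio][h] hash buckets, ip6gre_tunnel_link() inserts a tunnel at the bucket head with rcu_assign_pointer(), and ip6gre_tunnel_unlink() walks the bucket chain to splice the entry back out (collect_md_tun and collect_md_tun_erspan are single-pointer variants of the same idea). The sketch below is a minimal user-space model of that bucket link/unlink pattern, not kernel code: model_net, model_tnl, model_bucket, model_link, model_unlink and HASH_SIZE are illustrative stand-ins, and plain pointer assignments stand in for the rcu_assign_pointer()/rtnl_dereference() updates the kernel performs under RTNL.

    /*
     * Minimal user-space model of the link/unlink pattern shown by the
     * matches above.  All names here are stand-ins for illustration.
     */
    #include <stdio.h>

    #define HASH_SIZE 32   /* stand-in for the kernel's bucket count */

    struct model_tnl {
            struct model_tnl *next;   /* kernel: ip6_tnl.next (__rcu)    */
            unsigned int prio;        /* which of the four sub-tables    */
            unsigned int hash;        /* bucket index within that table  */
            const char *name;
    };

    struct model_net {
            /* kernel: tunnels_wc/_l/_r/_r_l overlay ign->tunnels[4][]   */
            struct model_tnl *tunnels[4][HASH_SIZE];
    };

    /* kernel: __ip6gre_bucket() returning &ign->tunnels[prio][h]        */
    static struct model_tnl **model_bucket(struct model_net *ign,
                                           const struct model_tnl *t)
    {
            return &ign->tunnels[t->prio][t->hash];
    }

    /* kernel: ip6gre_tunnel_link() - head insertion into the bucket     */
    static void model_link(struct model_net *ign, struct model_tnl *t)
    {
            struct model_tnl **tp = model_bucket(ign, t);

            t->next = *tp;
            *tp = t;
    }

    /* kernel: ip6gre_tunnel_unlink() - walk the chain, splice entry out */
    static void model_unlink(struct model_net *ign, struct model_tnl *t)
    {
            struct model_tnl **tp;

            for (tp = model_bucket(ign, t); *tp; tp = &(*tp)->next) {
                    if (*tp == t) {
                            *tp = t->next;
                            break;
                    }
            }
    }

    int main(void)
    {
            struct model_net ign = { 0 };
            struct model_tnl a = { .prio = 1, .hash = 3, .name = "a" };
            struct model_tnl b = { .prio = 1, .hash = 3, .name = "b" };

            model_link(&ign, &a);
            model_link(&ign, &b);          /* b now heads the shared bucket */
            model_unlink(&ign, &a);

            printf("bucket head: %s\n", ign.tunnels[1][3]->name);  /* "b" */
            return 0;
    }

Built with any C99 compiler, main() prints "bucket head: b": the second link lands at the bucket head and the splice-out of the first entry leaves the chain intact, which is the behaviour the kernel helpers above provide under RCU.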