Lines matching refs:tun in tun.c

6  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
30 #define DRV_NAME "tun"
132 struct tun_struct __rcu *tun;
158 struct tun_struct *tun;
222 static void tun_flow_init(struct tun_struct *tun);
223 static void tun_flow_uninit(struct tun_struct *tun);
265 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
271 netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll);
300 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
302 return tun->flags & TUN_VNET_BE ? false :
306 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
308 int be = !!(tun->flags & TUN_VNET_BE);
316 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
324 tun->flags |= TUN_VNET_BE;
326 tun->flags &= ~TUN_VNET_BE;
331 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
336 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
341 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
347 static inline bool tun_is_little_endian(struct tun_struct *tun)
349 return tun->flags & TUN_VNET_LE ||
350 tun_legacy_is_little_endian(tun);
353 static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
355 return __virtio16_to_cpu(tun_is_little_endian(tun), val);
358 static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
360 return __cpu_to_virtio16(tun_is_little_endian(tun), val);
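
     The helpers above (lines 300-360) pick the byte order for virtio_net_hdr
     fields: TUN_VNET_LE forces little endian, otherwise
     tun_legacy_is_little_endian() falls back to the guest-native order (the
     TUN_VNET_BE branch only exists under CONFIG_TUN_VNET_CROSS_LE, hence the
     duplicate signatures at lines 331-341). A hedged userspace model of the
     same conversion, mirroring __virtio16_to_cpu(); model_tun16_to_cpu is an
     illustrative name, not a driver symbol:

        #define _DEFAULT_SOURCE
        #include <endian.h>
        #include <stdint.h>
        #include <stdio.h>

        /* LE headers are read with le16toh(), legacy BE ones with be16toh(),
         * the same choice tun_is_little_endian() encodes. */
        static uint16_t model_tun16_to_cpu(int is_little_endian, uint16_t wire)
        {
            return is_little_endian ? le16toh(wire) : be16toh(wire);
        }

        int main(void)
        {
            uint16_t wire = htole16(1500);
            printf("hdr_len = %u\n", model_tun16_to_cpu(1, wire)); /* 1500 */
            return 0;
        }
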
379 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
386 netif_info(tun, tx_queued, tun->dev,
393 e->tun = tun;
395 ++tun->flow_count;
400 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
402 netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
406 --tun->flow_count;
409 static void tun_flow_flush(struct tun_struct *tun)
413 spin_lock_bh(&tun->lock);
418 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
419 tun_flow_delete(tun, e);
421 spin_unlock_bh(&tun->lock);
424 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
428 spin_lock_bh(&tun->lock);
433 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
435 tun_flow_delete(tun, e);
438 spin_unlock_bh(&tun->lock);
443 struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
444 unsigned long delay = tun->ageing_time;
449 spin_lock(&tun->lock);
454 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
459 tun_flow_delete(tun, e);
469 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
470 spin_unlock(&tun->lock);
473 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
478 unsigned long delay = tun->ageing_time;
481 head = &tun->flows[tun_hashfn(rxhash)];
494 spin_lock_bh(&tun->lock);
496 tun->flow_count < MAX_TAP_FLOWS)
497 tun_flow_create(tun, head, rxhash, queue_index);
499 if (!timer_pending(&tun->flow_gc_timer))
500 mod_timer(&tun->flow_gc_timer,
502 spin_unlock_bh(&tun->lock);
523 static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
529 numqueues = READ_ONCE(tun->numqueues);
532 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
544 static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
550 numqueues = READ_ONCE(tun->numqueues);
554 prog = rcu_dereference(tun->steering_prog);
564 struct tun_struct *tun = netdev_priv(dev);
568 if (rcu_dereference(tun->steering_prog))
569 ret = tun_ebpf_select_queue(tun, skb);
571 ret = tun_automq_select_queue(tun, skb);
577 static inline bool tun_not_capable(struct tun_struct *tun)
580 struct net *net = dev_net(tun->dev);
582 return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
583 (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
587 static void tun_set_real_num_queues(struct tun_struct *tun)
589 netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
590 netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
593 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
595 tfile->detached = tun;
596 list_add_tail(&tfile->next, &tun->disabled);
597 ++tun->numdisabled;
602 struct tun_struct *tun = tfile->detached;
606 --tun->numdisabled;
607 return tun;
638 struct tun_struct *tun;
640 tun = rtnl_dereference(tfile->tun);
642 if (tun && clean) {
648 if (tun && !tfile->detached) {
650 BUG_ON(index >= tun->numqueues);
652 rcu_assign_pointer(tun->tfiles[index],
653 tun->tfiles[tun->numqueues - 1]);
654 ntfile = rtnl_dereference(tun->tfiles[index]);
657 rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
660 --tun->numqueues;
662 RCU_INIT_POINTER(tfile->tun, NULL);
665 tun_disable_queue(tun, tfile);
670 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
673 tun_set_real_num_queues(tun);
675 tun = tun_enable_queue(tfile);
680 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
681 netif_carrier_off(tun->dev);
683 if (!(tun->flags & IFF_PERSIST) &&
684 tun->dev->reg_state == NETREG_REGISTERED)
685 unregister_netdevice(tun->dev);
687 if (tun)
695 struct tun_struct *tun;
699 tun = rtnl_dereference(tfile->tun);
700 dev = tun ? tun->dev : NULL;
712 struct tun_struct *tun = netdev_priv(dev);
714 int i, n = tun->numqueues;
717 tfile = rtnl_dereference(tun->tfiles[i]);
722 RCU_INIT_POINTER(tfile->tun, NULL);
723 --tun->numqueues;
725 list_for_each_entry(tfile, &tun->disabled, next) {
728 RCU_INIT_POINTER(tfile->tun, NULL);
730 BUG_ON(tun->numqueues != 0);
734 tfile = rtnl_dereference(tun->tfiles[i]);
741 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
748 BUG_ON(tun->numdisabled != 0);
750 if (tun->flags & IFF_PERSIST)
754 static int tun_attach(struct tun_struct *tun, struct file *file,
759 struct net_device *dev = tun->dev;
762 err = security_tun_dev_attach(tfile->socket.sk, tun->security);
767 if (rtnl_dereference(tfile->tun) && !tfile->detached)
771 if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
776 tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
782 if (!skip_filter && (tun->filter_attached == true)) {
784 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
797 tfile->queue_index = tun->numqueues;
809 tun->dev, tfile->queue_index, 0);
826 tun_napi_init(tun, tfile, napi, napi_frags);
829 if (rtnl_dereference(tun->xdp_prog))
836 /* Publish tfile->tun and tun->tfiles only after we've fully
841 rcu_assign_pointer(tfile->tun, tun);
842 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
843 tun->numqueues++;
844 tun_set_real_num_queues(tun);
851 struct tun_struct *tun;
854 tun = rcu_dereference(tfile->tun);
855 if (tun)
856 dev_hold(tun->dev);
859 return tun;
862 static void tun_put(struct tun_struct *tun)
864 dev_put(tun->dev);
977 struct tun_struct *tun = netdev_priv(dev);
978 struct ifreq *ifr = tun->ifr;
985 spin_lock_init(&tun->lock);
987 err = security_tun_dev_alloc_security(&tun->security);
993 tun_flow_init(tun);
1003 tun->flags = (tun->flags & ~TUN_FEATURES) |
1006 INIT_LIST_HEAD(&tun->disabled);
1007 err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
1010 tun_flow_uninit(tun);
1011 security_tun_dev_free_security(tun->security);
1040 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
1043 if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
1051 e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
1058 static unsigned int run_ebpf_filter(struct tun_struct *tun,
1062 struct tun_prog *prog = rcu_dereference(tun->filter_prog);
1073 struct tun_struct *tun = netdev_priv(dev);
1081 tfile = rcu_dereference(tun->tfiles[txq]);
1089 if (!rcu_dereference(tun->steering_prog))
1090 tun_automq_xmit(tun, skb);
1092 netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);
1097 if (!check_filter(&tun->txflt, skb)) {
1108 len = run_ebpf_filter(tun, skb, len);
1170 struct tun_struct *tun = netdev_priv(dev);
1172 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
1177 struct tun_struct *tun = netdev_priv(dev);
1182 tun->align = new_hr;
1188 struct tun_struct *tun = netdev_priv(dev);
1193 (unsigned long)atomic_long_read(&tun->rx_frame_errors);
1199 struct tun_struct *tun = netdev_priv(dev);
1204 old_prog = rtnl_dereference(tun->xdp_prog);
1205 rcu_assign_pointer(tun->xdp_prog, prog);
1209 for (i = 0; i < tun->numqueues; i++) {
1210 tfile = rtnl_dereference(tun->tfiles[i]);
1216 list_for_each_entry(tfile, &tun->disabled, next) {
1239 struct tun_struct *tun = netdev_priv(dev);
1241 if (!tun->numqueues)
1275 struct tun_struct *tun = netdev_priv(dev);
1287 numqueues = READ_ONCE(tun->numqueues);
1293 tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1354 static void tun_flow_init(struct tun_struct *tun)
1359 INIT_HLIST_HEAD(&tun->flows[i]);
1361 tun->ageing_time = TUN_FLOW_EXPIRE;
1362 timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1363 mod_timer(&tun->flow_gc_timer,
1364 round_jiffies_up(jiffies + tun->ageing_time));
1367 static void tun_flow_uninit(struct tun_struct *tun)
1369 del_timer_sync(&tun->flow_gc_timer);
1370 tun_flow_flush(tun);
1379 struct tun_struct *tun = netdev_priv(dev);
1381 switch (tun->flags & TUN_TYPE_MASK) {
1405 /* Currently tun does not support XDP, only tap does. */
1417 static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1421 return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1430 struct tun_struct *tun = tun_get(tfile);
1434 if (!tun)
1449 if (tun_sock_writeable(tun, tfile) ||
1451 tun_sock_writeable(tun, tfile)))
1454 if (tun->dev->reg_state != NETREG_REGISTERED)
1457 tun_put(tun);
1545 static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1550 u32 rx_batched = tun->rx_batched;
1585 static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
1588 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
1626 static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
1633 err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
1635 dev_core_stats_rx_dropped_inc(tun->dev);
1638 dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
1641 err = tun_xdp_tx(tun->dev, xdp);
1643 dev_core_stats_rx_dropped_inc(tun->dev);
1646 dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
1651 bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act);
1654 trace_xdp_exception(tun->dev, xdp_prog, act);
1657 dev_core_stats_rx_dropped_inc(tun->dev);
1664 static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1679 xdp_prog = rcu_dereference(tun->xdp_prog);
1710 xdp_prog = rcu_dereference(tun->xdp_prog);
1723 err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1750 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1757 size_t len = total_len, align = tun->align, linear;
1768 if (!(tun->flags & IFF_NO_PI)) {
1777 if (tun->flags & IFF_VNET_HDR) {
1778 int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1788 tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1789 gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1791 if (tun16_to_cpu(tun, gso.hdr_len) > len)
1796 if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1799 (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1812 copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1821 if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
1826 skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
1835 if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1838 linear = tun16_to_cpu(tun, gso.hdr_len);
1873 if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1874 atomic_long_inc(&tun->rx_frame_errors);
1879 switch (tun->flags & TUN_TYPE_MASK) {
1881 if (tun->flags & IFF_NO_PI) {
1899 skb->dev = tun->dev;
1907 skb->protocol = eth_type_trans(skb, tun->dev);
1929 xdp_prog = rcu_dereference(tun->xdp_prog);
1946 if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1951 if (unlikely(!(tun->dev->flags & IFF_UP))) {
1963 headlen = eth_get_headlen(tun->dev, skb->data,
1969 dev_core_stats_rx_dropped_inc(tun->dev);
2009 tun_rx_batched(tun, tfile, skb, more);
2016 dev_sw_netstats_rx_add(tun->dev, len);
2020 tun_flow_update(tun, rxhash, tfile);
2026 dev_core_stats_rx_dropped_inc(tun->dev);
2045 struct tun_struct *tun = tun_get(tfile);
2049 if (!tun)
2055 result = tun_get_user(tun, tfile, NULL, from, noblock, false);
2057 tun_put(tun);
2061 static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2070 if (tun->flags & IFF_VNET_HDR) {
2073 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2085 dev_sw_netstats_tx_add(tun->dev, 1, ret);
2092 static ssize_t tun_put_user(struct tun_struct *tun,
2106 if (tun->flags & IFF_VNET_HDR)
2107 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2111 if (!(tun->flags & IFF_NO_PI)) {
2132 tun_is_little_endian(tun), true,
2137 sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2138 tun16_to_cpu(tun, gso.hdr_len));
2139 print_hex_dump(KERN_ERR, "tun: ",
2142 min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2176 dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
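
     On the read side, tun_put_user() (lines 2092-2176) prefixes each packet
     with an optional struct tun_pi (absent with IFF_NO_PI) and then an
     optional virtio_net_hdr of tun->vnet_hdr_sz bytes (present with
     IFF_VNET_HDR). A sketch of the matching userspace parse; read_frame()
     and its buffer sizing are illustrative, and vnet_hdr_sz is assumed to
     have been fetched once via TUNGETVNETHDRSZ:

        #include <linux/if_tun.h>
        #include <string.h>
        #include <unistd.h>

        /* Strip the tun_pi / vnet header prefixes from one frame read off
         * the fd. 'flags' are the IFF_* bits the device was created with. */
        static ssize_t read_frame(int fd, short flags, int vnet_hdr_sz,
                                  char *pkt, size_t len)
        {
            char buf[65536 + 128];
            size_t off = 0;
            ssize_t n = read(fd, buf, sizeof(buf));

            if (n < 0)
                return -1;
            if (!(flags & IFF_NO_PI))
                off += sizeof(struct tun_pi);  /* __u16 flags + __be16 proto */
            if (flags & IFF_VNET_HDR)
                off += vnet_hdr_sz;
            if ((size_t)n < off)
                return -1;
            n -= off;
            if ((size_t)n > len)
                n = len;
            memcpy(pkt, buf + off, n);
            return n;
        }
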
2223 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2245 ret = tun_put_user_xdp(tun, tfile, xdpf, to);
2250 ret = tun_put_user(tun, tfile, skb, to);
2264 struct tun_struct *tun = tun_get(tfile);
2268 if (!tun)
2274 ret = tun_do_read(tun, tfile, to, noblock, NULL);
2278 tun_put(tun);
2290 static int __tun_set_ebpf(struct tun_struct *tun,
2303 spin_lock_bh(&tun->lock);
2305 lockdep_is_held(&tun->lock));
2307 spin_unlock_bh(&tun->lock);
2317 struct tun_struct *tun = netdev_priv(dev);
2319 BUG_ON(!(list_empty(&tun->disabled)));
2322 tun_flow_uninit(tun);
2323 security_tun_dev_free_security(tun->security);
2324 __tun_set_ebpf(tun, &tun->steering_prog, NULL);
2325 __tun_set_ebpf(tun, &tun->filter_prog, NULL);
2330 struct tun_struct *tun = netdev_priv(dev);
2332 tun->owner = INVALID_UID;
2333 tun->group = INVALID_GID;
2334 tun_default_link_ksettings(dev, &tun->link_ksettings);
2343 /* Trivial set of netlink ops to allow deleting tun or tap
2350 "tun/tap creation via rtnetlink is not supported.");
2373 struct tun_struct *tun = netdev_priv(dev);
2375 if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2377 if (uid_valid(tun->owner) &&
2379 from_kuid_munged(current_user_ns(), tun->owner)))
2381 if (gid_valid(tun->group) &&
2383 from_kgid_munged(current_user_ns(), tun->group)))
2385 if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2387 if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2389 if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2392 !!(tun->flags & IFF_MULTI_QUEUE)))
2394 if (tun->flags & IFF_MULTI_QUEUE) {
2395 if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2398 tun->numdisabled))
2443 static int tun_xdp_one(struct tun_struct *tun,
2460 xdp_prog = rcu_dereference(tun->xdp_prog);
2471 ret = tun_xdp_act(tun, xdp_prog, xdp, act);
2508 if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2509 atomic_long_inc(&tun->rx_frame_errors);
2515 skb->protocol = eth_type_trans(skb, tun->dev);
2528 if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2553 dev_sw_netstats_rx_add(tun->dev, datasize);
2556 tun_flow_update(tun, rxhash, tfile);
2566 struct tun_struct *tun = tun_get(tfile);
2570 if (!tun)
2586 ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2606 ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
2610 tun_put(tun);
2618 struct tun_struct *tun = tun_get(tfile);
2622 if (!tun) {
2636 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
2642 tun_put(tun);
2646 tun_put(tun);
2669 struct tun_struct *tun;
2672 tun = tun_get(tfile);
2673 if (!tun)
2677 tun_put(tun);
2682 /* Ops structure to mimic raw sockets with tun */
2690 .name = "tun",
2695 static int tun_flags(struct tun_struct *tun)
2697 return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2703 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2704 return sysfs_emit(buf, "0x%x\n", tun_flags(tun));
2710 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2711 return uid_valid(tun->owner)?
2713 from_kuid_munged(current_user_ns(), tun->owner)) :
2720 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2721 return gid_valid(tun->group) ?
2723 from_kgid_munged(current_user_ns(), tun->group)) :
2744 struct tun_struct *tun;
2766 tun = netdev_priv(dev);
2768 tun = netdev_priv(dev);
2773 !!(tun->flags & IFF_MULTI_QUEUE))
2776 if (tun_not_capable(tun))
2778 err = security_tun_dev_open(tun->security);
2782 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2788 if (tun->flags & IFF_MULTI_QUEUE &&
2789 (tun->numqueues + tun->numdisabled > 1)) {
2797 tun->flags = (tun->flags & ~TUN_FEATURES) |
2817 name = "tun%d";
2840 tun = netdev_priv(dev);
2841 tun->dev = dev;
2842 tun->flags = flags;
2843 tun->txflt.count = 0;
2844 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
2846 tun->align = NET_SKB_PAD;
2847 tun->filter_attached = false;
2848 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
2849 tun->rx_batched = 0;
2850 RCU_INIT_POINTER(tun->steering_prog, NULL);
2852 tun->ifr = ifr;
2853 tun->file = file;
2857 err = register_netdevice(tun->dev);
2863 * with dev_put() we need publish tun after registration.
2865 rcu_assign_pointer(tfile->tun, tun);
2869 netif_carrier_off(tun->dev);
2871 netif_carrier_on(tun->dev);
2876 if (netif_running(tun->dev))
2877 netif_tx_wake_all_queues(tun->dev);
2879 strcpy(ifr->ifr_name, tun->dev->name);
2883 static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2885 strcpy(ifr->ifr_name, tun->dev->name);
2887 ifr->ifr_flags = tun_flags(tun);
2891 /* This is like a cut-down ethtool ops, except done via tun fd so no
2893 static int set_offload(struct tun_struct *tun, unsigned long arg)
2927 tun->set_features = features;
2928 tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2929 tun->dev->wanted_features |= features;
2930 netdev_update_features(tun->dev);
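
     set_offload() (lines 2893-2930) translates TUN_F_* bits from the
     TUNSETOFFLOAD argument into wanted_features on the netdev, which is what
     lets a vhost/virtio backend hand in partial-checksum and GSO'd packets.
     A minimal caller sketch; the flag selection and the enable_offloads name
     are illustrative:

        #include <linux/if_tun.h>
        #include <sys/ioctl.h>

        /* Tell the driver we can consume checksum-offloaded and TSO packets. */
        static int enable_offloads(int tun_fd)
        {
            unsigned long off = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;

            return ioctl(tun_fd, TUNSETOFFLOAD, off); /* arg passed by value */
        }
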
2935 static void tun_detach_filter(struct tun_struct *tun, int n)
2941 tfile = rtnl_dereference(tun->tfiles[i]);
2947 tun->filter_attached = false;
2950 static int tun_attach_filter(struct tun_struct *tun)
2955 for (i = 0; i < tun->numqueues; i++) {
2956 tfile = rtnl_dereference(tun->tfiles[i]);
2958 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2961 tun_detach_filter(tun, i);
2966 tun->filter_attached = true;
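
     TUNATTACHFILTER (serviced through tun_attach_filter(), lines 2950-2966)
     copies a classic-BPF struct sock_fprog into tun->fprog and attaches it
     to every queue's socket; filter_attached is remembered so later queues
     get it too (line 782). A sketch with a trivial accept-all program;
     attach_filter is an illustrative name:

        #include <linux/filter.h>
        #include <linux/if_tun.h>
        #include <sys/ioctl.h>

        static int attach_filter(int tun_fd)
        {
            /* BPF_RET | BPF_K: accept up to 65535 bytes of each packet. */
            struct sock_filter accept_all = { 0x06, 0, 0, 0x0000ffff };
            struct sock_fprog fprog = { .len = 1, .filter = &accept_all };

            return ioctl(tun_fd, TUNATTACHFILTER, &fprog);
        }
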
2970 static void tun_set_sndbuf(struct tun_struct *tun)
2975 for (i = 0; i < tun->numqueues; i++) {
2976 tfile = rtnl_dereference(tun->tfiles[i]);
2977 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2984 struct tun_struct *tun;
2990 tun = tfile->detached;
2991 if (!tun) {
2995 ret = security_tun_dev_attach_queue(tun->security);
2998 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2999 tun->flags & IFF_NAPI_FRAGS, true);
3001 tun = rtnl_dereference(tfile->tun);
3002 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
3010 netdev_state_change(tun->dev);
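
     tun_set_queue() (lines 2984-3010) implements TUNSETQUEUE for multiqueue
     devices: IFF_DETACH_QUEUE parks a queue file on tun->disabled,
     IFF_ATTACH_QUEUE brings it back through tun_attach(). A userspace
     sketch, assuming the device was created with IFF_MULTI_QUEUE; set_queue
     is an illustrative name:

        #include <linux/if.h>
        #include <linux/if_tun.h>
        #include <string.h>
        #include <sys/ioctl.h>

        static int set_queue(int queue_fd, int attach)
        {
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            ifr.ifr_flags = attach ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
            return ioctl(queue_fd, TUNSETQUEUE, &ifr);
        }
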
3017 static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
3034 return __tun_set_ebpf(tun, prog_p, prog);
3037 /* Return correct value for tun->dev->addr_len based on tun->dev->type. */
3081 struct tun_struct *tun;
3118 tun = tun_get(tfile);
3121 if (tun)
3137 if (tun)
3152 if (!tun)
3155 netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
3157 net = dev_net(tun->dev);
3161 tun_get_iff(tun, &ifr);
3176 netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
3184 if (arg && !(tun->flags & IFF_PERSIST)) {
3185 tun->flags |= IFF_PERSIST;
3189 if (!arg && (tun->flags & IFF_PERSIST)) {
3190 tun->flags &= ~IFF_PERSIST;
3195 netif_info(tun, drv, tun->dev, "persist %s\n",
3206 tun->owner = owner;
3208 netif_info(tun, drv, tun->dev, "owner set to %u\n",
3209 from_kuid(&init_user_ns, tun->owner));
3219 tun->group = group;
3221 netif_info(tun, drv, tun->dev, "group set to %u\n",
3222 from_kgid(&init_user_ns, tun->group));
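
     TUNSETPERSIST, TUNSETOWNER and TUNSETGROUP (lines 3184-3222) keep the
     device across close() and restrict who may reattach (enforced by
     tun_not_capable(), lines 577-583). A small sketch; persist_for is an
     illustrative name and uid 1000 is an arbitrary example:

        #include <linux/if_tun.h>
        #include <sys/ioctl.h>

        /* Keep the interface after close(tun_fd) and let 'uid' reopen it. */
        static int persist_for(int tun_fd, unsigned int uid)
        {
            if (ioctl(tun_fd, TUNSETPERSIST, 1) < 0)
                return -1;
            return ioctl(tun_fd, TUNSETOWNER, uid);  /* arg passed by value */
        }
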
3227 if (tun->dev->flags & IFF_UP) {
3228 netif_info(tun, drv, tun->dev,
3233 tun->dev);
3236 netif_info(tun, drv, tun->dev,
3240 tun->dev->type = (int) arg;
3241 tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
3242 netif_info(tun, drv, tun->dev, "linktype set to %d\n",
3243 tun->dev->type);
3245 tun->dev);
3250 tun->msg_enable = (u32)arg;
3254 ret = set_offload(tun, arg);
3260 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3262 ret = update_filter(&tun->txflt, (void __user *)arg);
3267 dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
3274 ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
3293 tun->sndbuf = sndbuf;
3294 tun_set_sndbuf(tun);
3298 vnet_hdr_sz = tun->vnet_hdr_sz;
3313 tun->vnet_hdr_sz = vnet_hdr_sz;
3317 le = !!(tun->flags & TUN_VNET_LE);
3328 tun->flags |= TUN_VNET_LE;
3330 tun->flags &= ~TUN_VNET_LE;
3334 ret = tun_get_vnet_be(tun, argp);
3338 ret = tun_set_vnet_be(tun, argp);
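
     TUNSETVNETHDRSZ, TUNSETVNETLE and TUNSETVNETBE (lines 3298-3338)
     configure the vnet header size and the byte order later tested by
     tun_is_little_endian(). A typical virtio-backend setup sketch;
     setup_vnet_hdr is an illustrative name and the mergeable-rxbuf header
     size is just one common choice:

        #include <linux/if_tun.h>
        #include <linux/virtio_net.h>
        #include <sys/ioctl.h>

        static int setup_vnet_hdr(int tun_fd)
        {
            int hdrsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
            int le = 1;  /* sets TUN_VNET_LE */

            if (ioctl(tun_fd, TUNSETVNETHDRSZ, &hdrsz) < 0)
                return -1;
            return ioctl(tun_fd, TUNSETVNETLE, &le);
        }
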
3344 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3347 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
3350 ret = tun_attach_filter(tun);
3356 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3359 tun_detach_filter(tun, tun->numqueues);
3364 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3367 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
3373 ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
3377 ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3385 ret = tun_net_change_carrier(tun->dev, (bool)carrier);
3401 netdev_state_change(tun->dev);
3405 if (tun)
3406 tun_put(tun);
3478 RCU_INIT_POINTER(tfile->tun, NULL);
3497 /* tun groks IOCB_NOWAIT just fine, mark it as such */
3515 struct tun_struct *tun;
3521 tun = tun_get(tfile);
3522 if (tun)
3523 tun_get_iff(tun, &ifr);
3526 if (tun)
3527 tun_put(tun);
3553 .name = "tun",
3554 .nodename = "net/tun",
3575 struct tun_struct *tun = netdev_priv(dev);
3577 memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
3584 struct tun_struct *tun = netdev_priv(dev);
3586 memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
3592 struct tun_struct *tun = netdev_priv(dev);
3597 switch (tun->flags & TUN_TYPE_MASK) {
3599 strscpy(info->bus_info, "tun", sizeof(info->bus_info));
3609 struct tun_struct *tun = netdev_priv(dev);
3611 return tun->msg_enable;
3616 struct tun_struct *tun = netdev_priv(dev);
3618 tun->msg_enable = value;
3626 struct tun_struct *tun = netdev_priv(dev);
3628 ec->rx_max_coalesced_frames = tun->rx_batched;
3638 struct tun_struct *tun = netdev_priv(dev);
3641 tun->rx_batched = NAPI_POLL_WEIGHT;
3643 tun->rx_batched = ec->rx_max_coalesced_frames;
3661 static int tun_queue_resize(struct tun_struct *tun)
3663 struct net_device *dev = tun->dev;
3666 int n = tun->numqueues + tun->numdisabled;
3673 for (i = 0; i < tun->numqueues; i++) {
3674 tfile = rtnl_dereference(tun->tfiles[i]);
3677 list_for_each_entry(tfile, &tun->disabled, next)
3692 struct tun_struct *tun = netdev_priv(dev);
3700 if (tun_queue_resize(tun))
3704 for (i = 0; i < tun->numqueues; i++) {
3707 tfile = rtnl_dereference(tun->tfiles[i]);
3763 /* Get an underlying socket object from tun file. Returns error unless file is
3798 MODULE_ALIAS("devname:net/tun");
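
     For context, the canonical open sequence all of the above serves
     (documented in Documentation/networking/tuntap.rst): open the net/tun
     devnode registered by the MODULE_ALIAS above, then issue TUNSETIFF,
     which lands in tun_set_iff() and allocates a "tun%d" device (line 2817).
     A minimal sketch:

        #include <fcntl.h>
        #include <linux/if.h>
        #include <linux/if_tun.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <unistd.h>

        int main(void)
        {
            struct ifreq ifr;
            char buf[2048];
            int fd = open("/dev/net/tun", O_RDWR);

            if (fd < 0)
                return 1;
            memset(&ifr, 0, sizeof(ifr));
            ifr.ifr_flags = IFF_TUN | IFF_NO_PI;      /* L3, no tun_pi prefix */
            strncpy(ifr.ifr_name, "tun%d", IFNAMSIZ); /* kernel resolves %d */
            if (ioctl(fd, TUNSETIFF, &ifr) < 0)       /* via tun_chr_ioctl() */
                return 1;
            printf("created %s\n", ifr.ifr_name);

            /* Blocks until the interface is up and a packet is routed to it;
             * each read() then returns exactly one IP packet. */
            ssize_t n = read(fd, buf, sizeof(buf));
            printf("read %zd bytes\n", n);
            close(fd);
            return 0;
        }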