Lines matching refs:tun in drivers/net/tun.c (Linux TUN/TAP driver); each fragment below is prefixed with its line number in the source file.

6  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
30 #define DRV_NAME "tun"
143 struct tun_struct __rcu *tun;
169 struct tun_struct *tun;
233 static void tun_flow_init(struct tun_struct *tun);
234 static void tun_flow_uninit(struct tun_struct *tun);
276 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
282 netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
312 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
314 return tun->flags & TUN_VNET_BE ? false :
318 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
320 int be = !!(tun->flags & TUN_VNET_BE);
328 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
336 tun->flags |= TUN_VNET_BE;
338 tun->flags &= ~TUN_VNET_BE;
343 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
348 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
353 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
359 static inline bool tun_is_little_endian(struct tun_struct *tun)
361 return tun->flags & TUN_VNET_LE ||
362 tun_legacy_is_little_endian(tun);
365 static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
367 return __virtio16_to_cpu(tun_is_little_endian(tun), val);
370 static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
372 return __cpu_to_virtio16(tun_is_little_endian(tun), val);
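
tun16_to_cpu()/cpu_to_tun16() above (365-372) defer to the generic virtio byte-order helpers, with tun_is_little_endian() (359) resolving the order per device: TUN_VNET_LE forces little-endian, TUN_VNET_BE (when the legacy-BE option is compiled in, 312-338) forces big-endian, and otherwise the legacy virtio default applies. A minimal sketch of what the conversion amounts to, assuming the semantics of __virtio16_to_cpu() from linux/virtio_byteorder.h:

/* Sketch, not the driver source: resolve a __virtio16 field whose byte
 * order was negotiated per device rather than fixed at build time. */
static inline u16 sketch_virtio16_to_cpu(bool little_endian, __virtio16 val)
{
        if (little_endian)
                return le16_to_cpu((__force __le16)val);
        else
                return be16_to_cpu((__force __be16)val);
}
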
391 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
398 netif_info(tun, tx_queued, tun->dev,
405 e->tun = tun;
407 ++tun->flow_count;
412 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
414 netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
418 --tun->flow_count;
421 static void tun_flow_flush(struct tun_struct *tun)
425 spin_lock_bh(&tun->lock);
430 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
431 tun_flow_delete(tun, e);
433 spin_unlock_bh(&tun->lock);
436 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
440 spin_lock_bh(&tun->lock);
445 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
447 tun_flow_delete(tun, e);
450 spin_unlock_bh(&tun->lock);
455 struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
456 unsigned long delay = tun->ageing_time;
461 spin_lock(&tun->lock);
466 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
471 tun_flow_delete(tun, e);
481 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
482 spin_unlock(&tun->lock);
485 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
490 unsigned long delay = tun->ageing_time;
493 head = &tun->flows[tun_hashfn(rxhash)];
506 spin_lock_bh(&tun->lock);
508 tun->flow_count < MAX_TAP_FLOWS)
509 tun_flow_create(tun, head, rxhash, queue_index);
511 if (!timer_pending(&tun->flow_gc_timer))
512 mod_timer(&tun->flow_gc_timer,
514 spin_unlock_bh(&tun->lock);
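
Read together, the fragments at 485-514 form a double-checked insert: the lookup runs lockless under RCU, and only a miss takes tun->lock, re-checks, creates the entry, and arms the GC timer if it is idle. A simplified reconstruction of the body (the real code also records the RPS hash and uses READ_ONCE/WRITE_ONCE on queue_index):

        head = &tun->flows[tun_hashfn(rxhash)];
        rcu_read_lock();
        e = tun_flow_find(head, rxhash);        /* lockless fast path */
        if (likely(e)) {
                e->queue_index = queue_index;   /* refresh the mapping */
                e->updated = jiffies;           /* and the last-use stamp */
        } else {
                spin_lock_bh(&tun->lock);
                if (!tun_flow_find(head, rxhash) &&     /* re-check under lock */
                    tun->flow_count < MAX_TAP_FLOWS)
                        tun_flow_create(tun, head, rxhash, queue_index);
                if (!timer_pending(&tun->flow_gc_timer))
                        mod_timer(&tun->flow_gc_timer,
                                  round_jiffies_up(jiffies + delay));
                spin_unlock_bh(&tun->lock);
        }
        rcu_read_unlock();
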
535 static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
541 numqueues = READ_ONCE(tun->numqueues);
544 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
556 static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
562 numqueues = READ_ONCE(tun->numqueues);
566 prog = rcu_dereference(tun->steering_prog);
576 struct tun_struct *tun = netdev_priv(dev);
580 if (rcu_dereference(tun->steering_prog))
581 ret = tun_ebpf_select_queue(tun, skb);
583 ret = tun_automq_select_queue(tun, skb);
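
tun_select_queue() (576-583) dispatches to an attached steering eBPF program when one is present and otherwise falls back to the rxhash flow table. In the fallback, tun_automq_select_queue() (535-544) handles a flow-table miss by mapping the 32-bit symmetric hash into [0, numqueues) with a multiply and shift instead of a divide. The arithmetic as a standalone sketch:

/* Sketch: map a uniform 32-bit hash h into [0, n) without dividing.
 * (u64)h * n <= (2^32 - 1) * n, so the top 32 bits of the 64-bit
 * product always fall in [0, n). */
static inline u32 sketch_scale_hash(u32 h, u32 n)
{
        return ((u64)h * n) >> 32;
}
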
589 static inline bool tun_not_capable(struct tun_struct *tun)
592 struct net *net = dev_net(tun->dev);
594 return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
595 (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
599 static void tun_set_real_num_queues(struct tun_struct *tun)
601 netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
602 netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
605 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
607 tfile->detached = tun;
608 list_add_tail(&tfile->next, &tun->disabled);
609 ++tun->numdisabled;
614 struct tun_struct *tun = tfile->detached;
618 --tun->numdisabled;
619 return tun;
650 struct tun_struct *tun;
652 tun = rtnl_dereference(tfile->tun);
654 if (tun && clean) {
660 if (tun && !tfile->detached) {
662 BUG_ON(index >= tun->numqueues);
664 rcu_assign_pointer(tun->tfiles[index],
665 tun->tfiles[tun->numqueues - 1]);
666 ntfile = rtnl_dereference(tun->tfiles[index]);
668 rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
671 --tun->numqueues;
673 RCU_INIT_POINTER(tfile->tun, NULL);
676 tun_disable_queue(tun, tfile);
681 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
684 tun_set_real_num_queues(tun);
686 tun = tun_enable_queue(tfile);
691 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
692 netif_carrier_off(tun->dev);
694 if (!(tun->flags & IFF_PERSIST) &&
695 tun->dev->reg_state == NETREG_REGISTERED)
696 unregister_netdevice(tun->dev);
698 if (tun)
706 struct tun_struct *tun;
710 tun = rtnl_dereference(tfile->tun);
711 dev = tun ? tun->dev : NULL;
723 struct tun_struct *tun = netdev_priv(dev);
725 int i, n = tun->numqueues;
728 tfile = rtnl_dereference(tun->tfiles[i]);
733 RCU_INIT_POINTER(tfile->tun, NULL);
734 --tun->numqueues;
736 list_for_each_entry(tfile, &tun->disabled, next) {
739 RCU_INIT_POINTER(tfile->tun, NULL);
741 BUG_ON(tun->numqueues != 0);
745 tfile = rtnl_dereference(tun->tfiles[i]);
752 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
759 BUG_ON(tun->numdisabled != 0);
761 if (tun->flags & IFF_PERSIST)
765 static int tun_attach(struct tun_struct *tun, struct file *file,
770 struct net_device *dev = tun->dev;
773 err = security_tun_dev_attach(tfile->socket.sk, tun->security);
778 if (rtnl_dereference(tfile->tun) && !tfile->detached)
782 if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
787 tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
793 if (!skip_filter && (tun->filter_attached == true)) {
795 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
808 tfile->queue_index = tun->numqueues;
820 tun->dev, tfile->queue_index);
837 tun_napi_init(tun, tfile, napi, napi_frags);
840 if (rtnl_dereference(tun->xdp_prog))
847 /* Publish tfile->tun and tun->tfiles only after we've fully
852 rcu_assign_pointer(tfile->tun, tun);
853 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
854 tun->numqueues++;
855 tun_set_real_num_queues(tun);
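
The truncated comment at 847 ("Publish tfile->tun and tun->tfiles only after we've fully ...") names the ordering contract that rcu_assign_pointer() at 852-853 enforces. A minimal sketch (struct and names hypothetical) of the publish/read pairing:

struct item {
        int ready;
};

static struct item __rcu *slot;

static void publish(struct item *it)
{
        it->ready = 1;                  /* complete all initialization... */
        rcu_assign_pointer(slot, it);   /* ...before the pointer is visible */
}

static int peek(void)
{
        struct item *it;
        int ret = 0;

        rcu_read_lock();
        it = rcu_dereference(slot);     /* dependency-ordered load */
        if (it)
                ret = it->ready;        /* a reader that sees it sees 1 */
        rcu_read_unlock();
        return ret;
}
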
862 struct tun_struct *tun;
865 tun = rcu_dereference(tfile->tun);
866 if (tun)
867 dev_hold(tun->dev);
870 return tun;
873 static void tun_put(struct tun_struct *tun)
875 dev_put(tun->dev);
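
Filled in from the fragments at 862-875, tun_get()/tun_put() form a lookup-with-reference pair: the RCU read side keeps tfile->tun stable just long enough to take a real device reference, which tun_put() later drops:

static struct tun_struct *tun_get(struct tun_file *tfile)
{
        struct tun_struct *tun;

        rcu_read_lock();
        tun = rcu_dereference(tfile->tun);
        if (tun)
                dev_hold(tun->dev);     /* pin the device past the RCU section */
        rcu_read_unlock();

        return tun;
}

static void tun_put(struct tun_struct *tun)
{
        dev_put(tun->dev);
}
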
988 struct tun_struct *tun = netdev_priv(dev);
989 struct ifreq *ifr = tun->ifr;
992 tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
993 if (!tun->pcpu_stats)
996 spin_lock_init(&tun->lock);
998 err = security_tun_dev_alloc_security(&tun->security);
1000 free_percpu(tun->pcpu_stats);
1004 tun_flow_init(tun);
1014 tun->flags = (tun->flags & ~TUN_FEATURES) |
1017 INIT_LIST_HEAD(&tun->disabled);
1018 err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
1021 tun_flow_uninit(tun);
1022 security_tun_dev_free_security(tun->security);
1023 free_percpu(tun->pcpu_stats);
1051 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
1054 if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
1062 e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
1069 static unsigned int run_ebpf_filter(struct tun_struct *tun,
1073 struct tun_prog *prog = rcu_dereference(tun->filter_prog);
1084 struct tun_struct *tun = netdev_priv(dev);
1091 tfile = rcu_dereference(tun->tfiles[txq]);
1097 if (!rcu_dereference(tun->steering_prog))
1098 tun_automq_xmit(tun, skb);
1100 netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);
1105 if (!check_filter(&tun->txflt, skb))
1112 len = run_ebpf_filter(tun, skb, len);
1144 this_cpu_inc(tun->pcpu_stats->tx_dropped);
1163 struct tun_struct *tun = netdev_priv(dev);
1165 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
1170 struct tun_struct *tun = netdev_priv(dev);
1175 tun->align = new_hr;
1182 struct tun_struct *tun = netdev_priv(dev);
1190 p = per_cpu_ptr(tun->pcpu_stats, i);
1217 struct tun_struct *tun = netdev_priv(dev);
1222 old_prog = rtnl_dereference(tun->xdp_prog);
1223 rcu_assign_pointer(tun->xdp_prog, prog);
1227 for (i = 0; i < tun->numqueues; i++) {
1228 tfile = rtnl_dereference(tun->tfiles[i]);
1234 list_for_each_entry(tfile, &tun->disabled, next) {
1257 struct tun_struct *tun = netdev_priv(dev);
1259 if (!tun->numqueues)
1293 struct tun_struct *tun = netdev_priv(dev);
1306 numqueues = READ_ONCE(tun->numqueues);
1312 tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1326 this_cpu_inc(tun->pcpu_stats->tx_dropped);
1369 static void tun_flow_init(struct tun_struct *tun)
1374 INIT_HLIST_HEAD(&tun->flows[i]);
1376 tun->ageing_time = TUN_FLOW_EXPIRE;
1377 timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1378 mod_timer(&tun->flow_gc_timer,
1379 round_jiffies_up(jiffies + tun->ageing_time));
1382 static void tun_flow_uninit(struct tun_struct *tun)
1384 del_timer_sync(&tun->flow_gc_timer);
1385 tun_flow_flush(tun);
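
The flow GC timer is the embedded-timer idiom: timer_setup() at 1377 registers the callback, from_timer() at 455 recovers the containing tun_struct inside it, and del_timer_sync() at 1384 tears it down before the table is flushed. A sketch with a hypothetical owner struct:

struct flowtab {
        struct timer_list gc_timer;     /* embedded, no separate allocation */
};

static void flowtab_gc(struct timer_list *t)
{
        struct flowtab *ft = from_timer(ft, t, gc_timer); /* container_of */

        /* ... expire stale entries here, then re-arm: */
        mod_timer(&ft->gc_timer, round_jiffies_up(jiffies + HZ));
}

static void flowtab_init(struct flowtab *ft)
{
        timer_setup(&ft->gc_timer, flowtab_gc, 0);
        mod_timer(&ft->gc_timer, round_jiffies_up(jiffies + HZ));
}
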
1394 struct tun_struct *tun = netdev_priv(dev);
1396 switch (tun->flags & TUN_TYPE_MASK) {
1427 static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1431 return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1440 struct tun_struct *tun = tun_get(tfile);
1444 if (!tun)
1459 if (tun_sock_writeable(tun, tfile) ||
1461 tun_sock_writeable(tun, tfile)))
1464 if (tun->dev->reg_state != NETREG_REGISTERED)
1467 tun_put(tun);
1552 static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1557 u32 rx_batched = tun->rx_batched;
1592 static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
1595 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
1633 static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
1640 err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
1645 err = tun_xdp_tx(tun->dev, xdp);
1655 trace_xdp_exception(tun->dev, xdp_prog, act);
1658 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1665 static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1680 xdp_prog = rcu_dereference(tun->xdp_prog);
1711 xdp_prog = rcu_dereference(tun->xdp_prog);
1728 err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1755 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1762 size_t len = total_len, align = tun->align, linear;
1773 if (!(tun->flags & IFF_NO_PI)) {
1782 if (tun->flags & IFF_VNET_HDR) {
1783 int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1793 tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1794 gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1796 if (tun16_to_cpu(tun, gso.hdr_len) > len)
1801 if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1804 (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1817 copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1826 if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
1831 skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
1833 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1841 if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1844 linear = tun16_to_cpu(tun, gso.hdr_len);
1862 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1876 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1887 if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1888 this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1898 switch (tun->flags & TUN_TYPE_MASK) {
1900 if (tun->flags & IFF_NO_PI) {
1911 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1919 skb->dev = tun->dev;
1926 skb->protocol = eth_type_trans(skb, tun->dev);
1950 xdp_prog = rcu_dereference(tun->xdp_prog);
1971 if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1976 if (unlikely(!(tun->dev->flags & IFF_UP))) {
1987 headlen = eth_get_headlen(tun->dev, skb->data,
1993 this_cpu_inc(tun->pcpu_stats->rx_dropped);
2025 tun_rx_batched(tun, tfile, skb, more);
2031 stats = get_cpu_ptr(tun->pcpu_stats);
2039 tun_flow_update(tun, rxhash, tfile);
2048 struct tun_struct *tun = tun_get(tfile);
2052 if (!tun)
2058 result = tun_get_user(tun, tfile, NULL, from, noblock, false);
2060 tun_put(tun);
2064 static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2074 if (tun->flags & IFF_VNET_HDR) {
2077 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2088 stats = get_cpu_ptr(tun->pcpu_stats);
2093 put_cpu_ptr(tun->pcpu_stats);
2099 static ssize_t tun_put_user(struct tun_struct *tun,
2114 if (tun->flags & IFF_VNET_HDR)
2115 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2119 if (!(tun->flags & IFF_NO_PI)) {
2140 tun_is_little_endian(tun), true,
2145 netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
2146 sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2147 tun16_to_cpu(tun, gso.hdr_len));
2148 print_hex_dump(KERN_ERR, "tun: ",
2151 min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2185 stats = get_cpu_ptr(tun->pcpu_stats);
2190 put_cpu_ptr(tun->pcpu_stats);
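
The stats code mixes two per-CPU access styles: get_cpu_ptr()/put_cpu_ptr() (2088/2093, 2185/2190) bracket a preemption-disabled window around multi-field updates, while lone counters such as tx_dropped (1144) use this_cpu_inc() with no bracket at all. A sketch with a simplified stats struct (the real one is tun_pcpu_stats):

struct sketch_stats {
        u64 tx_packets;
        u64 tx_bytes;
        struct u64_stats_sync syncp;    /* keeps the u64 pair tear-free on 32-bit */
        u32 tx_dropped;                 /* plain u32: this_cpu_inc() suffices */
};

static void account_tx(struct sketch_stats __percpu *pcpu, unsigned int len)
{
        struct sketch_stats *stats = get_cpu_ptr(pcpu); /* disables preemption */

        u64_stats_update_begin(&stats->syncp);
        stats->tx_packets++;
        stats->tx_bytes += len;
        u64_stats_update_end(&stats->syncp);
        put_cpu_ptr(pcpu);                              /* re-enables preemption */
}
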
2236 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2258 ret = tun_put_user_xdp(tun, tfile, xdpf, to);
2263 ret = tun_put_user(tun, tfile, skb, to);
2277 struct tun_struct *tun = tun_get(tfile);
2281 if (!tun)
2287 ret = tun_do_read(tun, tfile, to, noblock, NULL);
2291 tun_put(tun);
2303 static int __tun_set_ebpf(struct tun_struct *tun,
2316 spin_lock_bh(&tun->lock);
2318 lockdep_is_held(&tun->lock));
2320 spin_unlock_bh(&tun->lock);
2330 struct tun_struct *tun = netdev_priv(dev);
2332 BUG_ON(!(list_empty(&tun->disabled)));
2334 free_percpu(tun->pcpu_stats);
2336 tun_flow_uninit(tun);
2337 security_tun_dev_free_security(tun->security);
2338 __tun_set_ebpf(tun, &tun->steering_prog, NULL);
2339 __tun_set_ebpf(tun, &tun->filter_prog, NULL);
2344 struct tun_struct *tun = netdev_priv(dev);
2346 tun->owner = INVALID_UID;
2347 tun->group = INVALID_GID;
2348 tun_default_link_ksettings(dev, &tun->link_ksettings);
2357 /* Trivial set of netlink ops to allow deleting tun or tap
2364 "tun/tap creation via rtnetlink is not supported.");
2387 struct tun_struct *tun = netdev_priv(dev);
2389 if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2391 if (uid_valid(tun->owner) &&
2393 from_kuid_munged(current_user_ns(), tun->owner)))
2395 if (gid_valid(tun->group) &&
2397 from_kgid_munged(current_user_ns(), tun->group)))
2399 if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2401 if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2403 if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2406 !!(tun->flags & IFF_MULTI_QUEUE)))
2408 if (tun->flags & IFF_MULTI_QUEUE) {
2409 if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2412 tun->numdisabled))
2457 static int tun_xdp_one(struct tun_struct *tun,
2477 xdp_prog = rcu_dereference(tun->xdp_prog);
2488 err = tun_xdp_act(tun, xdp_prog, xdp, act);
2525 if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2526 this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
2532 skb->protocol = eth_type_trans(skb, tun->dev);
2543 if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2552 stats = this_cpu_ptr(tun->pcpu_stats);
2559 tun_flow_update(tun, rxhash, tfile);
2569 struct tun_struct *tun = tun_get(tfile);
2573 if (!tun)
2589 tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2604 ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
2608 tun_put(tun);
2616 struct tun_struct *tun = tun_get(tfile);
2620 if (!tun) {
2634 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
2640 tun_put(tun);
2644 tun_put(tun);
2667 struct tun_struct *tun;
2670 tun = tun_get(tfile);
2671 if (!tun)
2675 tun_put(tun);
2680 /* Ops structure to mimic raw sockets with tun */
2688 .name = "tun",
2693 static int tun_flags(struct tun_struct *tun)
2695 return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2701 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2702 return sprintf(buf, "0x%x\n", tun_flags(tun));
2708 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2709 return uid_valid(tun->owner)?
2711 from_kuid_munged(current_user_ns(), tun->owner)):
2718 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2719 return gid_valid(tun->group) ?
2721 from_kgid_munged(current_user_ns(), tun->group)):
2742 struct tun_struct *tun;
2764 tun = netdev_priv(dev);
2766 tun = netdev_priv(dev);
2771 !!(tun->flags & IFF_MULTI_QUEUE))
2774 if (tun_not_capable(tun))
2776 err = security_tun_dev_open(tun->security);
2780 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2786 if (tun->flags & IFF_MULTI_QUEUE &&
2787 (tun->numqueues + tun->numdisabled > 1)) {
2795 tun->flags = (tun->flags & ~TUN_FEATURES) |
2815 name = "tun%d";
2838 tun = netdev_priv(dev);
2839 tun->dev = dev;
2840 tun->flags = flags;
2841 tun->txflt.count = 0;
2842 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
2844 tun->align = NET_SKB_PAD;
2845 tun->filter_attached = false;
2846 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
2847 tun->rx_batched = 0;
2848 RCU_INIT_POINTER(tun->steering_prog, NULL);
2850 tun->ifr = ifr;
2851 tun->file = file;
2855 err = register_netdevice(tun->dev);
2861 * with dev_put() we need to publish tun after registration.
2863 rcu_assign_pointer(tfile->tun, tun);
2866 netif_carrier_on(tun->dev);
2871 if (netif_running(tun->dev))
2872 netif_tx_wake_all_queues(tun->dev);
2874 strcpy(ifr->ifr_name, tun->dev->name);
2878 static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2880 strcpy(ifr->ifr_name, tun->dev->name);
2882 ifr->ifr_flags = tun_flags(tun);
2886 /* This is like a cut-down ethtool ops, except done via tun fd so no
2888 static int set_offload(struct tun_struct *tun, unsigned long arg)
2916 tun->set_features = features;
2917 tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2918 tun->dev->wanted_features |= features;
2919 netdev_update_features(tun->dev);
2924 static void tun_detach_filter(struct tun_struct *tun, int n)
2930 tfile = rtnl_dereference(tun->tfiles[i]);
2936 tun->filter_attached = false;
2939 static int tun_attach_filter(struct tun_struct *tun)
2944 for (i = 0; i < tun->numqueues; i++) {
2945 tfile = rtnl_dereference(tun->tfiles[i]);
2947 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2950 tun_detach_filter(tun, i);
2955 tun->filter_attached = true;
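
The fragments at 2939-2955 show an all-or-nothing attach: on the first sk_attach_filter() failure, tun_detach_filter(tun, i) rolls back exactly the i sockets already done. Filled in (only the declarations and braces are reconstructed):

static int tun_attach_filter(struct tun_struct *tun)
{
        int i, ret = 0;
        struct tun_file *tfile;

        for (i = 0; i < tun->numqueues; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
                ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
                if (ret) {
                        tun_detach_filter(tun, i);      /* roll back 0..i-1 */
                        return ret;
                }
        }

        tun->filter_attached = true;
        return ret;
}
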
2959 static void tun_set_sndbuf(struct tun_struct *tun)
2964 for (i = 0; i < tun->numqueues; i++) {
2965 tfile = rtnl_dereference(tun->tfiles[i]);
2966 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2973 struct tun_struct *tun;
2979 tun = tfile->detached;
2980 if (!tun) {
2984 ret = security_tun_dev_attach_queue(tun->security);
2987 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2988 tun->flags & IFF_NAPI_FRAGS, true);
2990 tun = rtnl_dereference(tfile->tun);
2991 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
2999 netdev_state_change(tun->dev);
3006 static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
3023 return __tun_set_ebpf(tun, prog_p, prog);
3026 /* Return correct value for tun->dev->addr_len based on tun->dev->type. */
3070 struct tun_struct *tun;
3108 tun = tun_get(tfile);
3111 if (tun)
3127 if (tun)
3142 if (!tun)
3145 netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
3147 net = dev_net(tun->dev);
3151 tun_get_iff(tun, &ifr);
3166 netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
3174 if (arg && !(tun->flags & IFF_PERSIST)) {
3175 tun->flags |= IFF_PERSIST;
3179 if (!arg && (tun->flags & IFF_PERSIST)) {
3180 tun->flags &= ~IFF_PERSIST;
3185 netif_info(tun, drv, tun->dev, "persist %s\n",
3196 tun->owner = owner;
3198 netif_info(tun, drv, tun->dev, "owner set to %u\n",
3199 from_kuid(&init_user_ns, tun->owner));
3209 tun->group = group;
3211 netif_info(tun, drv, tun->dev, "group set to %u\n",
3212 from_kgid(&init_user_ns, tun->group));
3217 if (tun->dev->flags & IFF_UP) {
3218 netif_info(tun, drv, tun->dev,
3222 tun->dev->type = (int) arg;
3223 tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
3224 netif_info(tun, drv, tun->dev, "linktype set to %d\n",
3225 tun->dev->type);
3231 tun->msg_enable = (u32)arg;
3235 ret = set_offload(tun, arg);
3241 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3243 ret = update_filter(&tun->txflt, (void __user *)arg);
3248 dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
3255 ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
3274 tun->sndbuf = sndbuf;
3275 tun_set_sndbuf(tun);
3279 vnet_hdr_sz = tun->vnet_hdr_sz;
3294 tun->vnet_hdr_sz = vnet_hdr_sz;
3298 le = !!(tun->flags & TUN_VNET_LE);
3309 tun->flags |= TUN_VNET_LE;
3311 tun->flags &= ~TUN_VNET_LE;
3315 ret = tun_get_vnet_be(tun, argp);
3319 ret = tun_set_vnet_be(tun, argp);
3325 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3328 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
3331 ret = tun_attach_filter(tun);
3337 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3340 tun_detach_filter(tun, tun->numqueues);
3345 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3348 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
3354 ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
3358 ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3366 ret = tun_net_change_carrier(tun->dev, (bool)carrier);
3382 netdev_state_change(tun->dev);
3386 if (tun)
3387 tun_put(tun);
3459 RCU_INIT_POINTER(tfile->tun, NULL);
3494 struct tun_struct *tun;
3500 tun = tun_get(tfile);
3501 if (tun)
3502 tun_get_iff(tun, &ifr);
3505 if (tun)
3506 tun_put(tun);
3532 .name = "tun",
3533 .nodename = "net/tun",
3554 struct tun_struct *tun = netdev_priv(dev);
3556 memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
3563 struct tun_struct *tun = netdev_priv(dev);
3565 memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
3571 struct tun_struct *tun = netdev_priv(dev);
3576 switch (tun->flags & TUN_TYPE_MASK) {
3578 strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
3588 struct tun_struct *tun = netdev_priv(dev);
3590 return tun->msg_enable;
3595 struct tun_struct *tun = netdev_priv(dev);
3597 tun->msg_enable = value;
3603 struct tun_struct *tun = netdev_priv(dev);
3605 ec->rx_max_coalesced_frames = tun->rx_batched;
3613 struct tun_struct *tun = netdev_priv(dev);
3616 tun->rx_batched = NAPI_POLL_WEIGHT;
3618 tun->rx_batched = ec->rx_max_coalesced_frames;
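
Filled in from the fragments at 3613-3618: the ethtool set path clamps the requested frame count to NAPI_POLL_WEIGHT before storing it in tun->rx_batched, the batching threshold consumed by tun_rx_batched() (1552-1557):

static int tun_set_coalesce(struct net_device *dev,
                            struct ethtool_coalesce *ec)
{
        struct tun_struct *tun = netdev_priv(dev);

        if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
                tun->rx_batched = NAPI_POLL_WEIGHT;     /* clamp to NAPI budget */
        else
                tun->rx_batched = ec->rx_max_coalesced_frames;

        return 0;
}
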
3636 static int tun_queue_resize(struct tun_struct *tun)
3638 struct net_device *dev = tun->dev;
3641 int n = tun->numqueues + tun->numdisabled;
3648 for (i = 0; i < tun->numqueues; i++) {
3649 tfile = rtnl_dereference(tun->tfiles[i]);
3652 list_for_each_entry(tfile, &tun->disabled, next)
3667 struct tun_struct *tun = netdev_priv(dev);
3675 if (tun_queue_resize(tun))
3679 for (i = 0; i < tun->numqueues; i++) {
3682 tfile = rtnl_dereference(tun->tfiles[i]);
3738 /* Get an underlying socket object from tun file. Returns error unless file is
3773 MODULE_ALIAS("devname:net/tun");