// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>
 *
 *  This file implements the character-device side (/dev/net/tun) and
 *  the netdevice side of the driver, and the glue connecting the two.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME "tun"
#define DRV_VERSION "1.6"
#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
#define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
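
/*
 * Typical userspace usage (illustrative sketch, not part of this driver;
 * error handling omitted, the interface name "tun0" is an example):
 *
 *	int fd = open("/dev/net/tun", O_RDWR);
 *	struct ifreq ifr = { .ifr_flags = IFF_TUN | IFF_NO_PI };
 *	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);
 *	// each read() now returns one packet routed to tun0,
 *	// each write() injects one packet into the kernel stack.
 */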

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>
#include <linux/ieee802154.h>
#include <linux/if_ltalk.h>
#include <uapi/linux/if_fddi.h>
#include <uapi/linux/if_hippi.h>
#include <uapi/linux/if_fc.h>
#include <net/ax25.h>
#include <net/rose.h>
#include <net/6lowpan.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags.
 *
 * IFF_ATTACH_QUEUE is never stored in the device flags, so it is
 * overloaded here to mean "fasync notification requested" when kept
 * in tun_file->flags.
 */
#define TUN_FASYNC IFF_ATTACH_QUEUE

/* High bits of tun->flags, invisible to userspace: requested byte order
 * of the virtio-net header (see TUNSETVNETLE/TUNSETVNETBE). */
#define TUN_VNET_LE 0x80000000
#define TUN_VNET_BE 0x40000000
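
/*
 * Example (illustrative): a big-endian host feeding a legacy little-endian
 * virtio guest flips the header byte order once at setup time:
 *
 *	int le = 1;
 *	ioctl(fd, TUNSETVNETLE, &le);
 *
 * After this, tun16_to_cpu()/cpu_to_tun16() below transparently byteswap
 * the __virtio16 fields of struct virtio_net_hdr.
 */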
0098
0099 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
0100 IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
0101
0102 #define GOODCOPY_LEN 128
0103
0104 #define FLT_EXACT_COUNT 8
0105 struct tap_filter {
0106 unsigned int count;
0107 u32 mask[2];
0108 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
0109 };
0110
0111
0112
0113 #define MAX_TAP_QUEUES 256
0114 #define MAX_TAP_FLOWS 4096
0115
0116 #define TUN_FLOW_EXPIRE (3 * HZ)

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) to serve in case of ioctl txfilter.
 *
 * The tun_file and tun_struct are loosely coupled, the pointer from one
 * to the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
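
/*
 * TUN_NUM_FLOW_ENTRIES must stay a power of two so that tun_hashfn()
 * below can reduce a 32-bit flow hash with a cheap mask instead of a
 * modulo.  As an illustration: rxhash 0x12345678 & 0x3ff selects
 * bucket 0x278.
 */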

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved into tun_file, the socket filter, sndbuf
 * and vnet header size of a persistent device are restored on the next
 * attach to preserve the old behavior.
 */
struct tun_struct {
	struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
	unsigned int numqueues;
	unsigned int flags;
	kuid_t owner;
	kgid_t group;

	struct net_device *dev;
	netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			   NETIF_F_TSO6)

	int align;
	int vnet_hdr_sz;
	int sndbuf;
	struct tap_filter txflt;
	struct sock_fprog fprog;
	/* protected by rtnl lock */
	bool filter_attached;
	u32 msg_enable;
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	atomic_long_t rx_frame_errors;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
	/* init args */
	struct file *file;
	struct ifreq *ifr;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

static void tun_flow_init(struct tun_struct *tun);
static void tun_flow_uninit(struct tun_struct *tun);

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_enable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_enable(&tfile->napi);
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		netif_info(tun, tx_queued, tun->dev,
			   "create flow: hash %u index %u\n",
			   rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
		   e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (READ_ONCE(e->queue_index) != queue_index)
			WRITE_ONCE(e->queue_index, queue_index);
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/* Save the hash received in the stack receive path and update the
 * flow_entry to later use for transmit path lookup.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash. The reason that
 * we do not check rxq no. is because some cards (e.g. 82599) choose
 * the rxq based on the txq where the last packet of the flow comes. As
 * the userspace application moves between processors, we may get a
 * different rxq no. here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of expensive divide */
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}
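
/*
 * The multiply-and-shift above maps a 32-bit hash uniformly onto
 * [0, numqueues) without a division: conceptually it computes
 * floor(txq / 2^32 * numqueues).  For example, with numqueues == 4 a
 * hash of 0x80000000 lands on queue (0x80000000ULL * 4) >> 32 == 2.
 */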

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		if (!tfile->detached)
			tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;

		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
				   NULL);

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else {
			tun_disable_queue(tun, tfile);
			tun_napi_disable(tfile);
		}

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_napi_del(tfile);
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to persist device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err < 0)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index, 0);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
		tun_napi_enable(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt for this.
	 *
	 * Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using half-initialized
	 * object.
	 */
	if (publish_tun)
		rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;

	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;

	return mask[n >> 5] & (1 << (n & 31));
}
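
/*
 * The hash filter mirrors classic Ethernet MAC filtering: the top six
 * bits of the CRC of a multicast address index one bit in the 64-bit
 * mask[2] table.  Purely as an illustration (the CRC value here is
 * hypothetical): an address whose CRC32 is 0xf0000000 gives n = 0x3c,
 * i.e. bit 28 of mask[1]; addr_hash_test() checks that same bit on
 * the transmit path.
 */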

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	/* May be called without holding rtnl_lock */
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is only
	 * set at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

static int tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct ifreq *ifr = tun->ifr;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	spin_lock_init(&tun->lock);

	err = security_tun_dev_alloc_security(&tun->security);
	if (err < 0) {
		free_percpu(dev->tstats);
		return err;
	}

	tun_flow_init(tun);

	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
			   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->features = dev->hw_features | NETIF_F_LLTX;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX);

	tun->flags = (tun->flags & ~TUN_FEATURES) |
		     (ifr->ifr_flags & TUN_FEATURES);

	INIT_LIST_HEAD(&tun->disabled);
	err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
			 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
	if (err < 0) {
		tun_flow_uninit(tun);
		security_tun_dev_free_security(tun->security);
		free_percpu(dev->tstats);
		return err;
	}
	return 0;
}

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract
		 * the rxhash here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	enum skb_drop_reason drop_reason;
	int txq = skb->queue_mapping;
	struct netdev_queue *queue;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (!tfile) {
		drop_reason = SKB_DROP_REASON_DEV_READY;
		goto drop;
	}

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb)) {
		drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
		goto drop;
	}

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb)) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0) {
		drop_reason = SKB_DROP_REASON_TAP_FILTER;
		goto drop;
	}

	if (pskb_trim(skb, len)) {
		drop_reason = SKB_DROP_REASON_NOMEM;
		goto drop;
	}

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
		drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
		goto drop;
	}

	skb_tx_timestamp(skb);

	/* Orphan the skb - required because we might hang on to it
	 * for an indefinite time.
	 */
	skb_orphan(skb);

	nf_reset_ct(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb)) {
		drop_reason = SKB_DROP_REASON_FULL_RING;
		goto drop;
	}

	/* NETIF_F_LLTX requires to do our own update of trans_start */
	queue = netdev_get_tx_queue(dev, txq);
	txq_trans_cond_update(queue);

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	dev_core_stats_tx_dropped_inc(dev);
	skb_tx_error(skb);
	kfree_skb_reason(skb, drop_reason);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct tun_struct *tun = netdev_priv(dev);

	dev_get_tstats64(dev, stats);

	stats->rx_frame_errors +=
		(unsigned long)atomic_long_read(&tun->rx_frame_errors);
}

static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	struct bpf_prog *old_prog;
	int i;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}
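
/*
 * An XDP program reaches this driver through the generic netdev BPF
 * hook; from userspace one would typically attach it with iproute2,
 * e.g. (illustrative, "prog.o" and "tap0" are placeholders):
 *
 *	ip link set dev tap0 xdp obj prog.o sec xdp
 *
 * tun_xdp_set() then propagates the SOCK_XDP flag to every queue so
 * the per-queue receive paths know a program may rewrite, redirect or
 * drop buffers.
 */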

static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier) {
		struct tun_struct *tun = netdev_priv(dev);

		/* Set carrier on only if the queues are attached */
		if (!tun->numqueues)
			return -EPERM;

		netif_carrier_on(dev);
	} else {
		netif_carrier_off(dev);
	}
	return 0;
}

static const struct net_device_ops tun_netdev_ops = {
	.ndo_init = tun_net_init,
	.ndo_uninit = tun_net_uninit,
	.ndo_open = tun_net_open,
	.ndo_stop = tun_net_close,
	.ndo_start_xmit = tun_net_xmit,
	.ndo_fix_features = tun_net_fix_features,
	.ndo_select_queue = tun_select_queue,
	.ndo_set_rx_headroom = tun_set_headroom,
	.ndo_get_stats64 = tun_net_get_stats64,
	.ndo_change_carrier = tun_net_change_carrier,
};

static void __tun_xdp_flush_tfile(struct tun_file *tfile)
{
	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
}

static int tun_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;
	int nxmit = 0;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();

resample:
	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues) {
		rcu_read_unlock();
		return -ENXIO; /* Caller will free/return all frames */
	}

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);
	if (unlikely(!tfile))
		goto resample;

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdp = frames[i];
		/* Encode the XDP flag into lowest bit for consumer to
		 * differentiate XDP buffer from sk_buff.
		 */
		void *frame = tun_xdp_to_ptr(xdp);

		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
			dev_core_stats_tx_dropped_inc(dev);
			break;
		}
		nxmit++;
	}
	spin_unlock(&tfile->tx_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__tun_xdp_flush_tfile(tfile);

	rcu_read_unlock();
	return nxmit;
}

static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
	int nxmit;

	if (unlikely(!frame))
		return -EOVERFLOW;

	nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
	if (!nxmit)
		xdp_return_frame_rx_napi(frame);
	return nxmit;
}

static const struct net_device_ops tap_netdev_ops = {
	.ndo_init = tun_net_init,
	.ndo_uninit = tun_net_uninit,
	.ndo_open = tun_net_open,
	.ndo_stop = tun_net_close,
	.ndo_start_xmit = tun_net_xmit,
	.ndo_fix_features = tun_net_fix_features,
	.ndo_set_rx_mode = tun_net_mclist,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_select_queue = tun_select_queue,
	.ndo_features_check = passthru_features_check,
	.ndo_set_rx_headroom = tun_set_headroom,
	.ndo_get_stats64 = dev_get_tstats64,
	.ndo_bpf = tun_xdp,
	.ndo_xdp_xmit = tun_xdp_xmit,
	.ndo_change_carrier = tun_net_change_carrier,
};

static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}

#define MIN_MTU 68
#define MAX_MTU 65535

/* Initialize net device. */
static void tun_net_initialize(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;
		dev->header_ops = &ip_tunnel_header_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;

	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		break;
	}

	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}

static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
{
	struct sock *sk = tfile->socket.sk;

	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
}

/* Character device part */

/* Poll */
static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	struct sock *sk;
	__poll_t mask = 0;

	if (!tun)
		return EPOLLERR;

	sk = tfile->socket.sk;

	poll_wait(file, sk_sleep(sk), wait);

	if (!ptr_ring_empty(&tfile->tx_ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
	 * guarantee EPOLLOUT to be raised by either here or
	 * tun_sock_write_space(). Then process could get notification
	 * after it writes to a down device and meets -EIO.
	 */
	if (tun_sock_writeable(tun, tfile) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     tun_sock_writeable(tun, tfile)))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = EPOLLERR;

	tun_put(tun);
	return mask;
}
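
/*
 * From userspace the fd behaves like any pollable descriptor; a reader
 * that does not want to block in read() can wait for EPOLLIN first
 * (illustrative sketch, error handling omitted):
 *
 *	struct pollfd pfd = { .fd = tun_fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	read(tun_fd, buf, sizeof(buf));
 */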

static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
					    size_t len,
					    const struct iov_iter *it)
{
	struct sk_buff *skb;
	size_t linear;
	int err;
	int i;

	if (it->nr_segs > MAX_SKB_FRAGS + 1)
		return ERR_PTR(-EMSGSIZE);

	local_bh_disable();
	skb = napi_get_frags(&tfile->napi);
	local_bh_enable();
	if (!skb)
		return ERR_PTR(-ENOMEM);

	linear = iov_iter_single_seg_count(it);
	err = __skb_grow(skb, linear);
	if (err)
		goto free;

	skb->len = len;
	skb->data_len = len - linear;
	skb->truesize += skb->data_len;

	for (i = 1; i < it->nr_segs; i++) {
		size_t fragsz = it->iov[i].iov_len;
		struct page *page;
		void *frag;

		if (fragsz == 0 || fragsz > PAGE_SIZE) {
			err = -EINVAL;
			goto free;
		}
		frag = netdev_alloc_frag(fragsz);
		if (!frag) {
			err = -ENOMEM;
			goto free;
		}
		page = virt_to_head_page(frag);
		skb_fill_page_desc(skb, i - 1, page,
				   frag - page_address(page), fragsz);
	}

	return skb;
free:
	/* frees skb and all frags allocated with napi_get_frags() */
	napi_free_frags(&tfile->napi);
	return ERR_PTR(err);
}

/* prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, 0);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue))) {
			skb_record_rx_queue(nskb, tfile->queue_index);
			netif_receive_skb(nskb);
		}
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}

static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
		return false;

	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	if (!noblock)
		return false;

	if (zerocopy)
		return false;

	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return false;

	return true;
}

static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
				       struct page_frag *alloc_frag, char *buf,
				       int buflen, int len, int pad)
{
	struct sk_buff *skb = build_skb(buf, buflen);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb_reserve(skb, pad);
	skb_put(skb, len);
	skb_set_owner_w(skb, tfile->socket.sk);

	get_page(alloc_frag->page);
	alloc_frag->offset += buflen;

	return skb;
}

static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
		       struct xdp_buff *xdp, u32 act)
{
	int err;

	switch (act) {
	case XDP_REDIRECT:
		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
		if (err)
			return err;
		break;
	case XDP_TX:
		err = tun_xdp_tx(tun->dev, xdp);
		if (err < 0)
			return err;
		break;
	case XDP_PASS:
		break;
	default:
		bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(tun->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		dev_core_stats_rx_dropped_inc(tun->dev);
		break;
	}

	return act;
}

static struct sk_buff *tun_build_skb(struct tun_struct *tun,
				     struct tun_file *tfile,
				     struct iov_iter *from,
				     struct virtio_net_hdr *hdr,
				     int len, int *skb_xdp)
{
	struct page_frag *alloc_frag = &current->task_frag;
	struct bpf_prog *xdp_prog;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	char *buf;
	size_t copied;
	int pad = TUN_RX_PAD;
	int err = 0;

	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog)
		pad += XDP_PACKET_HEADROOM;
	buflen += SKB_DATA_ALIGN(len + pad);
	rcu_read_unlock();

	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return ERR_PTR(-EFAULT);

	/* There's a small window that XDP may be set after the check
	 * of xdp_prog above, this should be rare and for simplicity
	 * we do XDP on skb in case the headroom is not enough.
	 */
	if (hdr->gso_type || !xdp_prog) {
		*skb_xdp = 1;
		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
				       pad);
	}

	*skb_xdp = 0;

	local_bh_disable();
	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;

		xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
		xdp_prepare_buff(&xdp, buf, pad, len, false);

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		if (act == XDP_REDIRECT || act == XDP_TX) {
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
		}
		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
		if (err < 0) {
			if (act == XDP_REDIRECT || act == XDP_TX)
				put_page(alloc_frag->page);
			goto out;
		}

		if (err == XDP_REDIRECT)
			xdp_do_flush();
		if (err != XDP_PASS)
			goto out;

		pad = xdp.data - xdp.data_hard_start;
		len = xdp.data_end - xdp.data;
	}
	rcu_read_unlock();
	local_bh_enable();

	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);

out:
	rcu_read_unlock();
	local_bh_enable();
	return NULL;
}

/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
	bool frags = tun_napi_frags_enabled(tfile);
	enum skb_drop_reason drop_reason;

	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;

		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* The zerocopy path needs a sane amount of linear data:
		 * copy at least GOODCOPY_LEN (or the full header when it
		 * is larger), and fall back to plain copying when the
		 * remaining fragments would not fit into an skb.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
		/* For the packet that is not easy to be processed
		 * (e.g. gso or jumbo packet), we will do it after
		 * skb was created with generic XDP'ed.
		 */
		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
		if (IS_ERR(skb)) {
			dev_core_stats_rx_dropped_inc(tun->dev);
			return PTR_ERR(skb);
		}
		if (!skb)
			return total_len;
	} else {
		if (!zerocopy) {
			copylen = len;
			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
				linear = good_linear;
			else
				linear = tun16_to_cpu(tun, gso.hdr_len);
		}

		if (frags) {
			mutex_lock(&tfile->napi_mutex);
			skb = tun_napi_alloc_frags(tfile, copylen, from);
			/* tun_napi_alloc_frags() enforces a layout for
			 * the skb invalidating zerocopy.
			 */
			zerocopy = false;
		} else {
			skb = tun_alloc_skb(tfile, align, copylen, linear,
					    noblock);
		}

		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) != -EAGAIN)
				dev_core_stats_rx_dropped_inc(tun->dev);
			if (frags)
				mutex_unlock(&tfile->napi_mutex);
			return PTR_ERR(skb);
		}

		if (zerocopy)
			err = zerocopy_sg_from_iter(skb, from);
		else
			err = skb_copy_datagram_from_iter(skb, 0, from, len);

		if (err) {
			err = -EFAULT;
			drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
drop:
			dev_core_stats_rx_dropped_inc(tun->dev);
			kfree_skb_reason(skb, drop_reason);
			if (frags) {
				tfile->napi.skb = NULL;
				mutex_unlock(&tfile->napi_mutex);
			}

			return err;
		}
	}

	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
		atomic_long_inc(&tun->rx_frame_errors);
		kfree_skb(skb);
		if (frags) {
			tfile->napi.skb = NULL;
			mutex_unlock(&tfile->napi_mutex);
		}

		return -EINVAL;
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				dev_core_stats_rx_dropped_inc(tun->dev);
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case IFF_TAP:
		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
			err = -ENOMEM;
			drop_reason = SKB_DROP_REASON_HDR_TRUNC;
			goto drop;
		}
		skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_zcopy_init(skb, msg_control);
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;

		uarg->callback(NULL, uarg, false);
	}

	skb_reset_network_header(skb);
	skb_probe_transport_header(skb);
	skb_record_rx_queue(skb, tfile->queue_index);

	if (skb_xdp) {
		struct bpf_prog *xdp_prog;
		int ret;

		local_bh_disable();
		rcu_read_lock();
		xdp_prog = rcu_dereference(tun->xdp_prog);
		if (xdp_prog) {
			ret = do_xdp_generic(xdp_prog, skb);
			if (ret != XDP_PASS) {
				rcu_read_unlock();
				local_bh_enable();
				if (frags) {
					tfile->napi.skb = NULL;
					mutex_unlock(&tfile->napi_mutex);
				}
				return total_len;
			}
		}
		rcu_read_unlock();
		local_bh_enable();
	}

	/* Compute the costly rx hash only if needed for flow updates.
	 * We may get a very small possibility of OOO during switching, not
	 * worth to optimize.
	 */
	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	rcu_read_lock();
	if (unlikely(!(tun->dev->flags & IFF_UP))) {
		err = -EIO;
		rcu_read_unlock();
		drop_reason = SKB_DROP_REASON_DEV_READY;
		goto drop;
	}

	if (frags) {
		u32 headlen;

		/* Exercise flow dissector code path. */
		skb_push(skb, ETH_HLEN);
		headlen = eth_get_headlen(tun->dev, skb->data,
					  skb_headlen(skb));

		if (unlikely(headlen > skb_headlen(skb))) {
			dev_core_stats_rx_dropped_inc(tun->dev);
			napi_free_frags(&tfile->napi);
			rcu_read_unlock();
			mutex_unlock(&tfile->napi_mutex);
			WARN_ON(1);
			return -ENOMEM;
		}

		local_bh_disable();
		napi_gro_frags(&tfile->napi);
		local_bh_enable();
		mutex_unlock(&tfile->napi_mutex);
	} else if (tfile->napi_enabled) {
		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
		int queue_len;

		spin_lock_bh(&queue->lock);
		__skb_queue_tail(queue, skb);
		queue_len = skb_queue_len(queue);
		spin_unlock(&queue->lock);

		if (!more || queue_len > NAPI_POLL_WEIGHT)
			napi_schedule(&tfile->napi);

		local_bh_enable();
	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
		tun_rx_batched(tun, tfile, skb, more);
	} else {
		netif_rx(skb);
	}
	rcu_read_unlock();

	preempt_disable();
	dev_sw_netstats_rx_add(tun->dev, len);
	preempt_enable();

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

	return total_len;
}

static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t result;
	int noblock = 0;

	if (!tun)
		return -EBADFD;

	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
		noblock = 1;

	result = tun_get_user(tun, tfile, NULL, from, noblock, false);

	tun_put(tun);
	return result;
}

static ssize_t tun_put_user_xdp(struct tun_struct *tun,
				struct tun_file *tfile,
				struct xdp_frame *xdp_frame,
				struct iov_iter *iter)
{
	int vnet_hdr_sz = 0;
	size_t size = xdp_frame->len;
	size_t ret;

	if (tun->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 };

		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
			return -EINVAL;
		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
			     sizeof(gso)))
			return -EFAULT;
		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;

	preempt_disable();
	dev_sw_netstats_tx_add(tun->dev, 1, ret);
	preempt_enable();

	return ret;
}

/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    struct iov_iter *iter)
{
	struct tun_pi pi = { 0, skb->protocol };
	ssize_t total;
	int vlan_offset = 0;
	int vlan_hlen = 0;
	int vnet_hdr_sz = 0;

	if (skb_vlan_tag_present(skb))
		vlan_hlen = VLAN_HLEN;

	if (tun->flags & IFF_VNET_HDR)
		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

	total = skb->len + vlan_hlen + vnet_hdr_sz;

	if (!(tun->flags & IFF_NO_PI)) {
		if (iov_iter_count(iter) < sizeof(pi))
			return -EINVAL;

		total += sizeof(pi);
		if (iov_iter_count(iter) < total) {
			/* Packet will be stripped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
			return -EFAULT;
	}

	if (vnet_hdr_sz) {
		struct virtio_net_hdr gso;

		if (iov_iter_count(iter) < vnet_hdr_sz)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &gso,
					    tun_is_little_endian(tun), true,
					    vlan_hlen)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			pr_err("unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
			       tun16_to_cpu(tun, gso.hdr_len));
			print_hex_dump(KERN_ERR, "tun: ",
				       DUMP_PREFIX_NONE,
				       16, 1, skb->head,
				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	if (vlan_hlen) {
		int ret;
		struct veth veth;

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);

done:
	/* caller is in process context, */
	preempt_disable();
	dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
	preempt_enable();

	return total;
}
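
/*
 * What a read() returns therefore depends on how the device was set up.
 * With IFF_NO_PI cleared and IFF_VNET_HDR set, each datagram handed to
 * userspace is laid out as (illustrative):
 *
 *	struct tun_pi         pi;     // flags + EtherType
 *	struct virtio_net_hdr gso;    // offload metadata, zeroed if unused
 *	u8                    data[]; // the frame itself
 *
 * A reader must supply a buffer large enough for all three parts, or
 * the packet is truncated and TUN_PKT_STRIP is set in pi.flags.
 */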

static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
	DECLARE_WAITQUEUE(wait, current);
	void *ptr = NULL;
	int error = 0;

	ptr = ptr_ring_consume(&tfile->tx_ring);
	if (ptr)
		goto out;
	if (noblock) {
		error = -EAGAIN;
		goto out;
	}

	add_wait_queue(&tfile->socket.wq.wait, &wait);

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		ptr = ptr_ring_consume(&tfile->tx_ring);
		if (ptr)
			break;
		if (signal_pending(current)) {
			error = -ERESTARTSYS;
			break;
		}
		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
			error = -EFAULT;
			break;
		}

		schedule();
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tfile->socket.wq.wait, &wait);

out:
	*err = error;
	return ptr;
}

static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct iov_iter *to,
			   int noblock, void *ptr)
{
	ssize_t ret;
	int err;

	if (!iov_iter_count(to)) {
		tun_ptr_free(ptr);
		return 0;
	}

	if (!ptr) {
		/* Read frames from ring */
		ptr = tun_ring_recv(tfile, noblock, &err);
		if (!ptr)
			return err;
	}

	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
		xdp_return_frame(xdpf);
	} else {
		struct sk_buff *skb = ptr;

		ret = tun_put_user(tun, tfile, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}

	return ret;
}

static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t len = iov_iter_count(to), ret;
	int noblock = 0;

	if (!tun)
		return -EBADFD;

	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
		noblock = 1;

	ret = tun_do_read(tun, tfile, to, noblock, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	tun_put(tun);
	return ret;
}

static void tun_prog_free(struct rcu_head *rcu)
{
	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);

	bpf_prog_destroy(prog->prog);
	kfree(prog);
}

static int __tun_set_ebpf(struct tun_struct *tun,
			  struct tun_prog __rcu **prog_p,
			  struct bpf_prog *prog)
{
	struct tun_prog *old, *new = NULL;

	if (prog) {
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->prog = prog;
	}

	spin_lock_bh(&tun->lock);
	old = rcu_dereference_protected(*prog_p,
					lockdep_is_held(&tun->lock));
	rcu_assign_pointer(*prog_p, new);
	spin_unlock_bh(&tun->lock);

	if (old)
		call_rcu(&old->rcu, tun_prog_free);

	return 0;
}
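
/*
 * Userspace installs these programs with the TUNSETSTEERINGEBPF and
 * TUNSETFILTEREBPF ioctls, passing the fd of a loaded BPF program
 * (illustrative sketch; prog_fd is assumed to come from bpf(2)):
 *
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);   // queue selection
 *	ioctl(tun_fd, TUNSETFILTEREBPF, &prog_fd);     // per-packet filter
 *
 * Passing an fd of -1 detaches the program; __tun_set_ebpf() then frees
 * the old one after an RCU grace period.
 */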
2284
2285 static void tun_free_netdev(struct net_device *dev)
2286 {
2287 struct tun_struct *tun = netdev_priv(dev);
2288
2289 BUG_ON(!(list_empty(&tun->disabled)));
2290
2291 free_percpu(dev->tstats);
2292 tun_flow_uninit(tun);
2293 security_tun_dev_free_security(tun->security);
2294 __tun_set_ebpf(tun, &tun->steering_prog, NULL);
2295 __tun_set_ebpf(tun, &tun->filter_prog, NULL);
2296 }
2297
2298 static void tun_setup(struct net_device *dev)
2299 {
2300 struct tun_struct *tun = netdev_priv(dev);
2301
2302 tun->owner = INVALID_UID;
2303 tun->group = INVALID_GID;
2304 tun_default_link_ksettings(dev, &tun->link_ksettings);
2305
2306 dev->ethtool_ops = &tun_ethtool_ops;
2307 dev->needs_free_netdev = true;
2308 dev->priv_destructor = tun_free_netdev;
2309
dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
2311 }
2312
/* Trivial set of netlink ops to allow deleting tun or tap
 * devices created by this module.
 */
2316 static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2317 struct netlink_ext_ack *extack)
2318 {
2319 NL_SET_ERR_MSG(extack,
2320 "tun/tap creation via rtnetlink is not supported.");
2321 return -EOPNOTSUPP;
2322 }
2323
2324 static size_t tun_get_size(const struct net_device *dev)
2325 {
2326 BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2327 BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2328
return nla_total_size(sizeof(uid_t)) + /* OWNER */
nla_total_size(sizeof(gid_t)) + /* GROUP */
nla_total_size(sizeof(u8)) + /* TYPE */
nla_total_size(sizeof(u8)) + /* PI */
nla_total_size(sizeof(u8)) + /* VNET_HDR */
nla_total_size(sizeof(u8)) + /* PERSIST */
nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
0;
2339 }
2340
2341 static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2342 {
2343 struct tun_struct *tun = netdev_priv(dev);
2344
2345 if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2346 goto nla_put_failure;
2347 if (uid_valid(tun->owner) &&
2348 nla_put_u32(skb, IFLA_TUN_OWNER,
2349 from_kuid_munged(current_user_ns(), tun->owner)))
2350 goto nla_put_failure;
2351 if (gid_valid(tun->group) &&
2352 nla_put_u32(skb, IFLA_TUN_GROUP,
2353 from_kgid_munged(current_user_ns(), tun->group)))
2354 goto nla_put_failure;
2355 if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2356 goto nla_put_failure;
2357 if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2358 goto nla_put_failure;
2359 if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2360 goto nla_put_failure;
2361 if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2362 !!(tun->flags & IFF_MULTI_QUEUE)))
2363 goto nla_put_failure;
2364 if (tun->flags & IFF_MULTI_QUEUE) {
2365 if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2366 goto nla_put_failure;
2367 if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2368 tun->numdisabled))
2369 goto nla_put_failure;
2370 }
2371
2372 return 0;
2373
2374 nla_put_failure:
2375 return -EMSGSIZE;
2376 }
2377
2378 static struct rtnl_link_ops tun_link_ops __read_mostly = {
2379 .kind = DRV_NAME,
2380 .priv_size = sizeof(struct tun_struct),
2381 .setup = tun_setup,
2382 .validate = tun_validate,
2383 .get_size = tun_get_size,
2384 .fill_info = tun_fill_info,
2385 };
2386
2387 static void tun_sock_write_space(struct sock *sk)
2388 {
2389 struct tun_file *tfile;
2390 wait_queue_head_t *wqueue;
2391
2392 if (!sock_writeable(sk))
2393 return;
2394
2395 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
2396 return;
2397
2398 wqueue = sk_sleep(sk);
2399 if (wqueue && waitqueue_active(wqueue))
2400 wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2401 EPOLLWRNORM | EPOLLWRBAND);
2402
2403 tfile = container_of(sk, struct tun_file, sk);
2404 kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
2405 }
2406
2407 static void tun_put_page(struct tun_page *tpage)
2408 {
2409 if (tpage->page)
2410 __page_frag_cache_drain(tpage->page, tpage->count);
2411 }
2412
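/* Handle one xdp_buff from a TUN_MSG_PTR batch: run the device's XDP
 * program if one is attached, then build an skb and pass it up the stack.
 * Returns the number of packets queued to NAPI (0 or 1) or a negative
 * error.
 */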
2413 static int tun_xdp_one(struct tun_struct *tun,
2414 struct tun_file *tfile,
2415 struct xdp_buff *xdp, int *flush,
2416 struct tun_page *tpage)
2417 {
2418 unsigned int datasize = xdp->data_end - xdp->data;
2419 struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2420 struct virtio_net_hdr *gso = &hdr->gso;
2421 struct bpf_prog *xdp_prog;
2422 struct sk_buff *skb = NULL;
2423 struct sk_buff_head *queue;
2424 u32 rxhash = 0, act;
2425 int buflen = hdr->buflen;
2426 int ret = 0;
2427 bool skb_xdp = false;
2428 struct page *page;
2429
2430 xdp_prog = rcu_dereference(tun->xdp_prog);
2431 if (xdp_prog) {
2432 if (gso->gso_type) {
2433 skb_xdp = true;
2434 goto build;
2435 }
2436
2437 xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
2438 xdp_set_data_meta_invalid(xdp);
2439
2440 act = bpf_prog_run_xdp(xdp_prog, xdp);
2441 ret = tun_xdp_act(tun, xdp_prog, xdp, act);
2442 if (ret < 0) {
2443 put_page(virt_to_head_page(xdp->data));
2444 return ret;
2445 }
2446
2447 switch (ret) {
2448 case XDP_REDIRECT:
2449 *flush = true;
2450 fallthrough;
2451 case XDP_TX:
2452 return 0;
2453 case XDP_PASS:
2454 break;
2455 default:
2456 page = virt_to_head_page(xdp->data);
2457 if (tpage->page == page) {
2458 ++tpage->count;
2459 } else {
2460 tun_put_page(tpage);
2461 tpage->page = page;
2462 tpage->count = 1;
2463 }
2464 return 0;
2465 }
2466 }
2467
2468 build:
2469 skb = build_skb(xdp->data_hard_start, buflen);
2470 if (!skb) {
2471 ret = -ENOMEM;
2472 goto out;
2473 }
2474
2475 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2476 skb_put(skb, xdp->data_end - xdp->data);
2477
2478 if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2479 atomic_long_inc(&tun->rx_frame_errors);
2480 kfree_skb(skb);
2481 ret = -EINVAL;
2482 goto out;
2483 }
2484
2485 skb->protocol = eth_type_trans(skb, tun->dev);
2486 skb_reset_network_header(skb);
2487 skb_probe_transport_header(skb);
2488 skb_record_rx_queue(skb, tfile->queue_index);
2489
2490 if (skb_xdp) {
2491 ret = do_xdp_generic(xdp_prog, skb);
2492 if (ret != XDP_PASS) {
2493 ret = 0;
2494 goto out;
2495 }
2496 }
2497
2498 if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2499 !tfile->detached)
2500 rxhash = __skb_get_hash_symmetric(skb);
2501
2502 if (tfile->napi_enabled) {
2503 queue = &tfile->sk.sk_write_queue;
2504 spin_lock(&queue->lock);
2505 __skb_queue_tail(queue, skb);
2506 spin_unlock(&queue->lock);
2507 ret = 1;
2508 } else {
2509 netif_receive_skb(skb);
2510 ret = 0;
2511 }
2512
/* No need to disable preemption here since this function is
 * always called with bh disabled
 */
2516 dev_sw_netstats_rx_add(tun->dev, datasize);
2517
2518 if (rxhash)
2519 tun_flow_update(tun, rxhash, tfile);
2520
2521 out:
2522 return ret;
2523 }
2524
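/* sendmsg() on the internal socket (used e.g. by vhost-net): either a batch
 * of pre-built XDP buffers (TUN_MSG_PTR) or an ordinary tun_get_user() write.
 */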
2525 static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
2526 {
2527 int ret, i;
2528 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2529 struct tun_struct *tun = tun_get(tfile);
2530 struct tun_msg_ctl *ctl = m->msg_control;
2531 struct xdp_buff *xdp;
2532
2533 if (!tun)
2534 return -EBADFD;
2535
2536 if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
2537 ctl && ctl->type == TUN_MSG_PTR) {
2538 struct tun_page tpage;
2539 int n = ctl->num;
2540 int flush = 0, queued = 0;
2541
2542 memset(&tpage, 0, sizeof(tpage));
2543
2544 local_bh_disable();
2545 rcu_read_lock();
2546
2547 for (i = 0; i < n; i++) {
2548 xdp = &((struct xdp_buff *)ctl->ptr)[i];
2549 ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2550 if (ret > 0)
2551 queued += ret;
2552 }
2553
2554 if (flush)
2555 xdp_do_flush();
2556
2557 if (tfile->napi_enabled && queued > 0)
2558 napi_schedule(&tfile->napi);
2559
2560 rcu_read_unlock();
2561 local_bh_enable();
2562
2563 tun_put_page(&tpage);
2564
2565 ret = total_len;
2566 goto out;
2567 }
2568
2569 ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
2570 m->msg_flags & MSG_DONTWAIT,
2571 m->msg_flags & MSG_MORE);
2572 out:
2573 tun_put(tun);
2574 return ret;
2575 }
2576
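/* recvmsg() on the internal socket: read one packet, flagging truncation */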
2577 static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2578 int flags)
2579 {
2580 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2581 struct tun_struct *tun = tun_get(tfile);
2582 void *ptr = m->msg_control;
2583 int ret;
2584
2585 if (!tun) {
2586 ret = -EBADFD;
2587 goto out_free;
2588 }
2589
2590 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
2591 ret = -EINVAL;
2592 goto out_put_tun;
2593 }
2594 if (flags & MSG_ERRQUEUE) {
2595 ret = sock_recv_errqueue(sock->sk, m, total_len,
2596 SOL_PACKET, TUN_TX_TIMESTAMP);
2597 goto out;
2598 }
2599 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
2600 if (ret > (ssize_t)total_len) {
2601 m->msg_flags |= MSG_TRUNC;
2602 ret = flags & MSG_TRUNC ? ret : total_len;
2603 }
2604 out:
2605 tun_put(tun);
2606 return ret;
2607
2608 out_put_tun:
2609 tun_put(tun);
2610 out_free:
2611 tun_ptr_free(ptr);
2612 return ret;
2613 }
2614
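/* Length of the next queued packet, without dequeuing it */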
2615 static int tun_ptr_peek_len(void *ptr)
2616 {
2617 if (likely(ptr)) {
2618 if (tun_is_xdp_frame(ptr)) {
2619 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2620
2621 return xdpf->len;
2622 }
2623 return __skb_array_len_with_tag(ptr);
2624 } else {
2625 return 0;
2626 }
2627 }
2628
2629 static int tun_peek_len(struct socket *sock)
2630 {
2631 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2632 struct tun_struct *tun;
2633 int ret = 0;
2634
2635 tun = tun_get(tfile);
2636 if (!tun)
2637 return 0;
2638
2639 ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
2640 tun_put(tun);
2641
2642 return ret;
2643 }
2644
/* Ops structure to mimic raw sockets with tun */
2646 static const struct proto_ops tun_socket_ops = {
2647 .peek_len = tun_peek_len,
2648 .sendmsg = tun_sendmsg,
2649 .recvmsg = tun_recvmsg,
2650 };
2651
2652 static struct proto tun_proto = {
2653 .name = "tun",
2654 .owner = THIS_MODULE,
2655 .obj_size = sizeof(struct tun_file),
2656 };
2657
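/* Flags reported back to userspace via sysfs and TUNGETIFF */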
2658 static int tun_flags(struct tun_struct *tun)
2659 {
2660 return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2661 }
2662
2663 static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
2664 char *buf)
2665 {
2666 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2667 return sprintf(buf, "0x%x\n", tun_flags(tun));
2668 }
2669
2670 static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
2671 char *buf)
2672 {
2673 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2674 return uid_valid(tun->owner)?
2675 sprintf(buf, "%u\n",
2676 from_kuid_munged(current_user_ns(), tun->owner)):
2677 sprintf(buf, "-1\n");
2678 }
2679
2680 static ssize_t group_show(struct device *dev, struct device_attribute *attr,
2681 char *buf)
2682 {
2683 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2684 return gid_valid(tun->group) ?
2685 sprintf(buf, "%u\n",
2686 from_kgid_munged(current_user_ns(), tun->group)):
2687 sprintf(buf, "-1\n");
2688 }
2689
2690 static DEVICE_ATTR_RO(tun_flags);
2691 static DEVICE_ATTR_RO(owner);
2692 static DEVICE_ATTR_RO(group);
2693
2694 static struct attribute *tun_dev_attrs[] = {
2695 &dev_attr_tun_flags.attr,
2696 &dev_attr_owner.attr,
2697 &dev_attr_group.attr,
2698 NULL
2699 };
2700
2701 static const struct attribute_group tun_attr_group = {
2702 .attrs = tun_dev_attrs
2703 };
2704
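/* TUNSETIFF: attach this file to an existing tun/tap device, or create a
 * new one if the name does not resolve.
 */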
2705 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2706 {
2707 struct tun_struct *tun;
2708 struct tun_file *tfile = file->private_data;
2709 struct net_device *dev;
2710 int err;
2711
2712 if (tfile->detached)
2713 return -EINVAL;
2714
2715 if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
2716 if (!capable(CAP_NET_ADMIN))
2717 return -EPERM;
2718
2719 if (!(ifr->ifr_flags & IFF_NAPI) ||
2720 (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
2721 return -EINVAL;
2722 }
2723
2724 dev = __dev_get_by_name(net, ifr->ifr_name);
2725 if (dev) {
2726 if (ifr->ifr_flags & IFF_TUN_EXCL)
2727 return -EBUSY;
2728 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
2729 tun = netdev_priv(dev);
2730 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
2731 tun = netdev_priv(dev);
2732 else
2733 return -EINVAL;
2734
2735 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
2736 !!(tun->flags & IFF_MULTI_QUEUE))
2737 return -EINVAL;
2738
2739 if (tun_not_capable(tun))
2740 return -EPERM;
2741 err = security_tun_dev_open(tun->security);
2742 if (err < 0)
2743 return err;
2744
2745 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2746 ifr->ifr_flags & IFF_NAPI,
2747 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2748 if (err < 0)
2749 return err;
2750
2751 if (tun->flags & IFF_MULTI_QUEUE &&
2752 (tun->numqueues + tun->numdisabled > 1)) {
/* One or more queue has already been attached, no need
 * to initialize the device again.
 */
2756 netdev_state_change(dev);
2757 return 0;
2758 }
2759
2760 tun->flags = (tun->flags & ~TUN_FEATURES) |
2761 (ifr->ifr_flags & TUN_FEATURES);
2762
2763 netdev_state_change(dev);
2764 } else {
2765 char *name;
2766 unsigned long flags = 0;
2767 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2768 MAX_TAP_QUEUES : 1;
2769
2770 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2771 return -EPERM;
2772 err = security_tun_dev_create();
2773 if (err < 0)
2774 return err;
2775
/* Set dev type */
2777 if (ifr->ifr_flags & IFF_TUN) {
/* TUN device */
2779 flags |= IFF_TUN;
2780 name = "tun%d";
2781 } else if (ifr->ifr_flags & IFF_TAP) {
/* TAP device */
2783 flags |= IFF_TAP;
2784 name = "tap%d";
2785 } else
2786 return -EINVAL;
2787
2788 if (*ifr->ifr_name)
2789 name = ifr->ifr_name;
2790
2791 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2792 NET_NAME_UNKNOWN, tun_setup, queues,
2793 queues);
2794
2795 if (!dev)
2796 return -ENOMEM;
2797
2798 dev_net_set(dev, net);
2799 dev->rtnl_link_ops = &tun_link_ops;
2800 dev->ifindex = tfile->ifindex;
2801 dev->sysfs_groups[0] = &tun_attr_group;
2802
2803 tun = netdev_priv(dev);
2804 tun->dev = dev;
2805 tun->flags = flags;
2806 tun->txflt.count = 0;
2807 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
2808
2809 tun->align = NET_SKB_PAD;
2810 tun->filter_attached = false;
2811 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
2812 tun->rx_batched = 0;
2813 RCU_INIT_POINTER(tun->steering_prog, NULL);
2814
2815 tun->ifr = ifr;
2816 tun->file = file;
2817
2818 tun_net_initialize(dev);
2819
2820 err = register_netdevice(tun->dev);
2821 if (err < 0) {
2822 free_netdev(dev);
2823 return err;
2824 }
2825
/* The queue was attached with publishing disabled during ndo_init;
 * expose the device on the file only now that registration succeeded.
 */
2828 rcu_assign_pointer(tfile->tun, tun);
2829 }
2830
2831 if (ifr->ifr_flags & IFF_NO_CARRIER)
2832 netif_carrier_off(tun->dev);
2833 else
2834 netif_carrier_on(tun->dev);
2835
/* Make sure persistent devices do not get stuck in
 * xoff state.
 */
2839 if (netif_running(tun->dev))
2840 netif_tx_wake_all_queues(tun->dev);
2841
2842 strcpy(ifr->ifr_name, tun->dev->name);
2843 return 0;
2844 }
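
/* Illustrative userspace sketch (not part of the driver) of the path into
 * tun_set_iff(); "tap0" and buf are placeholders, error handling omitted:
 *
 *	struct ifreq ifr = { .ifr_flags = IFF_TAP | IFF_NO_PI };
 *	int fd = open("/dev/net/tun", O_RDWR);
 *	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);	// handled by tun_set_iff() above
 *	read(fd, buf, sizeof(buf));	// raw frames from the new device
 */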
2845
2846 static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2847 {
2848 strcpy(ifr->ifr_name, tun->dev->name);
2849
2850 ifr->ifr_flags = tun_flags(tun);
2851
2852 }
2853
/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
2856 static int set_offload(struct tun_struct *tun, unsigned long arg)
2857 {
2858 netdev_features_t features = 0;
2859
2860 if (arg & TUN_F_CSUM) {
2861 features |= NETIF_F_HW_CSUM;
2862 arg &= ~TUN_F_CSUM;
2863
2864 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
2865 if (arg & TUN_F_TSO_ECN) {
2866 features |= NETIF_F_TSO_ECN;
2867 arg &= ~TUN_F_TSO_ECN;
2868 }
2869 if (arg & TUN_F_TSO4)
2870 features |= NETIF_F_TSO;
2871 if (arg & TUN_F_TSO6)
2872 features |= NETIF_F_TSO6;
2873 arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
2874 }
2875
2876 arg &= ~TUN_F_UFO;
2877 }
2878
/* This gives the user a way to test for new features in future by
 * trying to set them. */
2881 if (arg)
2882 return -EINVAL;
2883
2884 tun->set_features = features;
2885 tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2886 tun->dev->wanted_features |= features;
2887 netdev_update_features(tun->dev);
2888
2889 return 0;
2890 }
2891
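/* Remove the classic BPF socket filter from the first n queues */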
2892 static void tun_detach_filter(struct tun_struct *tun, int n)
2893 {
2894 int i;
2895 struct tun_file *tfile;
2896
2897 for (i = 0; i < n; i++) {
2898 tfile = rtnl_dereference(tun->tfiles[i]);
2899 lock_sock(tfile->socket.sk);
2900 sk_detach_filter(tfile->socket.sk);
2901 release_sock(tfile->socket.sk);
2902 }
2903
2904 tun->filter_attached = false;
2905 }
2906
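/* Attach tun->fprog to every queue, rolling back the already-attached
 * queues on failure.
 */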
2907 static int tun_attach_filter(struct tun_struct *tun)
2908 {
2909 int i, ret = 0;
2910 struct tun_file *tfile;
2911
2912 for (i = 0; i < tun->numqueues; i++) {
2913 tfile = rtnl_dereference(tun->tfiles[i]);
2914 lock_sock(tfile->socket.sk);
2915 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2916 release_sock(tfile->socket.sk);
2917 if (ret) {
2918 tun_detach_filter(tun, i);
2919 return ret;
2920 }
2921 }
2922
2923 tun->filter_attached = true;
2924 return ret;
2925 }
2926
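/* Propagate tun->sndbuf to every attached queue's socket */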
2927 static void tun_set_sndbuf(struct tun_struct *tun)
2928 {
2929 struct tun_file *tfile;
2930 int i;
2931
2932 for (i = 0; i < tun->numqueues; i++) {
2933 tfile = rtnl_dereference(tun->tfiles[i]);
2934 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2935 }
2936 }
2937
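/* TUNSETQUEUE: re-enable a detached queue (IFF_ATTACH_QUEUE) or disable an
 * attached one (IFF_DETACH_QUEUE) on a multiqueue device.
 */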
2938 static int tun_set_queue(struct file *file, struct ifreq *ifr)
2939 {
2940 struct tun_file *tfile = file->private_data;
2941 struct tun_struct *tun;
2942 int ret = 0;
2943
2944 rtnl_lock();
2945
2946 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
2947 tun = tfile->detached;
2948 if (!tun) {
2949 ret = -EINVAL;
2950 goto unlock;
2951 }
2952 ret = security_tun_dev_attach_queue(tun->security);
2953 if (ret < 0)
2954 goto unlock;
2955 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2956 tun->flags & IFF_NAPI_FRAGS, true);
2957 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2958 tun = rtnl_dereference(tfile->tun);
2959 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
2960 ret = -EINVAL;
2961 else
2962 __tun_detach(tfile, false);
2963 } else
2964 ret = -EINVAL;
2965
2966 if (ret >= 0)
2967 netdev_state_change(tun->dev);
2968
2969 unlock:
2970 rtnl_unlock();
2971 return ret;
2972 }
2973
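/* TUNSETSTEERINGEBPF / TUNSETFILTEREBPF: look up the program by fd; an fd
 * of -1 clears the current program.
 */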
2974 static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
2975 void __user *data)
2976 {
2977 struct bpf_prog *prog;
2978 int fd;
2979
2980 if (copy_from_user(&fd, data, sizeof(fd)))
2981 return -EFAULT;
2982
2983 if (fd == -1) {
2984 prog = NULL;
2985 } else {
2986 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
2987 if (IS_ERR(prog))
2988 return PTR_ERR(prog);
2989 }
2990
2991 return __tun_set_ebpf(tun, prog_p, prog);
2992 }
2993
/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
2995 static unsigned char tun_get_addr_len(unsigned short type)
2996 {
2997 switch (type) {
2998 case ARPHRD_IP6GRE:
2999 case ARPHRD_TUNNEL6:
3000 return sizeof(struct in6_addr);
3001 case ARPHRD_IPGRE:
3002 case ARPHRD_TUNNEL:
3003 case ARPHRD_SIT:
3004 return 4;
3005 case ARPHRD_ETHER:
3006 return ETH_ALEN;
3007 case ARPHRD_IEEE802154:
3008 case ARPHRD_IEEE802154_MONITOR:
3009 return IEEE802154_EXTENDED_ADDR_LEN;
3010 case ARPHRD_PHONET_PIPE:
3011 case ARPHRD_PPP:
3012 case ARPHRD_NONE:
3013 return 0;
3014 case ARPHRD_6LOWPAN:
3015 return EUI64_ADDR_LEN;
3016 case ARPHRD_FDDI:
3017 return FDDI_K_ALEN;
3018 case ARPHRD_HIPPI:
3019 return HIPPI_ALEN;
3020 case ARPHRD_IEEE802:
3021 return FC_ALEN;
3022 case ARPHRD_ROSE:
3023 return ROSE_ADDR_LEN;
3024 case ARPHRD_NETROM:
3025 return AX25_ADDR_LEN;
3026 case ARPHRD_LOCALTLK:
3027 return LTALK_ALEN;
3028 default:
3029 return 0;
3030 }
3031 }
3032
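/* Main ioctl dispatcher; ifreq_len differs between native and compat calls */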
3033 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
3034 unsigned long arg, int ifreq_len)
3035 {
3036 struct tun_file *tfile = file->private_data;
3037 struct net *net = sock_net(&tfile->sk);
3038 struct tun_struct *tun;
3039 void __user* argp = (void __user*)arg;
3040 unsigned int ifindex, carrier;
3041 struct ifreq ifr;
3042 kuid_t owner;
3043 kgid_t group;
3044 int sndbuf;
3045 int vnet_hdr_sz;
3046 int le;
3047 int ret;
3048 bool do_notify = false;
3049
3050 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
3051 (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
3052 if (copy_from_user(&ifr, argp, ifreq_len))
3053 return -EFAULT;
3054 } else {
3055 memset(&ifr, 0, sizeof(ifr));
3056 }
3057 if (cmd == TUNGETFEATURES) {
/* Currently this just means: "what IFF flags are valid?".
 * This is needed because we never checked for invalid flags on
 * TUNSETIFF.
 */
3062 return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
3063 TUN_FEATURES, (unsigned int __user*)argp);
3064 } else if (cmd == TUNSETQUEUE) {
3065 return tun_set_queue(file, &ifr);
3066 } else if (cmd == SIOCGSKNS) {
3067 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3068 return -EPERM;
3069 return open_related_ns(&net->ns, get_net_ns);
3070 }
3071
3072 rtnl_lock();
3073
3074 tun = tun_get(tfile);
3075 if (cmd == TUNSETIFF) {
3076 ret = -EEXIST;
3077 if (tun)
3078 goto unlock;
3079
3080 ifr.ifr_name[IFNAMSIZ-1] = '\0';
3081
3082 ret = tun_set_iff(net, file, &ifr);
3083
3084 if (ret)
3085 goto unlock;
3086
3087 if (copy_to_user(argp, &ifr, ifreq_len))
3088 ret = -EFAULT;
3089 goto unlock;
3090 }
3091 if (cmd == TUNSETIFINDEX) {
3092 ret = -EPERM;
3093 if (tun)
3094 goto unlock;
3095
3096 ret = -EFAULT;
3097 if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3098 goto unlock;
3099
3100 ret = 0;
3101 tfile->ifindex = ifindex;
3102 goto unlock;
3103 }
3104
3105 ret = -EBADFD;
3106 if (!tun)
3107 goto unlock;
3108
3109 netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
3110
3111 net = dev_net(tun->dev);
3112 ret = 0;
3113 switch (cmd) {
3114 case TUNGETIFF:
3115 tun_get_iff(tun, &ifr);
3116
3117 if (tfile->detached)
3118 ifr.ifr_flags |= IFF_DETACH_QUEUE;
3119 if (!tfile->socket.sk->sk_filter)
3120 ifr.ifr_flags |= IFF_NOFILTER;
3121
3122 if (copy_to_user(argp, &ifr, ifreq_len))
3123 ret = -EFAULT;
3124 break;
3125
3126 case TUNSETNOCSUM:
/* Disable/Enable checksum */

/* [unimplemented] */
3130 netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
3131 arg ? "disabled" : "enabled");
3132 break;
3133
3134 case TUNSETPERSIST:
/* Disable/Enable persist mode. Get an extra reference to the
 * module to prevent the module being unprobed.
 */
3138 if (arg && !(tun->flags & IFF_PERSIST)) {
3139 tun->flags |= IFF_PERSIST;
3140 __module_get(THIS_MODULE);
3141 do_notify = true;
3142 }
3143 if (!arg && (tun->flags & IFF_PERSIST)) {
3144 tun->flags &= ~IFF_PERSIST;
3145 module_put(THIS_MODULE);
3146 do_notify = true;
3147 }
3148
3149 netif_info(tun, drv, tun->dev, "persist %s\n",
3150 arg ? "enabled" : "disabled");
3151 break;
3152
3153 case TUNSETOWNER:
/* Set owner of the device */
3155 owner = make_kuid(current_user_ns(), arg);
3156 if (!uid_valid(owner)) {
3157 ret = -EINVAL;
3158 break;
3159 }
3160 tun->owner = owner;
3161 do_notify = true;
3162 netif_info(tun, drv, tun->dev, "owner set to %u\n",
3163 from_kuid(&init_user_ns, tun->owner));
3164 break;
3165
3166 case TUNSETGROUP:
/* Set group of the device */
3168 group = make_kgid(current_user_ns(), arg);
3169 if (!gid_valid(group)) {
3170 ret = -EINVAL;
3171 break;
3172 }
3173 tun->group = group;
3174 do_notify = true;
3175 netif_info(tun, drv, tun->dev, "group set to %u\n",
3176 from_kgid(&init_user_ns, tun->group));
3177 break;
3178
3179 case TUNSETLINK:
/* Only allow setting the type when the interface is down */
3181 if (tun->dev->flags & IFF_UP) {
3182 netif_info(tun, drv, tun->dev,
3183 "Linktype set failed because interface is up\n");
3184 ret = -EBUSY;
3185 } else {
3186 ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
3187 tun->dev);
3188 ret = notifier_to_errno(ret);
3189 if (ret) {
3190 netif_info(tun, drv, tun->dev,
3191 "Refused to change device type\n");
3192 break;
3193 }
3194 tun->dev->type = (int) arg;
3195 tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
3196 netif_info(tun, drv, tun->dev, "linktype set to %d\n",
3197 tun->dev->type);
3198 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
3199 tun->dev);
3200 }
3201 break;
3202
3203 case TUNSETDEBUG:
3204 tun->msg_enable = (u32)arg;
3205 break;
3206
3207 case TUNSETOFFLOAD:
3208 ret = set_offload(tun, arg);
3209 break;
3210
3211 case TUNSETTXFILTER:
/* Can be set only for TAPs */
3213 ret = -EINVAL;
3214 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3215 break;
3216 ret = update_filter(&tun->txflt, (void __user *)arg);
3217 break;
3218
3219 case SIOCGIFHWADDR:
/* Get hw address */
3221 dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
3222 if (copy_to_user(argp, &ifr, ifreq_len))
3223 ret = -EFAULT;
3224 break;
3225
3226 case SIOCSIFHWADDR:
/* Set hw address */
3228 ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
3229 break;
3230
3231 case TUNGETSNDBUF:
3232 sndbuf = tfile->socket.sk->sk_sndbuf;
3233 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
3234 ret = -EFAULT;
3235 break;
3236
3237 case TUNSETSNDBUF:
3238 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
3239 ret = -EFAULT;
3240 break;
3241 }
3242 if (sndbuf <= 0) {
3243 ret = -EINVAL;
3244 break;
3245 }
3246
3247 tun->sndbuf = sndbuf;
3248 tun_set_sndbuf(tun);
3249 break;
3250
3251 case TUNGETVNETHDRSZ:
3252 vnet_hdr_sz = tun->vnet_hdr_sz;
3253 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3254 ret = -EFAULT;
3255 break;
3256
3257 case TUNSETVNETHDRSZ:
3258 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3259 ret = -EFAULT;
3260 break;
3261 }
3262 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3263 ret = -EINVAL;
3264 break;
3265 }
3266
3267 tun->vnet_hdr_sz = vnet_hdr_sz;
3268 break;
3269
3270 case TUNGETVNETLE:
3271 le = !!(tun->flags & TUN_VNET_LE);
3272 if (put_user(le, (int __user *)argp))
3273 ret = -EFAULT;
3274 break;
3275
3276 case TUNSETVNETLE:
3277 if (get_user(le, (int __user *)argp)) {
3278 ret = -EFAULT;
3279 break;
3280 }
3281 if (le)
3282 tun->flags |= TUN_VNET_LE;
3283 else
3284 tun->flags &= ~TUN_VNET_LE;
3285 break;
3286
3287 case TUNGETVNETBE:
3288 ret = tun_get_vnet_be(tun, argp);
3289 break;
3290
3291 case TUNSETVNETBE:
3292 ret = tun_set_vnet_be(tun, argp);
3293 break;
3294
3295 case TUNATTACHFILTER:
/* Can be set only for TAPs */
3297 ret = -EINVAL;
3298 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3299 break;
3300 ret = -EFAULT;
3301 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
3302 break;
3303
3304 ret = tun_attach_filter(tun);
3305 break;
3306
3307 case TUNDETACHFILTER:
/* Can be set only for TAPs */
3309 ret = -EINVAL;
3310 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3311 break;
3312 ret = 0;
3313 tun_detach_filter(tun, tun->numqueues);
3314 break;
3315
3316 case TUNGETFILTER:
3317 ret = -EINVAL;
3318 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3319 break;
3320 ret = -EFAULT;
3321 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
3322 break;
3323 ret = 0;
3324 break;
3325
3326 case TUNSETSTEERINGEBPF:
3327 ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
3328 break;
3329
3330 case TUNSETFILTEREBPF:
3331 ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3332 break;
3333
3334 case TUNSETCARRIER:
3335 ret = -EFAULT;
3336 if (copy_from_user(&carrier, argp, sizeof(carrier)))
3337 goto unlock;
3338
3339 ret = tun_net_change_carrier(tun->dev, (bool)carrier);
3340 break;
3341
3342 case TUNGETDEVNETNS:
3343 ret = -EPERM;
3344 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3345 goto unlock;
3346 ret = open_related_ns(&net->ns, get_net_ns);
3347 break;
3348
3349 default:
3350 ret = -EINVAL;
3351 break;
3352 }
3353
3354 if (do_notify)
3355 netdev_state_change(tun->dev);
3356
3357 unlock:
3358 rtnl_unlock();
3359 if (tun)
3360 tun_put(tun);
3361 return ret;
3362 }
3363
3364 static long tun_chr_ioctl(struct file *file,
3365 unsigned int cmd, unsigned long arg)
3366 {
3367 return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
3368 }
3369
3370 #ifdef CONFIG_COMPAT
3371 static long tun_chr_compat_ioctl(struct file *file,
3372 unsigned int cmd, unsigned long arg)
3373 {
3374 switch (cmd) {
3375 case TUNSETIFF:
3376 case TUNGETIFF:
3377 case TUNSETTXFILTER:
3378 case TUNGETSNDBUF:
3379 case TUNSETSNDBUF:
3380 case SIOCGIFHWADDR:
3381 case SIOCSIFHWADDR:
3382 arg = (unsigned long)compat_ptr(arg);
3383 break;
3384 default:
3385 arg = (compat_ulong_t)arg;
3386 break;
3387 }
3388
/*
 * compat_ifreq is shorter than ifreq, so we must not access beyond
 * the end of that structure. All fields that are used in this
 * driver are compatible though, we don't need to convert the
 * contents.
 */
3395 return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
3396 }
3397 #endif
3398
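/* FASYNC registration so the file owner can receive SIGIO on I/O readiness */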
3399 static int tun_chr_fasync(int fd, struct file *file, int on)
3400 {
3401 struct tun_file *tfile = file->private_data;
3402 int ret;
3403
3404 if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
3405 goto out;
3406
3407 if (on) {
3408 __f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
3409 tfile->flags |= TUN_FASYNC;
3410 } else
3411 tfile->flags &= ~TUN_FASYNC;
3412 ret = 0;
3413 out:
3414 return ret;
3415 }
3416
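/* open() on /dev/net/tun: allocate the per-queue tun_file and its socket */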
3417 static int tun_chr_open(struct inode *inode, struct file * file)
3418 {
3419 struct net *net = current->nsproxy->net_ns;
3420 struct tun_file *tfile;
3421
3422 tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
3423 &tun_proto, 0);
3424 if (!tfile)
3425 return -ENOMEM;
3426 if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3427 sk_free(&tfile->sk);
3428 return -ENOMEM;
3429 }
3430
3431 mutex_init(&tfile->napi_mutex);
3432 RCU_INIT_POINTER(tfile->tun, NULL);
3433 tfile->flags = 0;
3434 tfile->ifindex = 0;
3435
3436 init_waitqueue_head(&tfile->socket.wq.wait);
3437
3438 tfile->socket.file = file;
3439 tfile->socket.ops = &tun_socket_ops;
3440
3441 sock_init_data(&tfile->socket, &tfile->sk);
3442
3443 tfile->sk.sk_write_space = tun_sock_write_space;
3444 tfile->sk.sk_sndbuf = INT_MAX;
3445
3446 file->private_data = tfile;
3447 INIT_LIST_HEAD(&tfile->next);
3448
3449 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
3450
3451 return 0;
3452 }
3453
3454 static int tun_chr_close(struct inode *inode, struct file *file)
3455 {
3456 struct tun_file *tfile = file->private_data;
3457
3458 tun_detach(tfile, true);
3459
3460 return 0;
3461 }
3462
3463 #ifdef CONFIG_PROC_FS
3464 static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
3465 {
3466 struct tun_file *tfile = file->private_data;
3467 struct tun_struct *tun;
3468 struct ifreq ifr;
3469
3470 memset(&ifr, 0, sizeof(ifr));
3471
3472 rtnl_lock();
3473 tun = tun_get(tfile);
3474 if (tun)
3475 tun_get_iff(tun, &ifr);
3476 rtnl_unlock();
3477
3478 if (tun)
3479 tun_put(tun);
3480
3481 seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
3482 }
3483 #endif
3484
3485 static const struct file_operations tun_fops = {
3486 .owner = THIS_MODULE,
3487 .llseek = no_llseek,
3488 .read_iter = tun_chr_read_iter,
3489 .write_iter = tun_chr_write_iter,
3490 .poll = tun_chr_poll,
3491 .unlocked_ioctl = tun_chr_ioctl,
3492 #ifdef CONFIG_COMPAT
3493 .compat_ioctl = tun_chr_compat_ioctl,
3494 #endif
3495 .open = tun_chr_open,
3496 .release = tun_chr_close,
3497 .fasync = tun_chr_fasync,
3498 #ifdef CONFIG_PROC_FS
3499 .show_fdinfo = tun_chr_show_fdinfo,
3500 #endif
3501 };
3502
3503 static struct miscdevice tun_miscdev = {
3504 .minor = TUN_MINOR,
3505 .name = "tun",
3506 .nodename = "net/tun",
3507 .fops = &tun_fops,
3508 };
3509
/* ethtool interface */
3511
3512 static void tun_default_link_ksettings(struct net_device *dev,
3513 struct ethtool_link_ksettings *cmd)
3514 {
3515 ethtool_link_ksettings_zero_link_mode(cmd, supported);
3516 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
3517 cmd->base.speed = SPEED_10;
3518 cmd->base.duplex = DUPLEX_FULL;
3519 cmd->base.port = PORT_TP;
3520 cmd->base.phy_address = 0;
3521 cmd->base.autoneg = AUTONEG_DISABLE;
3522 }
3523
3524 static int tun_get_link_ksettings(struct net_device *dev,
3525 struct ethtool_link_ksettings *cmd)
3526 {
3527 struct tun_struct *tun = netdev_priv(dev);
3528
3529 memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
3530 return 0;
3531 }
3532
3533 static int tun_set_link_ksettings(struct net_device *dev,
3534 const struct ethtool_link_ksettings *cmd)
3535 {
3536 struct tun_struct *tun = netdev_priv(dev);
3537
3538 memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
3539 return 0;
3540 }
3541
3542 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3543 {
3544 struct tun_struct *tun = netdev_priv(dev);
3545
3546 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
3547 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
3548
3549 switch (tun->flags & TUN_TYPE_MASK) {
3550 case IFF_TUN:
3551 strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
3552 break;
3553 case IFF_TAP:
3554 strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
3555 break;
3556 }
3557 }
3558
3559 static u32 tun_get_msglevel(struct net_device *dev)
3560 {
3561 struct tun_struct *tun = netdev_priv(dev);
3562
3563 return tun->msg_enable;
3564 }
3565
3566 static void tun_set_msglevel(struct net_device *dev, u32 value)
3567 {
3568 struct tun_struct *tun = netdev_priv(dev);
3569
3570 tun->msg_enable = value;
3571 }
3572
3573 static int tun_get_coalesce(struct net_device *dev,
3574 struct ethtool_coalesce *ec,
3575 struct kernel_ethtool_coalesce *kernel_coal,
3576 struct netlink_ext_ack *extack)
3577 {
3578 struct tun_struct *tun = netdev_priv(dev);
3579
3580 ec->rx_max_coalesced_frames = tun->rx_batched;
3581
3582 return 0;
3583 }
3584
3585 static int tun_set_coalesce(struct net_device *dev,
3586 struct ethtool_coalesce *ec,
3587 struct kernel_ethtool_coalesce *kernel_coal,
3588 struct netlink_ext_ack *extack)
3589 {
3590 struct tun_struct *tun = netdev_priv(dev);
3591
3592 if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
3593 tun->rx_batched = NAPI_POLL_WEIGHT;
3594 else
3595 tun->rx_batched = ec->rx_max_coalesced_frames;
3596
3597 return 0;
3598 }
3599
3600 static const struct ethtool_ops tun_ethtool_ops = {
3601 .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
3602 .get_drvinfo = tun_get_drvinfo,
3603 .get_msglevel = tun_get_msglevel,
3604 .set_msglevel = tun_set_msglevel,
3605 .get_link = ethtool_op_get_link,
3606 .get_ts_info = ethtool_op_get_ts_info,
3607 .get_coalesce = tun_get_coalesce,
3608 .set_coalesce = tun_set_coalesce,
3609 .get_link_ksettings = tun_get_link_ksettings,
3610 .set_link_ksettings = tun_set_link_ksettings,
3611 };
3612
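/* Resize every queue's ptr_ring after a tx_queue_len change */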
3613 static int tun_queue_resize(struct tun_struct *tun)
3614 {
3615 struct net_device *dev = tun->dev;
3616 struct tun_file *tfile;
3617 struct ptr_ring **rings;
3618 int n = tun->numqueues + tun->numdisabled;
3619 int ret, i;
3620
3621 rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
3622 if (!rings)
3623 return -ENOMEM;
3624
3625 for (i = 0; i < tun->numqueues; i++) {
3626 tfile = rtnl_dereference(tun->tfiles[i]);
3627 rings[i] = &tfile->tx_ring;
3628 }
3629 list_for_each_entry(tfile, &tun->disabled, next)
3630 rings[i++] = &tfile->tx_ring;
3631
3632 ret = ptr_ring_resize_multiple(rings, n,
3633 dev->tx_queue_len, GFP_KERNEL,
3634 tun_ptr_free);
3635
3636 kfree(rings);
3637 return ret;
3638 }
3639
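/* Netdevice notifier: resize rings on queue-length changes and wake
 * writers when the device comes up.
 */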
3640 static int tun_device_event(struct notifier_block *unused,
3641 unsigned long event, void *ptr)
3642 {
3643 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3644 struct tun_struct *tun = netdev_priv(dev);
3645 int i;
3646
3647 if (dev->rtnl_link_ops != &tun_link_ops)
3648 return NOTIFY_DONE;
3649
3650 switch (event) {
3651 case NETDEV_CHANGE_TX_QUEUE_LEN:
3652 if (tun_queue_resize(tun))
3653 return NOTIFY_BAD;
3654 break;
3655 case NETDEV_UP:
3656 for (i = 0; i < tun->numqueues; i++) {
3657 struct tun_file *tfile;
3658
3659 tfile = rtnl_dereference(tun->tfiles[i]);
3660 tfile->socket.sk->sk_write_space(tfile->socket.sk);
3661 }
3662 break;
3663 default:
3664 break;
3665 }
3666
3667 return NOTIFY_DONE;
3668 }
3669
3670 static struct notifier_block tun_notifier_block __read_mostly = {
3671 .notifier_call = tun_device_event,
3672 };
3673
3674 static int __init tun_init(void)
3675 {
3676 int ret = 0;
3677
3678 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3679
3680 ret = rtnl_link_register(&tun_link_ops);
3681 if (ret) {
3682 pr_err("Can't register link_ops\n");
3683 goto err_linkops;
3684 }
3685
3686 ret = misc_register(&tun_miscdev);
3687 if (ret) {
3688 pr_err("Can't register misc device %d\n", TUN_MINOR);
3689 goto err_misc;
3690 }
3691
3692 ret = register_netdevice_notifier(&tun_notifier_block);
3693 if (ret) {
3694 pr_err("Can't register netdevice notifier\n");
3695 goto err_notifier;
3696 }
3697
3698 return 0;
3699
3700 err_notifier:
3701 misc_deregister(&tun_miscdev);
3702 err_misc:
3703 rtnl_link_unregister(&tun_link_ops);
3704 err_linkops:
3705 return ret;
3706 }
3707
3708 static void tun_cleanup(void)
3709 {
3710 misc_deregister(&tun_miscdev);
3711 rtnl_link_unregister(&tun_link_ops);
3712 unregister_netdevice_notifier(&tun_notifier_block);
3713 }
3714
/* Get an underlying socket object from tun file.  Returns error unless file
 * is attached to a device.  The returned object works like a packet socket,
 * it can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible
 * for holding a reference to the file for as long as the socket is in use. */
3719 struct socket *tun_get_socket(struct file *file)
3720 {
3721 struct tun_file *tfile;
3722 if (file->f_op != &tun_fops)
3723 return ERR_PTR(-EINVAL);
3724 tfile = file->private_data;
3725 if (!tfile)
3726 return ERR_PTR(-EBADFD);
3727 return &tfile->socket;
3728 }
3729 EXPORT_SYMBOL_GPL(tun_get_socket);
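
/* Illustrative note: vhost-net is the main in-kernel consumer; it resolves a
 * tun fd handed over from userspace with tun_get_socket() and then calls
 * sock->ops->sendmsg()/recvmsg() on the result (see drivers/vhost/net.c).
 */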
3730
3731 struct ptr_ring *tun_get_tx_ring(struct file *file)
3732 {
3733 struct tun_file *tfile;
3734
3735 if (file->f_op != &tun_fops)
3736 return ERR_PTR(-EINVAL);
3737 tfile = file->private_data;
3738 if (!tfile)
3739 return ERR_PTR(-EBADFD);
3740 return &tfile->tx_ring;
3741 }
3742 EXPORT_SYMBOL_GPL(tun_get_tx_ring);
3743
3744 module_init(tun_init);
3745 module_exit(tun_cleanup);
3746 MODULE_DESCRIPTION(DRV_DESCRIPTION);
3747 MODULE_AUTHOR(DRV_COPYRIGHT);
3748 MODULE_LICENSE("GPL");
3749 MODULE_ALIAS_MISCDEV(TUN_MINOR);
3750 MODULE_ALIAS("devname:net/tun");