/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif

#define TUN_HEADROOM 256
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE	0x80000000
#define TUN_VNET_BE	0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int	count;    /* Number of addrs. Zero means disabled */
	u32		mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket-related structures (except sock_fprog and
 * tap_filter) so it can serve as one transmit queue for the tuntap device.
 * The sock_fprog and tap_filter are kept in tun_struct since they are used
 * for filtering for the netdevice, not for a specific queue (at least I
 * didn't see the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to
 * the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved to tun_file, the socket filter, sndbuf and
 * vnet header size are restored when a file is attached to a persistent
 * device, preserving its behavior.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int		numqueues;
	unsigned int		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

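/* The tx_ring holds pointers to both sk_buffs and xdp_frames. The
 * TUN_XDP_FLAG bit is ORed into the pointer to tag xdp_frames so that the
 * consumer side can tell the two apart; these helpers set, test and clear
 * that tag bit.
 */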
bool tun_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_frame);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);

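/* NAPI receive path: skbs queued on sk_write_queue by the write/sendmsg
 * side are spliced onto a private list and fed to the GRO engine, up to
 * the NAPI budget; any leftovers are spliced back for the next poll.
 */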
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en)
{
	tfile->napi_enabled = napi_en;
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
		mutex_init(&tfile->napi_mutex);
	}
}

static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_struct *tun)
{
	return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

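/* Flows are hashed into TUN_NUM_FLOW_ENTRIES (1024) buckets, so masking
 * the rxhash with 0x3ff yields the bucket index.
 */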
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & 0x3ff;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

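/* Timer callback: walk all flow buckets, expire entries that have not been
 * updated for ageing_time jiffies, and re-arm the timer for the earliest
 * remaining expiry if any entries survive.
 */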
static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

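/* Record the rxhash-to-queue mapping for a packet received from userspace,
 * creating a new flow entry (and arming the GC timer) when the hash is not
 * yet known.
 */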
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/**
 * Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash first. The reason we do not
 * check the rxq no. is that some cards (e.g. 82599) choose the rxq based
 * on the txq where the last packet of the flow went out. As the userspace
 * application moves between processors, we may get a different rxq no.
 * here. If we could not get the rxhash, then we hope the rxq no. may help
 * here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e) {
			tun_flow_save_rps_rxhash(e, txq);
			txq = e->queue_index;
		} else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u16 ret = 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % tun->numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

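/* Detach tfile from its tun device. If clean, the queue is dropped and its
 * resources are released; otherwise the queue is only disabled and parked
 * on the device's disabled list for a later re-attach.
 */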
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tun, tfile);
		tun_napi_del(tun, tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tun, tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tun, tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

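/* Attach an open tun fd (tfile) as one queue of the device: re-attach the
 * socket filter for persistent devices, size the tx_ring to the device
 * tx_queue_len, and register XDP rxq info for a newly attached queue.
 */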
static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (!err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi);
	}

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
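/* Each MAC address is hashed with ether_crc(); the top six bits of the CRC
 * index one bit in the 64-bit multicast mask (two u32 words).
 */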
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	netif_tx_start_all_queues(dev);

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile;

		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		if (rxhash) {
			struct tun_flow_entry *e;
			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
					  rxhash);
			if (e)
				tun_flow_save_rps_rxhash(e, rxhash);
		}
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (txq >= tun->numqueues)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tun_poll_controller(struct net_device *dev)
{
	/*
	 * Tun only receives frames when:
	 * 1) the char device endpoint gets data from user space
	 * 2) the tun socket gets a sendmsg call from user space
	 * If NAPI is not enabled, since both of those are synchronous
	 * operations, we are guaranteed never to have pending data when we poll
	 * for it so there is nothing to do here but return.
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 * If NAPI is enabled, however, we need to schedule polling for all
	 * queues unless we are using napi_gro_frags(), which we call in
	 * process context and not in NAPI context.
	 */
	struct tun_struct *tun = netdev_priv(dev);

	if (tun->flags & IFF_NAPI) {
		struct tun_file *tfile;
		int i;

		if (tun_napi_frags_enabled(tun))
			return;

		rcu_read_lock();
		for (i = 0; i < tun->numqueues; i++) {
			tfile = rcu_dereference(tun->tfiles[i]);
			if (tfile->napi_enabled)
				napi_schedule(&tfile->napi);
		}
		rcu_read_unlock();
	}
	return;
}
#endif

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped  = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}

static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *old_prog;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}

static u32 tun_xdp_query(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = rtnl_dereference(tun->xdp_prog);
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = tun_xdp_query(dev);
		xdp->prog_attached = !!xdp->prog_id;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
};

static void __tun_xdp_flush_tfile(struct tun_file *tfile)
{
	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
}

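/* ndo_xdp_xmit: enqueue a batch of xdp_frames on one tx_ring (picked by
 * the current CPU), tagging each pointer so the reader can distinguish
 * them from sk_buffs; returns the number of frames actually queued.
 */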
static int tun_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;
	int drops = 0;
	int cnt = n;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues) {
		rcu_read_unlock();
		return -ENXIO; /* Caller will free/return all frames */
	}

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdp = frames[i];
		/* Encode the XDP flag into the lowest bit so the consumer
		 * can distinguish an XDP buffer from an sk_buff.
		 */
		void *frame = tun_xdp_to_ptr(xdp);

		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
			this_cpu_inc(tun->pcpu_stats->tx_dropped);
			xdp_return_frame_rx_napi(xdp);
			drops++;
		}
	}
	spin_unlock(&tfile->tx_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__tun_xdp_flush_tfile(tfile);

	rcu_read_unlock();
	return cnt - drops;
}

static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = convert_to_xdp_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
}

static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_bpf		= tun_xdp,
	.ndo_xdp_xmit		= tun_xdp_xmit,
};

static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}

#define MIN_MTU 68
#define MAX_MTU 65535

/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;

	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		break;
	}

	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}

static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
{
	struct sock *sk = tfile->socket.sk;

	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
}

/* Character device part */

/* Poll */
static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	struct sock *sk;
	__poll_t mask = 0;

	if (!tun)
		return EPOLLERR;

	sk = tfile->socket.sk;

	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");

	poll_wait(file, sk_sleep(sk), wait);

	if (!ptr_ring_empty(&tfile->tx_ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
	 * guarantee EPOLLOUT to be raised by either here or
	 * tun_sock_write_space(). Then process could get notification
	 * after it writes to a down device and meets -EIO.
	 */
	if (tun_sock_writeable(tun, tfile) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     tun_sock_writeable(tun, tfile)))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = EPOLLERR;

	tun_put(tun);
	return mask;
}

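/* Build an skb for the IFF_NAPI_FRAGS path: the first iov segment maps to
 * the linear area of the skb obtained from napi_get_frags(), and a page
 * fragment is reserved for each further segment. The actual data is copied
 * in afterwards by the caller (see tun_get_user()).
 */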
static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
					    size_t len,
					    const struct iov_iter *it)
{
	struct sk_buff *skb;
	size_t linear;
	int err;
	int i;

	if (it->nr_segs > MAX_SKB_FRAGS + 1)
		return ERR_PTR(-ENOMEM);

	local_bh_disable();
	skb = napi_get_frags(&tfile->napi);
	local_bh_enable();
	if (!skb)
		return ERR_PTR(-ENOMEM);

	linear = iov_iter_single_seg_count(it);
	err = __skb_grow(skb, linear);
	if (err)
		goto free;

	skb->len = len;
	skb->data_len = len - linear;
	skb->truesize += skb->data_len;

	for (i = 1; i < it->nr_segs; i++) {
		struct page_frag *pfrag = &current->task_frag;
		size_t fragsz = it->iov[i].iov_len;

		if (fragsz == 0 || fragsz > PAGE_SIZE) {
			err = -EINVAL;
			goto free;
		}

		if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
			err = -ENOMEM;
			goto free;
		}

		skb_fill_page_desc(skb, i - 1, pfrag->page,
				   pfrag->offset, fragsz);
		page_ref_inc(pfrag->page);
		pfrag->offset += fragsz;
	}

	return skb;
free:
	/* frees skb and all frags allocated with napi_alloc_frag() */
	napi_free_frags(&tfile->napi);
	return ERR_PTR(err);
}

/* prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, 0);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

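/* Batched receive: when rx_batched is set, up to rx_batched skbs are
 * parked on sk_write_queue and pushed to the stack in one burst, either
 * when the batch fills up or when the sender signals no more data.
 */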
static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue)))
			netif_receive_skb(nskb);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}

static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
		return false;

	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	if (!noblock)
		return false;

	if (zerocopy)
		return false;

	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return false;

	return true;
}

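/* Fast path for small, non-zerocopy TAP writes: copy the packet straight
 * into a page fragment, run the attached XDP program on it if any, and
 * only then wrap the buffer in an skb with build_skb().
 */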
static struct sk_buff *tun_build_skb(struct tun_struct *tun,
				     struct tun_file *tfile,
				     struct iov_iter *from,
				     struct virtio_net_hdr *hdr,
				     int len, int *skb_xdp)
{
	struct page_frag *alloc_frag = &current->task_frag;
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	unsigned int delta = 0;
	char *buf;
	size_t copied;
	int err, pad = TUN_RX_PAD;

	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog)
		pad += TUN_HEADROOM;
	buflen += SKB_DATA_ALIGN(len + pad);
	rcu_read_unlock();

	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return ERR_PTR(-EFAULT);

	/* There's a small window where XDP may be set after the check
	 * of xdp_prog above; this should be rare and for simplicity
	 * we do XDP on the skb in case the headroom is not enough.
	 */
	if (hdr->gso_type || !xdp_prog)
		*skb_xdp = 1;
	else
		*skb_xdp = 0;

	local_bh_disable();
	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog && !*skb_xdp) {
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		xdp.data_hard_start = buf;
		xdp.data = buf + pad;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + len;
		xdp.rxq = &tfile->xdp_rxq;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_REDIRECT:
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
			xdp_do_flush_map();
			if (err)
				goto err_redirect;
			rcu_read_unlock();
			local_bh_enable();
			return NULL;
		case XDP_TX:
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
			if (tun_xdp_tx(tun->dev, &xdp))
				goto err_redirect;
			rcu_read_unlock();
			local_bh_enable();
			return NULL;
		case XDP_PASS:
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			break;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(tun->dev, xdp_prog, act);
			/* fall through */
		case XDP_DROP:
			goto err_xdp;
		}
	}

	skb = build_skb(buf, buflen);
	if (!skb) {
		rcu_read_unlock();
		local_bh_enable();
		return ERR_PTR(-ENOMEM);
	}

	skb_reserve(skb, pad - delta);
	skb_put(skb, len);
	get_page(alloc_frag->page);
	alloc_frag->offset += buflen;

	rcu_read_unlock();
	local_bh_enable();

	return skb;

err_redirect:
	put_page(alloc_frag->page);
err_xdp:
	rcu_read_unlock();
	local_bh_enable();
	this_cpu_inc(tun->pcpu_stats->rx_dropped);
	return NULL;
}

/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	struct tun_pcpu_stats *stats;
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
	bool frags = tun_napi_frags_enabled(tun);

	if (!(tun->dev->flags & IFF_UP))
		return -EIO;

	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;

		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
		/* Packets that are not easy to process here (e.g. gso or
		 * jumbo packets) are handled after the skb was created,
		 * with the generic XDP routine.
		 */
		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
		if (IS_ERR(skb)) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			return PTR_ERR(skb);
		}
		if (!skb)
			return total_len;
	} else {
		if (!zerocopy) {
			copylen = len;
			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
				linear = good_linear;
			else
				linear = tun16_to_cpu(tun, gso.hdr_len);
		}

		if (frags) {
			mutex_lock(&tfile->napi_mutex);
			skb = tun_napi_alloc_frags(tfile, copylen, from);
			/* tun_napi_alloc_frags() enforces a layout for the skb.
			 * If zerocopy is enabled, then this layout will be
			 * overwritten by zerocopy_sg_from_iter().
			 */
			zerocopy = false;
		} else {
			skb = tun_alloc_skb(tfile, align, copylen, linear,
					    noblock);
		}

		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) != -EAGAIN)
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
			if (frags)
				mutex_unlock(&tfile->napi_mutex);
			return PTR_ERR(skb);
		}

		if (zerocopy)
			err = zerocopy_sg_from_iter(skb, from);
		else
			err = skb_copy_datagram_from_iter(skb, 0, from, len);

		if (err) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			kfree_skb(skb);
			if (frags) {
				tfile->napi.skb = NULL;
				mutex_unlock(&tfile->napi_mutex);
			}

			return -EFAULT;
		}
	}

	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
		kfree_skb(skb);
		if (frags) {
			tfile->napi.skb = NULL;
			mutex_unlock(&tfile->napi_mutex);
		}

		return -EINVAL;
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case IFF_TAP:
		if (!frags)
			skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;
		uarg->callback(uarg, false);
	}

	skb_reset_network_header(skb);
	skb_probe_transport_header(skb, 0);

	if (skb_xdp) {
		struct bpf_prog *xdp_prog;
		int ret;

		local_bh_disable();
		rcu_read_lock();
		xdp_prog = rcu_dereference(tun->xdp_prog);
		if (xdp_prog) {
			ret = do_xdp_generic(xdp_prog, skb);
			if (ret != XDP_PASS) {
				rcu_read_unlock();
				local_bh_enable();
761876c8
JW
1936 return total_len;
1937 }
1938 }
1939 rcu_read_unlock();
6547e387 1940 local_bh_enable();
761876c8
JW
1941 }
1942
cf1a1e07
PA
1943 /* Compute the costly rx hash only if needed for flow updates.
1944 * There is a small chance of out-of-order delivery while the
1945 * steering program is switched; not worth optimizing for.
1946 */
1947 if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1948 !tfile->detached)
96f84061 1949 rxhash = __skb_get_hash_symmetric(skb);
94317099 1950
90e33d45
PP
1951 if (frags) {
1952 /* Exercise flow dissector code path. */
1953 u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
1954
010f245b 1955 if (unlikely(headlen > skb_headlen(skb))) {
90e33d45
PP
1956 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1957 napi_free_frags(&tfile->napi);
1958 mutex_unlock(&tfile->napi_mutex);
1959 WARN_ON(1);
1960 return -ENOMEM;
1961 }
1962
1963 local_bh_disable();
1964 napi_gro_frags(&tfile->napi);
1965 local_bh_enable();
1966 mutex_unlock(&tfile->napi_mutex);
aec72f33 1967 } else if (tfile->napi_enabled) {
94317099
PP
1968 struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1969 int queue_len;
1970
1971 spin_lock_bh(&queue->lock);
1972 __skb_queue_tail(queue, skb);
1973 queue_len = skb_queue_len(queue);
1974 spin_unlock(&queue->lock);
1975
1976 if (!more || queue_len > NAPI_POLL_WEIGHT)
1977 napi_schedule(&tfile->napi);
1978
1979 local_bh_enable();
1980 } else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
1981 tun_rx_batched(tun, tfile, skb, more);
1982 } else {
1983 netif_rx_ni(skb);
1984 }
6aa20a22 1985
608b9977
PA
1986 stats = get_cpu_ptr(tun->pcpu_stats);
1987 u64_stats_update_begin(&stats->syncp);
1988 stats->rx_packets++;
1989 stats->rx_bytes += len;
1990 u64_stats_update_end(&stats->syncp);
1991 put_cpu_ptr(stats);
1da177e4 1992
96f84061
JW
1993 if (rxhash)
1994 tun_flow_update(tun, rxhash, tfile);
1995
0690899b 1996 return total_len;
6aa20a22 1997}
1da177e4 1998
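/* A minimal userspace sketch of the layout tun_get_user() expects on each
 * write: an optional struct tun_pi (absent with IFF_NO_PI), then a
 * virtio_net_hdr of vnet_hdr_sz bytes (only with IFF_VNET_HDR), then the
 * packet itself. tun_write_ipv4() is illustrative, not part of the driver,
 * and assumes a plain IFF_TUN fd with PI enabled and no vnet header.
 */
#include <arpa/inet.h>      /* htons() */
#include <linux/if_ether.h> /* ETH_P_IP */
#include <linux/if_tun.h>   /* struct tun_pi */
#include <sys/uio.h>        /* writev(), struct iovec */

static ssize_t tun_write_ipv4(int fd, const void *pkt, size_t len)
{
	struct tun_pi pi = { .flags = 0, .proto = htons(ETH_P_IP) };
	struct iovec iov[2] = {
		{ .iov_base = &pi,         .iov_len = sizeof(pi) },
		{ .iov_base = (void *)pkt, .iov_len = len },
	};

	/* tun_get_user() consumes the PI header first, then the payload */
	return writev(fd, iov, 2);
}
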
f5ff53b4 1999static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
1da177e4 2000{
33dccbb0 2001 struct file *file = iocb->ki_filp;
54f968d6 2002 struct tun_file *tfile = file->private_data;
9484dc74 2003 struct tun_struct *tun = tun_get(tfile);
631ab46b 2004 ssize_t result;
1da177e4
LT
2005
2006 if (!tun)
2007 return -EBADFD;
2008
5503fcec
JW
2009 result = tun_get_user(tun, tfile, NULL, from,
2010 file->f_flags & O_NONBLOCK, false);
631ab46b
EB
2011
2012 tun_put(tun);
2013 return result;
1da177e4
LT
2014}
2015
fc72d1d5
JW
2016static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2017 struct tun_file *tfile,
1ffcbc85 2018 struct xdp_frame *xdp_frame,
fc72d1d5
JW
2019 struct iov_iter *iter)
2020{
2021 int vnet_hdr_sz = 0;
1ffcbc85 2022 size_t size = xdp_frame->len;
fc72d1d5
JW
2023 struct tun_pcpu_stats *stats;
2024 size_t ret;
2025
2026 if (tun->flags & IFF_VNET_HDR) {
2027 struct virtio_net_hdr gso = { 0 };
2028
2029 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2030 if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2031 return -EINVAL;
2032 if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2033 sizeof(gso)))
2034 return -EFAULT;
2035 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2036 }
2037
1ffcbc85 2038 ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
fc72d1d5
JW
2039
2040 stats = get_cpu_ptr(tun->pcpu_stats);
2041 u64_stats_update_begin(&stats->syncp);
2042 stats->tx_packets++;
2043 stats->tx_bytes += ret;
2044 u64_stats_update_end(&stats->syncp);
2045 put_cpu_ptr(tun->pcpu_stats);
2046
2047 return ret;
2048}
2049
1da177e4 2050/* Put packet to the user space buffer */
6f7c156c 2051static ssize_t tun_put_user(struct tun_struct *tun,
54f968d6 2052 struct tun_file *tfile,
6f7c156c 2053 struct sk_buff *skb,
e0b46d0e 2054 struct iov_iter *iter)
1da177e4
LT
2055{
2056 struct tun_pi pi = { 0, skb->protocol };
608b9977 2057 struct tun_pcpu_stats *stats;
e0b46d0e 2058 ssize_t total;
8c847d25 2059 int vlan_offset = 0;
a8f9bfdf 2060 int vlan_hlen = 0;
2eb783c4 2061 int vnet_hdr_sz = 0;
a8f9bfdf 2062
df8a39de 2063 if (skb_vlan_tag_present(skb))
a8f9bfdf 2064 vlan_hlen = VLAN_HLEN;
1da177e4 2065
40630b82 2066 if (tun->flags & IFF_VNET_HDR)
e1edab87 2067 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1da177e4 2068
e0b46d0e
HX
2069 total = skb->len + vlan_hlen + vnet_hdr_sz;
2070
40630b82 2071 if (!(tun->flags & IFF_NO_PI)) {
e0b46d0e 2072 if (iov_iter_count(iter) < sizeof(pi))
1da177e4
LT
2073 return -EINVAL;
2074
e0b46d0e
HX
2075 total += sizeof(pi);
2076 if (iov_iter_count(iter) < total) {
1da177e4
LT
2077 /* Packet will be stripped */
2078 pi.flags |= TUN_PKT_STRIP;
2079 }
6aa20a22 2080
e0b46d0e 2081 if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
1da177e4 2082 return -EFAULT;
6aa20a22 2083 }
1da177e4 2084
2eb783c4 2085 if (vnet_hdr_sz) {
9403cd7c 2086 struct virtio_net_hdr gso;
34166093 2087
e0b46d0e 2088 if (iov_iter_count(iter) < vnet_hdr_sz)
f43798c2
RR
2089 return -EINVAL;
2090
3e9e40e7 2091 if (virtio_net_hdr_from_skb(skb, &gso,
fd3a8862
WB
2092 tun_is_little_endian(tun), true,
2093 vlan_hlen)) {
f43798c2 2094 struct skb_shared_info *sinfo = skb_shinfo(skb);
34166093
MR
2095 pr_err("unexpected GSO type: "
2096 "0x%x, gso_size %d, hdr_len %d\n",
2097 sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2098 tun16_to_cpu(tun, gso.hdr_len));
2099 print_hex_dump(KERN_ERR, "tun: ",
2100 DUMP_PREFIX_NONE,
2101 16, 1, skb->head,
2102 min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2103 WARN_ON_ONCE(1);
2104 return -EINVAL;
2105 }
f43798c2 2106
e0b46d0e 2107 if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
f43798c2 2108 return -EFAULT;
8c847d25
JW
2109
2110 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
f43798c2
RR
2111 }
2112
a8f9bfdf 2113 if (vlan_hlen) {
e0b46d0e 2114 int ret;
aff3d70a 2115 struct veth veth;
6680ec68
JW
2116
2117 veth.h_vlan_proto = skb->vlan_proto;
df8a39de 2118 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
6680ec68
JW
2119
2120 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
6680ec68 2121
e0b46d0e
HX
2122 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2123 if (ret || !iov_iter_count(iter))
6680ec68
JW
2124 goto done;
2125
e0b46d0e
HX
2126 ret = copy_to_iter(&veth, sizeof(veth), iter);
2127 if (ret != sizeof(veth) || !iov_iter_count(iter))
6680ec68
JW
2128 goto done;
2129 }
1da177e4 2130
e0b46d0e 2131 skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
1da177e4 2132
6680ec68 2133done:
608b9977
PA
2134 /* Caller is in process context. */
2135 stats = get_cpu_ptr(tun->pcpu_stats);
2136 u64_stats_update_begin(&stats->syncp);
2137 stats->tx_packets++;
2138 stats->tx_bytes += skb->len + vlan_hlen;
2139 u64_stats_update_end(&stats->syncp);
2140 put_cpu_ptr(tun->pcpu_stats);
1da177e4
LT
2141
2142 return total;
2143}
2144
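/* The read-side counterpart as a userspace sketch (tun_read_one() is
 * illustrative, not part of the driver): with IFF_NO_PI and IFF_VNET_HDR
 * set, each read() yields a virtio_net_hdr followed by the frame. The
 * header region is vnet_hdr_sz bytes (query with TUNGETVNETHDRSZ), which
 * may be larger than sizeof(struct virtio_net_hdr) -- matching the
 * iov_iter_advance() above.
 */
#include <linux/virtio_net.h> /* struct virtio_net_hdr */
#include <stdio.h>
#include <unistd.h>           /* read() */

static void tun_read_one(int fd, int vnet_hdr_sz)
{
	unsigned char buf[65536];
	ssize_t n = read(fd, buf, sizeof(buf));

	if (n < (ssize_t)vnet_hdr_sz)
		return;

	struct virtio_net_hdr *gso = (struct virtio_net_hdr *)buf;

	printf("frame: %zd bytes, gso_type 0x%x\n",
	       n - vnet_hdr_sz, gso->gso_type);
	/* packet data starts at buf + vnet_hdr_sz */
}
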
fc72d1d5 2145static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
1576d986
JW
2146{
2147 DECLARE_WAITQUEUE(wait, current);
fc72d1d5 2148 void *ptr = NULL;
f48cc6b2 2149 int error = 0;
1576d986 2150
fc72d1d5
JW
2151 ptr = ptr_ring_consume(&tfile->tx_ring);
2152 if (ptr)
1576d986
JW
2153 goto out;
2154 if (noblock) {
f48cc6b2 2155 error = -EAGAIN;
1576d986
JW
2156 goto out;
2157 }
2158
2159 add_wait_queue(&tfile->wq.wait, &wait);
2160 current->state = TASK_INTERRUPTIBLE;
2161
2162 while (1) {
fc72d1d5
JW
2163 ptr = ptr_ring_consume(&tfile->tx_ring);
2164 if (ptr)
1576d986
JW
2165 break;
2166 if (signal_pending(current)) {
f48cc6b2 2167 error = -ERESTARTSYS;
1576d986
JW
2168 break;
2169 }
2170 if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
f48cc6b2 2171 error = -EFAULT;
1576d986
JW
2172 break;
2173 }
2174
2175 schedule();
2176 }
2177
2178 current->state = TASK_RUNNING;
2179 remove_wait_queue(&tfile->wq.wait, &wait);
2180
2181out:
f48cc6b2 2182 *err = error;
fc72d1d5 2183 return ptr;
1576d986
JW
2184}
2185
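/* Seen from userspace, the noblock/-EAGAIN branch above is the O_NONBLOCK
 * case. A hedged sketch that waits in poll() instead of sleeping inside
 * tun_ring_recv(); tun_read_wait() is illustrative only.
 */
#include <errno.h>
#include <poll.h>
#include <unistd.h>

static ssize_t tun_read_wait(int fd, void *buf, size_t len)
{
	for (;;) {
		ssize_t n = read(fd, buf, len);

		if (n >= 0 || errno != EAGAIN)
			return n;

		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		/* block until the device queues another packet */
		if (poll(&pfd, 1, -1) < 0 && errno != EINTR)
			return -1;
	}
}
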
54f968d6 2186static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
9b067034 2187 struct iov_iter *to,
fc72d1d5 2188 int noblock, void *ptr)
1da177e4 2189{
9b067034 2190 ssize_t ret;
1576d986 2191 int err;
1da177e4 2192
3872baf6 2193 tun_debug(KERN_INFO, tun, "tun_do_read\n");
1da177e4 2194
c33ee15b 2195 if (!iov_iter_count(to)) {
fc72d1d5 2196 tun_ptr_free(ptr);
9b067034 2197 return 0;
c33ee15b 2198 }
1da177e4 2199
fc72d1d5 2200 if (!ptr) {
ac77cfd4 2201 /* Read frames from ring */
fc72d1d5
JW
2202 ptr = tun_ring_recv(tfile, noblock, &err);
2203 if (!ptr)
ac77cfd4
JW
2204 return err;
2205 }
e0b46d0e 2206
1ffcbc85
JDB
2207 if (tun_is_xdp_frame(ptr)) {
2208 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
fc72d1d5 2209
1ffcbc85 2210 ret = tun_put_user_xdp(tun, tfile, xdpf, to);
03993094 2211 xdp_return_frame(xdpf);
fc72d1d5
JW
2212 } else {
2213 struct sk_buff *skb = ptr;
2214
2215 ret = tun_put_user(tun, tfile, skb, to);
2216 if (unlikely(ret < 0))
2217 kfree_skb(skb);
2218 else
2219 consume_skb(skb);
2220 }
1da177e4 2221
05c2828c
MT
2222 return ret;
2223}
2224
9b067034 2225static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
05c2828c
MT
2226{
2227 struct file *file = iocb->ki_filp;
2228 struct tun_file *tfile = file->private_data;
9484dc74 2229 struct tun_struct *tun = tun_get(tfile);
9b067034 2230 ssize_t len = iov_iter_count(to), ret;
05c2828c
MT
2231
2232 if (!tun)
2233 return -EBADFD;
ac77cfd4 2234 ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
42404c09 2235 ret = min_t(ssize_t, ret, len);
d0b7da8a
ZYW
2236 if (ret > 0)
2237 iocb->ki_pos = ret;
631ab46b 2238 tun_put(tun);
1da177e4
LT
2239 return ret;
2240}
2241
cd5681d7 2242static void tun_prog_free(struct rcu_head *rcu)
96f84061 2243{
cd5681d7 2244 struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
96f84061
JW
2245
2246 bpf_prog_destroy(prog->prog);
2247 kfree(prog);
2248}
2249
9d6474e4
JW
2250static int __tun_set_ebpf(struct tun_struct *tun,
2251 struct tun_prog __rcu **prog_p,
cd5681d7 2252 struct bpf_prog *prog)
96f84061 2253{
cd5681d7 2254 struct tun_prog *old, *new = NULL;
96f84061
JW
2255
2256 if (prog) {
2257 new = kmalloc(sizeof(*new), GFP_KERNEL);
2258 if (!new)
2259 return -ENOMEM;
2260 new->prog = prog;
2261 }
2262
124da8f6 2263 spin_lock_bh(&tun->lock);
cd5681d7 2264 old = rcu_dereference_protected(*prog_p,
124da8f6 2265 lockdep_is_held(&tun->lock));
cd5681d7 2266 rcu_assign_pointer(*prog_p, new);
124da8f6 2267 spin_unlock_bh(&tun->lock);
96f84061
JW
2268
2269 if (old)
cd5681d7 2270 call_rcu(&old->rcu, tun_prog_free);
96f84061
JW
2271
2272 return 0;
2273}
2274
96442e42
JW
2275static void tun_free_netdev(struct net_device *dev)
2276{
2277 struct tun_struct *tun = netdev_priv(dev);
2278
4008e97f 2279 BUG_ON(!(list_empty(&tun->disabled)));
608b9977 2280 free_percpu(tun->pcpu_stats);
96442e42 2281 tun_flow_uninit(tun);
5dbbaf2d 2282 security_tun_dev_free_security(tun->security);
cd5681d7 2283 __tun_set_ebpf(tun, &tun->steering_prog, NULL);
aff3d70a 2284 __tun_set_ebpf(tun, &tun->filter_prog, NULL);
96442e42
JW
2285}
2286
1da177e4
LT
2287static void tun_setup(struct net_device *dev)
2288{
2289 struct tun_struct *tun = netdev_priv(dev);
2290
0625c883
EB
2291 tun->owner = INVALID_UID;
2292 tun->group = INVALID_GID;
4e24f2dd 2293 tun_default_link_ksettings(dev, &tun->link_ksettings);
1da177e4 2294
1da177e4 2295 dev->ethtool_ops = &tun_ethtool_ops;
cf124db5
DM
2296 dev->needs_free_netdev = true;
2297 dev->priv_destructor = tun_free_netdev;
016adb72
JW
2298 /* We prefer our own queue length */
2299 dev->tx_queue_len = TUN_READQ_SIZE;
1da177e4
LT
2300}
2301
f019a7a5
EB
2302/* Trivial set of netlink ops to allow deleting tun or tap
2303 * device with netlink.
2304 */
a8b8a889
MS
2305static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2306 struct netlink_ext_ack *extack)
f019a7a5
EB
2307{
2308 return -EINVAL;
2309}
2310
1ec010e7
SD
2311static size_t tun_get_size(const struct net_device *dev)
2312{
2313 BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2314 BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2315
2316 return nla_total_size(sizeof(uid_t)) + /* OWNER */
2317 nla_total_size(sizeof(gid_t)) + /* GROUP */
2318 nla_total_size(sizeof(u8)) + /* TYPE */
2319 nla_total_size(sizeof(u8)) + /* PI */
2320 nla_total_size(sizeof(u8)) + /* VNET_HDR */
2321 nla_total_size(sizeof(u8)) + /* PERSIST */
2322 nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
2323 nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
2324 nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
2325 0;
2326}
2327
2328static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2329{
2330 struct tun_struct *tun = netdev_priv(dev);
2331
2332 if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2333 goto nla_put_failure;
2334 if (uid_valid(tun->owner) &&
2335 nla_put_u32(skb, IFLA_TUN_OWNER,
2336 from_kuid_munged(current_user_ns(), tun->owner)))
2337 goto nla_put_failure;
2338 if (gid_valid(tun->group) &&
2339 nla_put_u32(skb, IFLA_TUN_GROUP,
2340 from_kgid_munged(current_user_ns(), tun->group)))
2341 goto nla_put_failure;
2342 if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2343 goto nla_put_failure;
2344 if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2345 goto nla_put_failure;
2346 if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2347 goto nla_put_failure;
2348 if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2349 !!(tun->flags & IFF_MULTI_QUEUE)))
2350 goto nla_put_failure;
2351 if (tun->flags & IFF_MULTI_QUEUE) {
2352 if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2353 goto nla_put_failure;
2354 if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2355 tun->numdisabled))
2356 goto nla_put_failure;
2357 }
2358
2359 return 0;
2360
2361nla_put_failure:
2362 return -EMSGSIZE;
2363}
2364
f019a7a5
EB
2365static struct rtnl_link_ops tun_link_ops __read_mostly = {
2366 .kind = DRV_NAME,
2367 .priv_size = sizeof(struct tun_struct),
2368 .setup = tun_setup,
2369 .validate = tun_validate,
1ec010e7
SD
2370 .get_size = tun_get_size,
2371 .fill_info = tun_fill_info,
f019a7a5
EB
2372};
2373
33dccbb0
HX
2374static void tun_sock_write_space(struct sock *sk)
2375{
54f968d6 2376 struct tun_file *tfile;
43815482 2377 wait_queue_head_t *wqueue;
33dccbb0
HX
2378
2379 if (!sock_writeable(sk))
2380 return;
2381
9cd3e072 2382 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
33dccbb0
HX
2383 return;
2384
43815482
ED
2385 wqueue = sk_sleep(sk);
2386 if (wqueue && waitqueue_active(wqueue))
a9a08845
LT
2387 wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2388 EPOLLWRNORM | EPOLLWRBAND);
c722c625 2389
54f968d6
JW
2390 tfile = container_of(sk, struct tun_file, sk);
2391 kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
33dccbb0
HX
2392}
2393
1b784140 2394static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
05c2828c 2395{
54f968d6
JW
2396 int ret;
2397 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
9484dc74 2398 struct tun_struct *tun = tun_get(tfile);
54f968d6
JW
2399
2400 if (!tun)
2401 return -EBADFD;
f5ff53b4 2402
c0371da6 2403 ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
5503fcec
JW
2404 m->msg_flags & MSG_DONTWAIT,
2405 m->msg_flags & MSG_MORE);
54f968d6
JW
2406 tun_put(tun);
2407 return ret;
05c2828c
MT
2408}
2409
1b784140 2410static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
05c2828c
MT
2411 int flags)
2412{
54f968d6 2413 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
9484dc74 2414 struct tun_struct *tun = tun_get(tfile);
fc72d1d5 2415 void *ptr = m->msg_control;
05c2828c 2416 int ret;
54f968d6 2417
c33ee15b
WX
2418 if (!tun) {
2419 ret = -EBADFD;
fc72d1d5 2420 goto out_free;
c33ee15b 2421 }
54f968d6 2422
eda29772 2423 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
3811ae76 2424 ret = -EINVAL;
c33ee15b 2425 goto out_put_tun;
3811ae76 2426 }
eda29772
RC
2427 if (flags & MSG_ERRQUEUE) {
2428 ret = sock_recv_errqueue(sock->sk, m, total_len,
2429 SOL_PACKET, TUN_TX_TIMESTAMP);
2430 goto out;
2431 }
fc72d1d5 2432 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
87897931 2433 if (ret > (ssize_t)total_len) {
42404c09
DM
2434 m->msg_flags |= MSG_TRUNC;
2435 ret = flags & MSG_TRUNC ? ret : total_len;
2436 }
3811ae76 2437out:
54f968d6 2438 tun_put(tun);
05c2828c 2439 return ret;
c33ee15b
WX
2440
2441out_put_tun:
2442 tun_put(tun);
fc72d1d5
JW
2443out_free:
2444 tun_ptr_free(ptr);
c33ee15b 2445 return ret;
05c2828c
MT
2446}
2447
fc72d1d5
JW
2448static int tun_ptr_peek_len(void *ptr)
2449{
2450 if (likely(ptr)) {
1ffcbc85
JDB
2451 if (tun_is_xdp_frame(ptr)) {
2452 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
fc72d1d5 2453
1ffcbc85 2454 return xdpf->len;
fc72d1d5
JW
2455 }
2456 return __skb_array_len_with_tag(ptr);
2457 } else {
2458 return 0;
2459 }
2460}
2461
1576d986
JW
2462static int tun_peek_len(struct socket *sock)
2463{
2464 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2465 struct tun_struct *tun;
2466 int ret = 0;
2467
9484dc74 2468 tun = tun_get(tfile);
1576d986
JW
2469 if (!tun)
2470 return 0;
2471
fc72d1d5 2472 ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
1576d986
JW
2473 tun_put(tun);
2474
2475 return ret;
2476}
2477
05c2828c
MT
2478/* Ops structure to mimic raw sockets with tun */
2479static const struct proto_ops tun_socket_ops = {
1576d986 2480 .peek_len = tun_peek_len,
05c2828c
MT
2481 .sendmsg = tun_sendmsg,
2482 .recvmsg = tun_recvmsg,
2483};
2484
33dccbb0
HX
2485static struct proto tun_proto = {
2486 .name = "tun",
2487 .owner = THIS_MODULE,
54f968d6 2488 .obj_size = sizeof(struct tun_file),
33dccbb0 2489};
f019a7a5 2490
980c9e8c
DW
2491static int tun_flags(struct tun_struct *tun)
2492{
031f5e03 2493 return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
980c9e8c
DW
2494}
2495
2496static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
2497 char *buf)
2498{
2499 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2500 return sprintf(buf, "0x%x\n", tun_flags(tun));
2501}
2502
2503static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
2504 char *buf)
2505{
2506 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
0625c883
EB
2507 return uid_valid(tun->owner)?
2508 sprintf(buf, "%u\n",
2509 from_kuid_munged(current_user_ns(), tun->owner)):
2510 sprintf(buf, "-1\n");
980c9e8c
DW
2511}
2512
2513static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
2514 char *buf)
2515{
2516 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
0625c883
EB
2517 return gid_valid(tun->group) ?
2518 sprintf(buf, "%u\n",
2519 from_kgid_munged(current_user_ns(), tun->group)):
2520 sprintf(buf, "-1\n");
980c9e8c
DW
2521}
2522
2523static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
2524static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
2525static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
2526
c4d33e24
TI
2527static struct attribute *tun_dev_attrs[] = {
2528 &dev_attr_tun_flags.attr,
2529 &dev_attr_owner.attr,
2530 &dev_attr_group.attr,
2531 NULL
2532};
2533
2534static const struct attribute_group tun_attr_group = {
2535 .attrs = tun_dev_attrs
2536};
2537
d647a591 2538static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1da177e4
LT
2539{
2540 struct tun_struct *tun;
54f968d6 2541 struct tun_file *tfile = file->private_data;
1da177e4
LT
2542 struct net_device *dev;
2543 int err;
2544
7c0c3b1a
JW
2545 if (tfile->detached)
2546 return -EINVAL;
2547
90e33d45
PP
2548 if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
2549 if (!capable(CAP_NET_ADMIN))
2550 return -EPERM;
2551
2552 if (!(ifr->ifr_flags & IFF_NAPI) ||
2553 (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
2554 return -EINVAL;
2555 }
2556
74a3e5a7
EB
2557 dev = __dev_get_by_name(net, ifr->ifr_name);
2558 if (dev) {
f85ba780
DW
2559 if (ifr->ifr_flags & IFF_TUN_EXCL)
2560 return -EBUSY;
74a3e5a7
EB
2561 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
2562 tun = netdev_priv(dev);
2563 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
2564 tun = netdev_priv(dev);
2565 else
2566 return -EINVAL;
2567
8e6d91ae 2568 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
40630b82 2569 !!(tun->flags & IFF_MULTI_QUEUE))
8e6d91ae
JW
2570 return -EINVAL;
2571
cde8b15f 2572 if (tun_not_capable(tun))
2b980dbd 2573 return -EPERM;
5dbbaf2d 2574 err = security_tun_dev_open(tun->security);
2b980dbd
PM
2575 if (err < 0)
2576 return err;
2577
94317099
PP
2578 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2579 ifr->ifr_flags & IFF_NAPI);
a7385ba2
EB
2580 if (err < 0)
2581 return err;
4008e97f 2582
40630b82 2583 if (tun->flags & IFF_MULTI_QUEUE &&
e8dbad66
JW
2584 (tun->numqueues + tun->numdisabled > 1)) {
2585 /* One or more queue has already been attached, no need
2586 * to initialize the device again.
2587 */
83c1f36f 2588 netdev_state_change(dev);
e8dbad66
JW
2589 return 0;
2590 }
9fffc5c6
SD
2591
2592 tun->flags = (tun->flags & ~TUN_FEATURES) |
2593 (ifr->ifr_flags & TUN_FEATURES);
83c1f36f
SD
2594
2595 netdev_state_change(dev);
2596 } else {
1da177e4
LT
2597 char *name;
2598 unsigned long flags = 0;
edfb6a14
JW
2599 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2600 MAX_TAP_QUEUES : 1;
1da177e4 2601
c260b772 2602 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
ca6bb5d7 2603 return -EPERM;
2b980dbd
PM
2604 err = security_tun_dev_create();
2605 if (err < 0)
2606 return err;
ca6bb5d7 2607
1da177e4
LT
2608 /* Set dev type */
2609 if (ifr->ifr_flags & IFF_TUN) {
2610 /* TUN device */
40630b82 2611 flags |= IFF_TUN;
1da177e4
LT
2612 name = "tun%d";
2613 } else if (ifr->ifr_flags & IFF_TAP) {
2614 /* TAP device */
40630b82 2615 flags |= IFF_TAP;
1da177e4 2616 name = "tap%d";
6aa20a22 2617 } else
36989b90 2618 return -EINVAL;
6aa20a22 2619
1da177e4
LT
2620 if (*ifr->ifr_name)
2621 name = ifr->ifr_name;
2622
c8d68e6b 2623 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
c835a677
TG
2624 NET_NAME_UNKNOWN, tun_setup, queues,
2625 queues);
edfb6a14 2626
1da177e4
LT
2627 if (!dev)
2628 return -ENOMEM;
0ad646c8 2629 err = dev_get_valid_name(net, dev, name);
5c25f65f 2630 if (err < 0)
0ad646c8 2631 goto err_free_dev;
1da177e4 2632
fc54c658 2633 dev_net_set(dev, net);
f019a7a5 2634 dev->rtnl_link_ops = &tun_link_ops;
fb7589a1 2635 dev->ifindex = tfile->ifindex;
c4d33e24 2636 dev->sysfs_groups[0] = &tun_attr_group;
758e43b7 2637
1da177e4
LT
2638 tun = netdev_priv(dev);
2639 tun->dev = dev;
2640 tun->flags = flags;
f271b2cc 2641 tun->txflt.count = 0;
d9d52b51 2642 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
33dccbb0 2643
eaea34b2 2644 tun->align = NET_SKB_PAD;
54f968d6
JW
2645 tun->filter_attached = false;
2646 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
5503fcec 2647 tun->rx_batched = 0;
96f84061 2648 RCU_INIT_POINTER(tun->steering_prog, NULL);
33dccbb0 2649
608b9977
PA
2650 tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
2651 if (!tun->pcpu_stats) {
2652 err = -ENOMEM;
2653 goto err_free_dev;
2654 }
2655
96442e42
JW
2656 spin_lock_init(&tun->lock);
2657
5dbbaf2d
PM
2658 err = security_tun_dev_alloc_security(&tun->security);
2659 if (err < 0)
608b9977 2660 goto err_free_stat;
2b980dbd 2661
1da177e4 2662 tun_net_init(dev);
944a1376 2663 tun_flow_init(tun);
96442e42 2664
88255375 2665 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
6680ec68
JW
2666 TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
2667 NETIF_F_HW_VLAN_STAG_TX;
2a2bbf17 2668 dev->features = dev->hw_features | NETIF_F_LLTX;
6671b224
FLVC
2669 dev->vlan_features = dev->features &
2670 ~(NETIF_F_HW_VLAN_CTAG_TX |
2671 NETIF_F_HW_VLAN_STAG_TX);
88255375 2672
9fffc5c6
SD
2673 tun->flags = (tun->flags & ~TUN_FEATURES) |
2674 (ifr->ifr_flags & TUN_FEATURES);
2675
4008e97f 2676 INIT_LIST_HEAD(&tun->disabled);
94317099 2677 err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
eb0fb363 2678 if (err < 0)
662ca437 2679 goto err_free_flow;
eb0fb363 2680
1da177e4
LT
2681 err = register_netdevice(tun->dev);
2682 if (err < 0)
662ca437 2683 goto err_detach;
1da177e4
LT
2684 }
2685
af668b3c
MT
2686 netif_carrier_on(tun->dev);
2687
6b8a66ee 2688 tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1da177e4 2689
e35259a9
MK
2690 /* Make sure persistent devices do not get stuck in
2691 * xoff state.
2692 */
2693 if (netif_running(tun->dev))
c8d68e6b 2694 netif_tx_wake_all_queues(tun->dev);
e35259a9 2695
1da177e4
LT
2696 strcpy(ifr->ifr_name, tun->dev->name);
2697 return 0;
2698
662ca437
JW
2699err_detach:
2700 tun_detach_all(dev);
ff244c6b
ED
2701 /* register_netdevice() already called tun_free_netdev() */
2702 goto err_free_dev;
2703
662ca437
JW
2704err_free_flow:
2705 tun_flow_uninit(tun);
2706 security_tun_dev_free_security(tun->security);
608b9977
PA
2707err_free_stat:
2708 free_percpu(tun->pcpu_stats);
662ca437 2709err_free_dev:
1da177e4 2710 free_netdev(dev);
1da177e4
LT
2711 return err;
2712}
2713
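/* The canonical userspace sequence that lands in tun_set_iff(): open the
 * clone device and issue TUNSETIFF. A minimal sketch with error handling
 * reduced to -1; name must point at an IFNAMSIZ-sized buffer and may be
 * empty, in which case the kernel picks "tun%d"/"tap%d" and writes the
 * chosen name back.
 */
#include <fcntl.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int tun_alloc(char *name, short flags) /* e.g. IFF_TAP | IFF_NO_PI */
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = flags;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	strcpy(name, ifr.ifr_name); /* e.g. "tap0" */
	return fd;
}
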
9ce99cf6 2714static void tun_get_iff(struct net *net, struct tun_struct *tun,
876bfd4d 2715 struct ifreq *ifr)
e3b99556 2716{
6b8a66ee 2717 tun_debug(KERN_INFO, tun, "tun_get_iff\n");
e3b99556
MM
2718
2719 strcpy(ifr->ifr_name, tun->dev->name);
2720
980c9e8c 2721 ifr->ifr_flags = tun_flags(tun);
e3b99556 2722
e3b99556
MM
2723}
2724
5228ddc9
RR
2725/* This is like a cut-down ethtool ops, except done via tun fd so no
2726 * privs required. */
88255375 2727static int set_offload(struct tun_struct *tun, unsigned long arg)
5228ddc9 2728{
c8f44aff 2729 netdev_features_t features = 0;
5228ddc9
RR
2730
2731 if (arg & TUN_F_CSUM) {
88255375 2732 features |= NETIF_F_HW_CSUM;
5228ddc9
RR
2733 arg &= ~TUN_F_CSUM;
2734
2735 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
2736 if (arg & TUN_F_TSO_ECN) {
2737 features |= NETIF_F_TSO_ECN;
2738 arg &= ~TUN_F_TSO_ECN;
2739 }
2740 if (arg & TUN_F_TSO4)
2741 features |= NETIF_F_TSO;
2742 if (arg & TUN_F_TSO6)
2743 features |= NETIF_F_TSO6;
2744 arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
2745 }
0c19f846
WB
2746
2747 arg &= ~TUN_F_UFO;
5228ddc9
RR
2748 }
2749
2750 /* This gives the user a way to test for new features in the future by
2751 * trying to set them. */
2752 if (arg)
2753 return -EINVAL;
2754
88255375 2755 tun->set_features = features;
09050957
YI
2756 tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2757 tun->dev->wanted_features |= features;
88255375 2758 netdev_update_features(tun->dev);
5228ddc9
RR
2759
2760 return 0;
2761}
2762
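/* set_offload() is driven by TUNSETOFFLOAD; note the argument is the
 * bitmask itself, not a pointer. A sketch enabling checksum plus TCPv4/v6
 * segmentation offload; as the comment above says, an unknown bit simply
 * makes the ioctl fail, which is how userspace probes for new flags.
 * tun_enable_offloads() is illustrative only.
 */
#include <linux/if_tun.h>
#include <sys/ioctl.h>

static int tun_enable_offloads(int fd)
{
	unsigned long offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;

	return ioctl(fd, TUNSETOFFLOAD, offloads);
}
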
c8d68e6b
JW
2763static void tun_detach_filter(struct tun_struct *tun, int n)
2764{
2765 int i;
2766 struct tun_file *tfile;
2767
2768 for (i = 0; i < n; i++) {
b8deabd3 2769 tfile = rtnl_dereference(tun->tfiles[i]);
8ced425e
HFS
2770 lock_sock(tfile->socket.sk);
2771 sk_detach_filter(tfile->socket.sk);
2772 release_sock(tfile->socket.sk);
c8d68e6b
JW
2773 }
2774
2775 tun->filter_attached = false;
2776}
2777
2778static int tun_attach_filter(struct tun_struct *tun)
2779{
2780 int i, ret = 0;
2781 struct tun_file *tfile;
2782
2783 for (i = 0; i < tun->numqueues; i++) {
b8deabd3 2784 tfile = rtnl_dereference(tun->tfiles[i]);
8ced425e
HFS
2785 lock_sock(tfile->socket.sk);
2786 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2787 release_sock(tfile->socket.sk);
c8d68e6b
JW
2788 if (ret) {
2789 tun_detach_filter(tun, i);
2790 return ret;
2791 }
2792 }
2793
2794 tun->filter_attached = true;
2795 return ret;
2796}
2797
2798static void tun_set_sndbuf(struct tun_struct *tun)
2799{
2800 struct tun_file *tfile;
2801 int i;
2802
2803 for (i = 0; i < tun->numqueues; i++) {
b8deabd3 2804 tfile = rtnl_dereference(tun->tfiles[i]);
c8d68e6b
JW
2805 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2806 }
2807}
2808
cde8b15f
JW
2809static int tun_set_queue(struct file *file, struct ifreq *ifr)
2810{
2811 struct tun_file *tfile = file->private_data;
2812 struct tun_struct *tun;
cde8b15f
JW
2813 int ret = 0;
2814
2815 rtnl_lock();
2816
2817 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
4008e97f 2818 tun = tfile->detached;
5dbbaf2d 2819 if (!tun) {
cde8b15f 2820 ret = -EINVAL;
5dbbaf2d
PM
2821 goto unlock;
2822 }
2823 ret = security_tun_dev_attach_queue(tun->security);
2824 if (ret < 0)
2825 goto unlock;
94317099 2826 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
4008e97f 2827 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
b8deabd3 2828 tun = rtnl_dereference(tfile->tun);
40630b82 2829 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
4008e97f
JW
2830 ret = -EINVAL;
2831 else
2832 __tun_detach(tfile, false);
2833 } else
cde8b15f
JW
2834 ret = -EINVAL;
2835
83c1f36f
SD
2836 if (ret >= 0)
2837 netdev_state_change(tun->dev);
2838
5dbbaf2d 2839unlock:
cde8b15f
JW
2840 rtnl_unlock();
2841 return ret;
2842}
2843
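/* tun_set_queue() backs TUNSETQUEUE on a queue fd of an IFF_MULTI_QUEUE
 * device: IFF_DETACH_QUEUE parks the queue on tun->disabled, and
 * IFF_ATTACH_QUEUE re-attaches it later. A hedged sketch; the helper
 * name is illustrative.
 */
#include <linux/if.h>
#include <linux/if_tun.h>
#include <string.h>
#include <sys/ioctl.h>

static int tun_queue_attach(int queue_fd, int attach)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = attach ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;

	return ioctl(queue_fd, TUNSETQUEUE, &ifr);
}
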
cd5681d7
JW
2844static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
2845 void __user *data)
96f84061
JW
2846{
2847 struct bpf_prog *prog;
2848 int fd;
2849
2850 if (copy_from_user(&fd, data, sizeof(fd)))
2851 return -EFAULT;
2852
2853 if (fd == -1) {
2854 prog = NULL;
2855 } else {
2856 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
2857 if (IS_ERR(prog))
2858 return PTR_ERR(prog);
2859 }
2860
cd5681d7 2861 return __tun_set_ebpf(tun, prog_p, prog);
96f84061
JW
2862}
2863
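/* tun_set_ebpf() reads the program fd through the ioctl argument pointer,
 * and fd == -1 detaches the current program. A sketch for
 * TUNSETSTEERINGEBPF; prog_fd is assumed to come from a prior
 * bpf(BPF_PROG_LOAD) of type BPF_PROG_TYPE_SOCKET_FILTER.
 */
#include <linux/if_tun.h>
#include <sys/ioctl.h>

static int tun_set_steering(int tun_fd, int prog_fd) /* -1 to detach */
{
	return ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
}
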
50857e2a
AB
2864static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
2865 unsigned long arg, int ifreq_len)
1da177e4 2866{
36b50bab 2867 struct tun_file *tfile = file->private_data;
f663706a 2868 struct net *net = sock_net(&tfile->sk);
631ab46b 2869 struct tun_struct *tun;
1da177e4
LT
2870 void __user* argp = (void __user*)arg;
2871 struct ifreq ifr;
0625c883
EB
2872 kuid_t owner;
2873 kgid_t group;
33dccbb0 2874 int sndbuf;
d9d52b51 2875 int vnet_hdr_sz;
fb7589a1 2876 unsigned int ifindex;
1cf8e410 2877 int le;
f271b2cc 2878 int ret;
83c1f36f 2879 bool do_notify = false;
1da177e4 2880
f2780d6d
KT
2881 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
2882 (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
50857e2a 2883 if (copy_from_user(&ifr, argp, ifreq_len))
1da177e4 2884 return -EFAULT;
8bbb1813 2885 } else {
a117dacd 2886 memset(&ifr, 0, sizeof(ifr));
8bbb1813 2887 }
631ab46b
EB
2888 if (cmd == TUNGETFEATURES) {
2889 /* Currently this just means: "what IFF flags are valid?".
2890 * This is needed because we never checked for invalid flags on
031f5e03
MT
2891 * TUNSETIFF.
2892 */
2893 return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
631ab46b 2894 (unsigned int __user*)argp);
f663706a 2895 } else if (cmd == TUNSETQUEUE) {
cde8b15f 2896 return tun_set_queue(file, &ifr);
f663706a
KT
2897 } else if (cmd == SIOCGSKNS) {
2898 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2899 return -EPERM;
2900 return open_related_ns(&net->ns, get_net_ns);
2901 }
631ab46b 2902
c8d68e6b 2903 ret = 0;
876bfd4d
HX
2904 rtnl_lock();
2905
9484dc74 2906 tun = tun_get(tfile);
0f16bc13
GF
2907 if (cmd == TUNSETIFF) {
2908 ret = -EEXIST;
2909 if (tun)
2910 goto unlock;
2911
1da177e4
LT
2912 ifr.ifr_name[IFNAMSIZ-1] = '\0';
2913
f2780d6d 2914 ret = tun_set_iff(net, file, &ifr);
1da177e4 2915
876bfd4d
HX
2916 if (ret)
2917 goto unlock;
1da177e4 2918
50857e2a 2919 if (copy_to_user(argp, &ifr, ifreq_len))
876bfd4d
HX
2920 ret = -EFAULT;
2921 goto unlock;
1da177e4 2922 }
fb7589a1
PE
2923 if (cmd == TUNSETIFINDEX) {
2924 ret = -EPERM;
2925 if (tun)
2926 goto unlock;
2927
2928 ret = -EFAULT;
2929 if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
2930 goto unlock;
2931
2932 ret = 0;
2933 tfile->ifindex = ifindex;
2934 goto unlock;
2935 }
1da177e4 2936
876bfd4d 2937 ret = -EBADFD;
1da177e4 2938 if (!tun)
876bfd4d 2939 goto unlock;
1da177e4 2940
1e588338 2941 tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
1da177e4 2942
631ab46b 2943 ret = 0;
1da177e4 2944 switch (cmd) {
e3b99556 2945 case TUNGETIFF:
9ce99cf6 2946 tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
e3b99556 2947
3d407a80
PE
2948 if (tfile->detached)
2949 ifr.ifr_flags |= IFF_DETACH_QUEUE;
849c9b6f
PE
2950 if (!tfile->socket.sk->sk_filter)
2951 ifr.ifr_flags |= IFF_NOFILTER;
3d407a80 2952
50857e2a 2953 if (copy_to_user(argp, &ifr, ifreq_len))
631ab46b 2954 ret = -EFAULT;
e3b99556
MM
2955 break;
2956
1da177e4
LT
2957 case TUNSETNOCSUM:
2958 /* Disable/Enable checksum */
1da177e4 2959
88255375
MM
2960 /* [unimplemented] */
2961 tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
6b8a66ee 2962 arg ? "disabled" : "enabled");
1da177e4
LT
2963 break;
2964
2965 case TUNSETPERSIST:
54f968d6
JW
2966 /* Disable/Enable persist mode. Keep an extra reference to the
2967 * module to prevent it from being unloaded.
2968 */
40630b82
MT
2969 if (arg && !(tun->flags & IFF_PERSIST)) {
2970 tun->flags |= IFF_PERSIST;
54f968d6 2971 __module_get(THIS_MODULE);
83c1f36f 2972 do_notify = true;
dd38bd85 2973 }
40630b82
MT
2974 if (!arg && (tun->flags & IFF_PERSIST)) {
2975 tun->flags &= ~IFF_PERSIST;
54f968d6 2976 module_put(THIS_MODULE);
83c1f36f 2977 do_notify = true;
54f968d6 2978 }
1da177e4 2979
6b8a66ee
JP
2980 tun_debug(KERN_INFO, tun, "persist %s\n",
2981 arg ? "enabled" : "disabled");
1da177e4
LT
2982 break;
2983
2984 case TUNSETOWNER:
2985 /* Set owner of the device */
0625c883
EB
2986 owner = make_kuid(current_user_ns(), arg);
2987 if (!uid_valid(owner)) {
2988 ret = -EINVAL;
2989 break;
2990 }
2991 tun->owner = owner;
83c1f36f 2992 do_notify = true;
1e588338 2993 tun_debug(KERN_INFO, tun, "owner set to %u\n",
0625c883 2994 from_kuid(&init_user_ns, tun->owner));
1da177e4
LT
2995 break;
2996
8c644623
GG
2997 case TUNSETGROUP:
2998 /* Set group of the device */
0625c883
EB
2999 group = make_kgid(current_user_ns(), arg);
3000 if (!gid_valid(group)) {
3001 ret = -EINVAL;
3002 break;
3003 }
3004 tun->group = group;
83c1f36f 3005 do_notify = true;
1e588338 3006 tun_debug(KERN_INFO, tun, "group set to %u\n",
0625c883 3007 from_kgid(&init_user_ns, tun->group));
8c644623
GG
3008 break;
3009
ff4cc3ac
MK
3010 case TUNSETLINK:
3011 /* Only allow setting the type when the interface is down */
3012 if (tun->dev->flags & IFF_UP) {
6b8a66ee
JP
3013 tun_debug(KERN_INFO, tun,
3014 "Linktype set failed because interface is up\n");
48abfe05 3015 ret = -EBUSY;
ff4cc3ac
MK
3016 } else {
3017 tun->dev->type = (int) arg;
6b8a66ee
JP
3018 tun_debug(KERN_INFO, tun, "linktype set to %d\n",
3019 tun->dev->type);
48abfe05 3020 ret = 0;
ff4cc3ac 3021 }
631ab46b 3022 break;
ff4cc3ac 3023
1da177e4
LT
3024#ifdef TUN_DEBUG
3025 case TUNSETDEBUG:
3026 tun->debug = arg;
3027 break;
3028#endif
5228ddc9 3029 case TUNSETOFFLOAD:
88255375 3030 ret = set_offload(tun, arg);
631ab46b 3031 break;
5228ddc9 3032
f271b2cc
MK
3033 case TUNSETTXFILTER:
3034 /* Can be set only for TAPs */
631ab46b 3035 ret = -EINVAL;
40630b82 3036 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
631ab46b 3037 break;
c0e5a8c2 3038 ret = update_filter(&tun->txflt, (void __user *)arg);
631ab46b 3039 break;
1da177e4
LT
3040
3041 case SIOCGIFHWADDR:
b595076a 3042 /* Get hw address */
f271b2cc
MK
3043 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
3044 ifr.ifr_hwaddr.sa_family = tun->dev->type;
50857e2a 3045 if (copy_to_user(argp, &ifr, ifreq_len))
631ab46b
EB
3046 ret = -EFAULT;
3047 break;
1da177e4
LT
3048
3049 case SIOCSIFHWADDR:
f271b2cc 3050 /* Set hw address */
6b8a66ee
JP
3051 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
3052 ifr.ifr_hwaddr.sa_data);
40102371 3053
40102371 3054 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
631ab46b 3055 break;
33dccbb0
HX
3056
3057 case TUNGETSNDBUF:
54f968d6 3058 sndbuf = tfile->socket.sk->sk_sndbuf;
33dccbb0
HX
3059 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
3060 ret = -EFAULT;
3061 break;
3062
3063 case TUNSETSNDBUF:
3064 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
3065 ret = -EFAULT;
3066 break;
3067 }
93161922
CG
3068 if (sndbuf <= 0) {
3069 ret = -EINVAL;
3070 break;
3071 }
33dccbb0 3072
c8d68e6b
JW
3073 tun->sndbuf = sndbuf;
3074 tun_set_sndbuf(tun);
33dccbb0
HX
3075 break;
3076
d9d52b51
MT
3077 case TUNGETVNETHDRSZ:
3078 vnet_hdr_sz = tun->vnet_hdr_sz;
3079 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3080 ret = -EFAULT;
3081 break;
3082
3083 case TUNSETVNETHDRSZ:
3084 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3085 ret = -EFAULT;
3086 break;
3087 }
3088 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3089 ret = -EINVAL;
3090 break;
3091 }
3092
3093 tun->vnet_hdr_sz = vnet_hdr_sz;
3094 break;
3095
1cf8e410
MT
3096 case TUNGETVNETLE:
3097 le = !!(tun->flags & TUN_VNET_LE);
3098 if (put_user(le, (int __user *)argp))
3099 ret = -EFAULT;
3100 break;
3101
3102 case TUNSETVNETLE:
3103 if (get_user(le, (int __user *)argp)) {
3104 ret = -EFAULT;
3105 break;
3106 }
3107 if (le)
3108 tun->flags |= TUN_VNET_LE;
3109 else
3110 tun->flags &= ~TUN_VNET_LE;
3111 break;
3112
8b8e658b
GK
3113 case TUNGETVNETBE:
3114 ret = tun_get_vnet_be(tun, argp);
3115 break;
3116
3117 case TUNSETVNETBE:
3118 ret = tun_set_vnet_be(tun, argp);
3119 break;
3120
99405162
MT
3121 case TUNATTACHFILTER:
3122 /* Can be set only for TAPs */
3123 ret = -EINVAL;
40630b82 3124 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
99405162
MT
3125 break;
3126 ret = -EFAULT;
54f968d6 3127 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
99405162
MT
3128 break;
3129
c8d68e6b 3130 ret = tun_attach_filter(tun);
99405162
MT
3131 break;
3132
3133 case TUNDETACHFILTER:
3134 /* Can be set only for TAPs */
3135 ret = -EINVAL;
40630b82 3136 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
99405162 3137 break;
c8d68e6b
JW
3138 ret = 0;
3139 tun_detach_filter(tun, tun->numqueues);
99405162
MT
3140 break;
3141
76975e9c
PE
3142 case TUNGETFILTER:
3143 ret = -EINVAL;
40630b82 3144 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
76975e9c
PE
3145 break;
3146 ret = -EFAULT;
3147 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
3148 break;
3149 ret = 0;
3150 break;
3151
96f84061 3152 case TUNSETSTEERINGEBPF:
cd5681d7 3153 ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
96f84061
JW
3154 break;
3155
aff3d70a
JW
3156 case TUNSETFILTEREBPF:
3157 ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3158 break;
3159
1da177e4 3160 default:
631ab46b
EB
3161 ret = -EINVAL;
3162 break;
ee289b64 3163 }
1da177e4 3164
83c1f36f
SD
3165 if (do_notify)
3166 netdev_state_change(tun->dev);
3167
876bfd4d
HX
3168unlock:
3169 rtnl_unlock();
3170 if (tun)
3171 tun_put(tun);
631ab46b 3172 return ret;
1da177e4
LT
3173}
3174
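/* A small sketch of the persistence knobs handled above: TUNSETOWNER and
 * TUNSETGROUP take the id as the argument value, and TUNSETPERSIST pins
 * the device (and, via __module_get(), the module) until cleared.
 * tun_make_persistent() is illustrative, not part of the driver.
 */
#include <linux/if_tun.h>
#include <sys/ioctl.h>

static int tun_make_persistent(int fd, unsigned int uid, unsigned int gid)
{
	if (ioctl(fd, TUNSETOWNER, uid) < 0)
		return -1;
	if (ioctl(fd, TUNSETGROUP, gid) < 0)
		return -1;

	return ioctl(fd, TUNSETPERSIST, 1);
}
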
50857e2a
AB
3175static long tun_chr_ioctl(struct file *file,
3176 unsigned int cmd, unsigned long arg)
3177{
3178 return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
3179}
3180
3181#ifdef CONFIG_COMPAT
3182static long tun_chr_compat_ioctl(struct file *file,
3183 unsigned int cmd, unsigned long arg)
3184{
3185 switch (cmd) {
3186 case TUNSETIFF:
3187 case TUNGETIFF:
3188 case TUNSETTXFILTER:
3189 case TUNGETSNDBUF:
3190 case TUNSETSNDBUF:
3191 case SIOCGIFHWADDR:
3192 case SIOCSIFHWADDR:
3193 arg = (unsigned long)compat_ptr(arg);
3194 break;
3195 default:
3196 arg = (compat_ulong_t)arg;
3197 break;
3198 }
3199
3200 /*
3201 * compat_ifreq is shorter than ifreq, so we must not access beyond
3202 * the end of that structure. All fields that are used in this
3203 * driver are compatible though, we don't need to convert the
3204 * contents.
3205 */
3206 return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
3207}
3208#endif /* CONFIG_COMPAT */
3209
1da177e4
LT
3210static int tun_chr_fasync(int fd, struct file *file, int on)
3211{
54f968d6 3212 struct tun_file *tfile = file->private_data;
1da177e4
LT
3213 int ret;
3214
54f968d6 3215 if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
9d319522 3216 goto out;
6aa20a22 3217
1da177e4 3218 if (on) {
e0b93edd 3219 __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
54f968d6 3220 tfile->flags |= TUN_FASYNC;
6aa20a22 3221 } else
54f968d6 3222 tfile->flags &= ~TUN_FASYNC;
9d319522
JC
3223 ret = 0;
3224out:
9d319522 3225 return ret;
1da177e4
LT
3226}
3227
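/* Enabling SIGIO delivery from userspace ends up in tun_chr_fasync();
 * note the driver already targets the calling task via __f_setown(), so
 * the explicit F_SETOWN below is redundant but conventional. A hedged
 * sketch; the helper name is illustrative.
 */
#include <fcntl.h>
#include <unistd.h>

static int tun_enable_sigio(int fd)
{
	if (fcntl(fd, F_SETOWN, getpid()) < 0)
		return -1;

	return fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC | O_NONBLOCK);
}
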
3228static int tun_chr_open(struct inode *inode, struct file * file)
3229{
140e807d 3230 struct net *net = current->nsproxy->net_ns;
631ab46b 3231 struct tun_file *tfile;
deed49fb 3232
6b8a66ee 3233 DBG1(KERN_INFO, "tunX: tun_chr_open\n");
631ab46b 3234
140e807d 3235 tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
11aa9c28 3236 &tun_proto, 0);
631ab46b
EB
3237 if (!tfile)
3238 return -ENOMEM;
b196d88a
JW
3239 if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3240 sk_free(&tfile->sk);
3241 return -ENOMEM;
3242 }
3243
c956674b 3244 RCU_INIT_POINTER(tfile->tun, NULL);
54f968d6 3245 tfile->flags = 0;
fb7589a1 3246 tfile->ifindex = 0;
54f968d6 3247
54f968d6 3248 init_waitqueue_head(&tfile->wq.wait);
9e641bdc 3249 RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
54f968d6
JW
3250
3251 tfile->socket.file = file;
3252 tfile->socket.ops = &tun_socket_ops;
3253
3254 sock_init_data(&tfile->socket, &tfile->sk);
54f968d6
JW
3255
3256 tfile->sk.sk_write_space = tun_sock_write_space;
3257 tfile->sk.sk_sndbuf = INT_MAX;
3258
631ab46b 3259 file->private_data = tfile;
4008e97f 3260 INIT_LIST_HEAD(&tfile->next);
54f968d6 3261
19a6afb2
JW
3262 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
3263
1da177e4
LT
3264 return 0;
3265}
3266
3267static int tun_chr_close(struct inode *inode, struct file *file)
3268{
631ab46b 3269 struct tun_file *tfile = file->private_data;
1da177e4 3270
c8d68e6b 3271 tun_detach(tfile, true);
1da177e4
LT
3272
3273 return 0;
3274}
3275
93e14b6d 3276#ifdef CONFIG_PROC_FS
9484dc74 3277static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
93e14b6d 3278{
9484dc74 3279 struct tun_file *tfile = file->private_data;
93e14b6d
MY
3280 struct tun_struct *tun;
3281 struct ifreq ifr;
3282
3283 memset(&ifr, 0, sizeof(ifr));
3284
3285 rtnl_lock();
9484dc74 3286 tun = tun_get(tfile);
93e14b6d
MY
3287 if (tun)
3288 tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
3289 rtnl_unlock();
3290
3291 if (tun)
3292 tun_put(tun);
3293
a3816ab0 3294 seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
93e14b6d
MY
3295}
3296#endif
3297
d54b1fdb 3298static const struct file_operations tun_fops = {
6aa20a22 3299 .owner = THIS_MODULE,
1da177e4 3300 .llseek = no_llseek,
9b067034 3301 .read_iter = tun_chr_read_iter,
f5ff53b4 3302 .write_iter = tun_chr_write_iter,
1da177e4 3303 .poll = tun_chr_poll,
50857e2a
AB
3304 .unlocked_ioctl = tun_chr_ioctl,
3305#ifdef CONFIG_COMPAT
3306 .compat_ioctl = tun_chr_compat_ioctl,
3307#endif
1da177e4
LT
3308 .open = tun_chr_open,
3309 .release = tun_chr_close,
93e14b6d
MY
3310 .fasync = tun_chr_fasync,
3311#ifdef CONFIG_PROC_FS
3312 .show_fdinfo = tun_chr_show_fdinfo,
3313#endif
1da177e4
LT
3314};
3315
3316static struct miscdevice tun_miscdev = {
3317 .minor = TUN_MINOR,
3318 .name = "tun",
e454cea2 3319 .nodename = "net/tun",
1da177e4 3320 .fops = &tun_fops,
1da177e4
LT
3321};
3322
3323/* ethtool interface */
3324
4e24f2dd
CW
3325static void tun_default_link_ksettings(struct net_device *dev,
3326 struct ethtool_link_ksettings *cmd)
29ccc49d
PR
3327{
3328 ethtool_link_ksettings_zero_link_mode(cmd, supported);
3329 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
3330 cmd->base.speed = SPEED_10;
3331 cmd->base.duplex = DUPLEX_FULL;
3332 cmd->base.port = PORT_TP;
3333 cmd->base.phy_address = 0;
3334 cmd->base.autoneg = AUTONEG_DISABLE;
4e24f2dd
CW
3335}
3336
3337static int tun_get_link_ksettings(struct net_device *dev,
3338 struct ethtool_link_ksettings *cmd)
3339{
3340 struct tun_struct *tun = netdev_priv(dev);
3341
3342 memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
3343 return 0;
3344}
3345
3346static int tun_set_link_ksettings(struct net_device *dev,
3347 const struct ethtool_link_ksettings *cmd)
3348{
3349 struct tun_struct *tun = netdev_priv(dev);
3350
3351 memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
1da177e4
LT
3352 return 0;
3353}
3354
3355static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3356{
3357 struct tun_struct *tun = netdev_priv(dev);
3358
33a5ba14
RJ
3359 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
3360 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1da177e4
LT
3361
3362 switch (tun->flags & TUN_TYPE_MASK) {
40630b82 3363 case IFF_TUN:
33a5ba14 3364 strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
1da177e4 3365 break;
40630b82 3366 case IFF_TAP:
33a5ba14 3367 strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
1da177e4
LT
3368 break;
3369 }
3370}
3371
3372static u32 tun_get_msglevel(struct net_device *dev)
3373{
3374#ifdef TUN_DEBUG
3375 struct tun_struct *tun = netdev_priv(dev);
3376 return tun->debug;
3377#else
3378 return -EOPNOTSUPP;
3379#endif
3380}
3381
3382static void tun_set_msglevel(struct net_device *dev, u32 value)
3383{
3384#ifdef TUN_DEBUG
3385 struct tun_struct *tun = netdev_priv(dev);
3386 tun->debug = value;
3387#endif
3388}
3389
5503fcec
JW
3390static int tun_get_coalesce(struct net_device *dev,
3391 struct ethtool_coalesce *ec)
3392{
3393 struct tun_struct *tun = netdev_priv(dev);
3394
3395 ec->rx_max_coalesced_frames = tun->rx_batched;
3396
3397 return 0;
3398}
3399
3400static int tun_set_coalesce(struct net_device *dev,
3401 struct ethtool_coalesce *ec)
3402{
3403 struct tun_struct *tun = netdev_priv(dev);
3404
3405 if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
3406 tun->rx_batched = NAPI_POLL_WEIGHT;
3407 else
3408 tun->rx_batched = ec->rx_max_coalesced_frames;
3409
3410 return 0;
3411}
3412
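/* tun_set_coalesce() is reachable through the standard SIOCETHTOOL path
 * (the equivalent of "ethtool -C tunX rx-frames N"). A sketch using the
 * raw ioctl on an ordinary socket fd (assumed AF_INET here); the driver
 * clamps values above NAPI_POLL_WEIGHT. tun_set_rx_batched() is
 * illustrative only.
 */
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/sockios.h>
#include <string.h>
#include <sys/ioctl.h>

static int tun_set_rx_batched(int sock, const char *ifname, __u32 frames)
{
	struct ethtool_coalesce ec;
	struct ifreq ifr;

	memset(&ec, 0, sizeof(ec));
	ec.cmd = ETHTOOL_SCOALESCE;
	ec.rx_max_coalesced_frames = frames;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ec;

	return ioctl(sock, SIOCETHTOOL, &ifr);
}
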
7282d491 3413static const struct ethtool_ops tun_ethtool_ops = {
1da177e4
LT
3414 .get_drvinfo = tun_get_drvinfo,
3415 .get_msglevel = tun_get_msglevel,
3416 .set_msglevel = tun_set_msglevel,
bee31369 3417 .get_link = ethtool_op_get_link,
eda29772 3418 .get_ts_info = ethtool_op_get_ts_info,
5503fcec
JW
3419 .get_coalesce = tun_get_coalesce,
3420 .set_coalesce = tun_set_coalesce,
29ccc49d 3421 .get_link_ksettings = tun_get_link_ksettings,
4e24f2dd 3422 .set_link_ksettings = tun_set_link_ksettings,
1da177e4
LT
3423};
3424
1576d986
JW
3425static int tun_queue_resize(struct tun_struct *tun)
3426{
3427 struct net_device *dev = tun->dev;
3428 struct tun_file *tfile;
5990a305 3429 struct ptr_ring **rings;
1576d986
JW
3430 int n = tun->numqueues + tun->numdisabled;
3431 int ret, i;
3432
5990a305
JW
3433 rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
3434 if (!rings)
1576d986
JW
3435 return -ENOMEM;
3436
3437 for (i = 0; i < tun->numqueues; i++) {
3438 tfile = rtnl_dereference(tun->tfiles[i]);
5990a305 3439 rings[i] = &tfile->tx_ring;
1576d986
JW
3440 }
3441 list_for_each_entry(tfile, &tun->disabled, next)
5990a305 3442 rings[i++] = &tfile->tx_ring;
1576d986 3443
5990a305
JW
3444 ret = ptr_ring_resize_multiple(rings, n,
3445 dev->tx_queue_len, GFP_KERNEL,
fc72d1d5 3446 tun_ptr_free);
1576d986 3447
5990a305 3448 kfree(rings);
1576d986
JW
3449 return ret;
3450}
3451
3452static int tun_device_event(struct notifier_block *unused,
3453 unsigned long event, void *ptr)
3454{
3455 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3456 struct tun_struct *tun = netdev_priv(dev);
3457
86dfb4ac
CG
3458 if (dev->rtnl_link_ops != &tun_link_ops)
3459 return NOTIFY_DONE;
3460
1576d986
JW
3461 switch (event) {
3462 case NETDEV_CHANGE_TX_QUEUE_LEN:
3463 if (tun_queue_resize(tun))
3464 return NOTIFY_BAD;
3465 break;
3466 default:
3467 break;
3468 }
3469
3470 return NOTIFY_DONE;
3471}
3472
3473static struct notifier_block tun_notifier_block __read_mostly = {
3474 .notifier_call = tun_device_event,
3475};
79d17604 3476
1da177e4
LT
3477static int __init tun_init(void)
3478{
3479 int ret = 0;
3480
6b8a66ee 3481 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
1da177e4 3482
f019a7a5 3483 ret = rtnl_link_register(&tun_link_ops);
79d17604 3484 if (ret) {
6b8a66ee 3485 pr_err("Can't register link_ops\n");
f019a7a5 3486 goto err_linkops;
79d17604
PE
3487 }
3488
1da177e4 3489 ret = misc_register(&tun_miscdev);
79d17604 3490 if (ret) {
6b8a66ee 3491 pr_err("Can't register misc device %d\n", TUN_MINOR);
79d17604
PE
3492 goto err_misc;
3493 }
1576d986 3494
5edfbd3c
TZ
3495 ret = register_netdevice_notifier(&tun_notifier_block);
3496 if (ret) {
3497 pr_err("Can't register netdevice notifier\n");
3498 goto err_notifier;
3499 }
3500
f019a7a5 3501 return 0;
5edfbd3c
TZ
3502
3503err_notifier:
3504 misc_deregister(&tun_miscdev);
79d17604 3505err_misc:
f019a7a5
EB
3506 rtnl_link_unregister(&tun_link_ops);
3507err_linkops:
1da177e4
LT
3508 return ret;
3509}
3510
3511static void tun_cleanup(void)
3512{
6aa20a22 3513 misc_deregister(&tun_miscdev);
f019a7a5 3514 rtnl_link_unregister(&tun_link_ops);
1576d986 3515 unregister_netdevice_notifier(&tun_notifier_block);
1da177e4
LT
3516}
3517
05c2828c
MT
3518/* Get an underlying socket object from tun file. Returns error unless file is
3519 * attached to a device. The returned object works like a packet socket, it
3520 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
3521 * holding a reference to the file for as long as the socket is in use. */
3522struct socket *tun_get_socket(struct file *file)
3523{
6e914fc7 3524 struct tun_file *tfile;
05c2828c
MT
3525 if (file->f_op != &tun_fops)
3526 return ERR_PTR(-EINVAL);
6e914fc7
JW
3527 tfile = file->private_data;
3528 if (!tfile)
05c2828c 3529 return ERR_PTR(-EBADFD);
54f968d6 3530 return &tfile->socket;
05c2828c
MT
3531}
3532EXPORT_SYMBOL_GPL(tun_get_socket);
3533
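/* How an in-kernel consumer such as vhost-net uses the helper: resolve a
 * tun fd to its socket, then drive it with sock_sendmsg()/sock_recvmsg().
 * A hedged sketch, not part of the driver, with the caller expected to
 * hold the file reference for the socket's whole lifetime.
 */
#include <linux/file.h> /* fget(), fput() */

static int tun_sock_send_sketch(int fd, struct msghdr *msg)
{
	struct file *file = fget(fd);
	struct socket *sock;
	int err;

	if (!file)
		return -EBADF;

	sock = tun_get_socket(file);
	if (IS_ERR(sock)) {
		fput(file);
		return PTR_ERR(sock);
	}

	err = sock_sendmsg(sock, msg);
	fput(file);
	return err;
}
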
5990a305 3534struct ptr_ring *tun_get_tx_ring(struct file *file)
83339c6b
JW
3535{
3536 struct tun_file *tfile;
3537
3538 if (file->f_op != &tun_fops)
3539 return ERR_PTR(-EINVAL);
3540 tfile = file->private_data;
3541 if (!tfile)
3542 return ERR_PTR(-EBADFD);
5990a305 3543 return &tfile->tx_ring;
83339c6b 3544}
5990a305 3545EXPORT_SYMBOL_GPL(tun_get_tx_ring);
83339c6b 3546
1da177e4
LT
3547module_init(tun_init);
3548module_exit(tun_cleanup);
3549MODULE_DESCRIPTION(DRV_DESCRIPTION);
3550MODULE_AUTHOR(DRV_COPYRIGHT);
3551MODULE_LICENSE("GPL");
3552MODULE_ALIAS_MISCDEV(TUN_MINOR);
578454ff 3553MODULE_ALIAS("devname:net/tun");