// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <linux/uaccess.h>
#include <linux/bitmap.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <trace/events/xdp.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>
#include <net/netdev_rx_queue.h>

#include "net-sysfs.h"
static DEFINE_SPINLOCK(ptype_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
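
/* Illustrative sketch (not part of the original file): a pure reader
 * walking the device list under RCU, as the locking rules above allow.
 * my_count_running_devs() is a hypothetical helper, not a kernel API.
 *
 *	static int my_count_running_devs(struct net *net)
 *	{
 *		struct net_device *dev;
 *		int n = 0;
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, dev)
 *			if (dev->flags & IFF_UP)
 *				n++;
 *		rcu_read_unlock();
 *		return n;
 *	}
 */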
static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}
static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
static inline void rps_lock_irqsave(struct softnet_data *sd,
				    unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_save(*flags);
}

static inline void rps_lock_irq_disable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_lock_irq(&sd->input_pkt_queue.lock);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
}

static inline void rps_unlock_irq_restore(struct softnet_data *sd,
					  unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(*flags);
}

static inline void rps_unlock_irq_enable(struct softnet_data *sd)
{
	if (IS_ENABLED(CONFIG_RPS))
		spin_unlock_irq(&sd->input_pkt_queue.lock);
	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
}
static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}
static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}
static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}
static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}
bool netdev_name_in_use(struct net *net, const char *name)
{
	return netdev_name_node_lookup(net, name);
}
EXPORT_SYMBOL(netdev_name_in_use);
int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name = kstrdup(name, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node) {
		kfree(name);
		return -ENOMEM;
	}
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail(&name_node->list, &dev->name_node->list);

	return 0;
}
static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	list_del(&name_node->list);
	kfree(name_node->name);
	netdev_name_node_free(name_node);
}
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	netdev_name_node_del(name_node);
	synchronize_rcu();
	__netdev_name_node_alt_destroy(name_node);

	return 0;
}
static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
		__netdev_name_node_alt_destroy(name_node);
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock(&dev_base_lock);

	netdev_for_each_altname(dev, name_node)
		netdev_name_node_add(net, name_node);

	/* We reserved the ifindex, this can't fail */
	WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));

	dev_base_seq_inc(net);
}
/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev, bool lock)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	xa_erase(&net->dev_by_index, dev->ifindex);

	netdev_for_each_altname(dev, name_node)
		netdev_name_node_del(name_node);

	/* Unlink dev from the device chain */
	if (lock)
		write_lock(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);
	if (lock)
		write_unlock(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}
/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
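
/* Illustrative sketch (not part of the original file): registering a
 * minimal ETH_P_ALL tap with dev_add_pack().  my_tap_rcv() and my_tap
 * are hypothetical names; a real handler must consume or free the skb.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);
 *	...
 *	dev_remove_pack(&my_tap);	// sleeps; see below
 */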
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of a interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);
/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. Following API allows
 *	user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
{
	int k = stack->num_paths++;

	if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
		return NULL;

	return &stack->path[k];
}
int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack)
{
	const struct net_device *last_dev;
	struct net_device_path_ctx ctx = {
		.dev	= dev,
	};
	struct net_device_path *path;
	int ret = 0;

	memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
	stack->num_paths = 0;
	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
		last_dev = ctx.dev;
		path = dev_fwd_path(stack);
		if (!path)
			return -1;

		memset(path, 0, sizeof(struct net_device_path));
		ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
		if (ret < 0)
			return -1;

		if (WARN_ON_ONCE(last_dev == ctx.dev))
			return -1;
	}

	if (!ctx.dev)
		return ret;

	path = dev_fwd_path(stack);
	if (!path)
		return -1;
	path->type = DEV_PATH_ETHERNET;
	path->dev = ctx.dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_fill_forward_path);
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/* Deprecated for new users, call netdev_get_by_name() instead */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
/**
 *	netdev_get_by_name() - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *	@tracker: tracking object for the acquired reference
 *	@gfp: allocation flags for the tracker
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use netdev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */
struct net_device *netdev_get_by_name(struct net *net, const char *name,
				      netdevice_tracker *tracker, gfp_t gfp)
{
	struct net_device *dev;

	dev = dev_get_by_name(net, name);
	if (dev)
		netdev_tracker_alloc(dev, tracker, gfp);
	return dev;
}
EXPORT_SYMBOL(netdev_get_by_name);
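
/* Illustrative sketch (not part of the original file): taking and
 * releasing a tracked reference with netdev_get_by_name().  The
 * tracker variable is supplied by the caller.
 *
 *	netdevice_tracker tracker;
 *	struct net_device *dev;
 *
 *	dev = netdev_get_by_name(net, "eth0", &tracker, GFP_KERNEL);
 *	if (dev) {
 *		// ... use dev ...
 *		netdev_put(dev, &tracker);
 *	}
 */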
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/* Deprecated for new users, call netdev_get_by_index() instead */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	netdev_get_by_index() - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *	@tracker: tracking object for the acquired reference
 *	@gfp: allocation flags for the tracker
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	netdev_put() to indicate they have finished with it.
 */
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
				       netdevice_tracker *tracker, gfp_t gfp)
{
	struct net_device *dev;

	dev = dev_get_by_index(net, ifindex);
	if (dev)
		netdev_tracker_alloc(dev, tracker, gfp);
	return dev;
}
EXPORT_SYMBOL(netdev_get_by_index);
/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);
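
/* Illustrative sketch (not part of the original file): looking up the
 * device behind a NAPI ID.  The RCU read lock must be held across the
 * lookup and any use of the returned pointer.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_napi_id(napi_id);
 *	if (dev)
 *		netdev_info(dev, "napi %u belongs to this device\n", napi_id);
 *	rcu_read_unlock();
 */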
/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	down_read(&devnet_rename_sem);
	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	strcpy(name, dev->name);

	ret = 0;
out:
	rcu_read_unlock();
	up_read(&devnet_rename_sem);
	return ret;
}
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work. We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
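
/* Illustrative examples (not part of the original file) of what
 * dev_valid_name() accepts and rejects:
 *
 *	dev_valid_name("eth0");		// true
 *	dev_valid_name("");		// false: empty name
 *	dev_valid_name("..");		// false: reserved file name
 *	dev_valid_name("a/b");		// false: '/' breaks sysfs paths
 *	dev_valid_name("my eth");	// false: whitespace
 */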
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			struct netdev_name_node *name_node;

			netdev_for_each_altname(d, name_node) {
				if (!sscanf(name_node->name, name, &i))
					continue;
				if (i < 0 || i >= max_netdevices)
					continue;

				/* avoid cases where sscanf is not exact inverse of printf */
				snprintf(buf, IFNAMSIZ, name, i);
				if (!strncmp(buf, name_node->name, IFNAMSIZ))
					__set_bit(i, inuse);
			}
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				__set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		bitmap_free(inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!netdev_name_in_use(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
static int dev_prep_valid_name(struct net *net, struct net_device *dev,
			       const char *want_name, char *out_name)
{
	int ret;

	if (!dev_valid_name(want_name))
		return -EINVAL;

	if (strchr(want_name, '%')) {
		ret = __dev_alloc_name(net, want_name, out_name);
		return ret < 0 ? ret : 0;
	} else if (netdev_name_in_use(net, want_name)) {
		return -EEXIST;
	} else if (out_name != want_name) {
		strscpy(out_name, want_name, IFNAMSIZ);
	}

	return 0;
}
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strscpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
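
/* Illustrative sketch (not part of the original file): with "eth0" and
 * "eth1" already present, passing the format "eth%d" fills dev->name
 * with "eth2" and returns the unit number 2.
 *
 *	err = dev_alloc_name(dev, "eth%d");	// err == 2, dev->name == "eth2"
 *	if (err < 0)
 *		goto fail;
 */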
static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = dev_prep_valid_name(net, dev, name, buf);
	if (ret < 0)
		return ret;
	strscpy(dev->name, buf, IFNAMSIZ);
	return 0;
}
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	down_write(&devnet_rename_sem);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		up_write(&devnet_rename_sem);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		up_write(&devnet_rename_sem);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s%s\n", oldname,
			    dev->flags & IFF_UP ? " (while UP)" : "");

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		up_write(&devnet_rename_sem);
		return ret;
	}

	up_write(&devnet_rename_sem);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock(&dev_base_lock);
	netdev_name_node_del(dev->name_node);
	write_unlock(&dev_base_lock);

	synchronize_rcu();

	write_lock(&dev_base_lock);
	netdev_name_node_add(net, dev->name_node);
	write_unlock(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			down_write(&devnet_rename_sem);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			netdev_err(dev, "name change rollback failed: %d\n",
				   ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
					mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);
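
/* Illustrative sketch (not part of the original file): setting and
 * reading back an interface alias.
 *
 *	static const char desc[] = "uplink to core switch";
 *	char buf[IFALIASZ];
 *
 *	dev_set_alias(dev, desc, strlen(desc));
 *	if (dev_get_alias(dev, buf, sizeof(buf)) > 0)
 *		netdev_info(dev, "alias: %s\n", buf);
 */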
/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device. Caller must make sure dev cannot go
 *	away, e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
	}
}
EXPORT_SYMBOL(netdev_state_change);
/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);
/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	__netdev_notify_peers(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{
	int err = 0;

	/* Create and wake up the kthread once to put it in
	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
	 * warning and work with loadavg.
	 */
	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
				n->dev->name, n->napi_id);
	if (IS_ERR(n->thread)) {
		err = PTR_ERR(n->thread);
		pr_err("kthread_run failed with err %d\n", err);
		n->thread = NULL;
	}

	return err;
}
static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();
	dev_addr_check(dev);

	if (!netif_device_present(dev)) {
		/* may be detached because parent is runtime-suspended */
		if (dev->dev.parent)
			pm_runtime_resume(dev->dev.parent);
		if (!netif_device_present(dev))
			return -ENODEV;
	}

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
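
/* Illustrative sketch (not part of the original file): bringing an
 * interface up and down from process context.  Both calls must be made
 * under the RTNL lock.
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);	// NULL: no extended ack requested
 *	if (!err) {
 *		// ... device is IFF_UP here ...
 *		dev_close(dev);
 *	}
 *	rtnl_unlock();
 */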
static void __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of it's
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}
}
static void __dev_close(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	__dev_close_many(&single);
	list_del(&single);
}
void dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}
}
EXPORT_SYMBOL(dev_close_many);
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
void dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device. Must be
 *	called under RTNL. This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);
/**
 *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
 *	@dev: device
 *
 *	Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
 *	called under RTNL. This is needed if Generic XDP is installed on
 *	the device.
 */
static void dev_disable_gro_hw(struct net_device *dev)
{
	dev->wanted_features &= ~NETIF_F_GRO_HW;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_GRO_HW))
		netdev_WARN(dev, "failed to disable GRO_HW!\n");
}
const char *netdev_cmd_to_name(enum netdev_cmd cmd)
{
#define N(val) 						\
	case NETDEV_##val:				\
		return "NETDEV_" __stringify(val);
	switch (cmd) {
	N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
	N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
	N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
	N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
	N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
	N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
	N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
	N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
	N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
	N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
	N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
	}
#undef N
	return "UNKNOWN_NETDEV_EVENT";
}
EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info = {
		.dev = dev,
	};

	return nb->notifier_call(nb, val, &info);
}
static int call_netdevice_register_notifiers(struct notifier_block *nb,
					     struct net_device *dev)
{
	int err;

	err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	if (!(dev->flags & IFF_UP))
		return 0;

	call_netdevice_notifier(nb, NETDEV_UP, dev);
	return 0;
}
static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
						struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
					dev);
		call_netdevice_notifier(nb, NETDEV_DOWN, dev);
	}
	call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
}
static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
						 struct net *net)
{
	struct net_device *dev;
	int err;

	for_each_netdev(net, dev) {
		err = call_netdevice_register_notifiers(nb, dev);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for_each_netdev_continue_reverse(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
	return err;
}

static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
						    struct net *net)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		call_netdevice_unregister_notifiers(nb, dev);
}
static int dev_boot_phase = 1;

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		err = call_netdevice_register_net_notifiers(nb, net);
		if (err)
			goto rollback;
	}

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;

rollback:
	for_each_net_continue_reverse(net)
		call_netdevice_unregister_net_notifiers(nb, net);

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
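
/* Illustrative sketch (not part of the original file): a notifier that
 * logs NETDEV_REGISTER events.  my_netdev_event() and my_nb are
 * hypothetical names.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_REGISTER)
 *			netdev_info(dev, "registered\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */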
/**
 * unregister_netdevice_notifier - unregister a network notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net *net;
	int err;

	/* Close race with setup_net() and cleanup_net() */
	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net)
		call_netdevice_unregister_net_notifiers(nb, net);

unlock:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
static int __register_netdevice_notifier_net(struct net *net,
					     struct notifier_block *nb,
					     bool ignore_call_fail)
{
	int err;

	err = raw_notifier_chain_register(&net->netdev_chain, nb);
	if (err)
		return err;
	if (dev_boot_phase)
		return 0;

	err = call_netdevice_register_net_notifiers(nb, net);
	if (err && !ignore_call_fail)
		goto chain_unregister;

	return 0;

chain_unregister:
	raw_notifier_chain_unregister(&net->netdev_chain, nb);
	return err;
}

static int __unregister_netdevice_notifier_net(struct net *net,
					       struct notifier_block *nb)
{
	int err;

	err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
	if (err)
		return err;

	call_netdevice_unregister_net_notifiers(nb, net);
	return 0;
}
/**
 * register_netdevice_notifier_net - register a per-netns network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered all registration and up events are replayed
 * to the new notifier to allow device to have a race free
 * view of the network device list.
 */

int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(net, nb, false);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_net);
/**
 * unregister_netdevice_notifier_net - unregister a per-netns
 *                                     network notifier block
 * @net: network namespace
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netdevice_notifier_net(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 *
 * After unregistering unregister and down device events are synthesized
 * for all devices on the device list to the removed notifier to remove
 * the need for special case cleanup code.
 */

int unregister_netdevice_notifier_net(struct net *net,
				      struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __unregister_netdevice_notifier_net(net, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_net);
static void __move_netdevice_notifier_net(struct net *src_net,
					  struct net *dst_net,
					  struct notifier_block *nb)
{
	__unregister_netdevice_notifier_net(src_net, nb);
	__register_netdevice_notifier_net(dst_net, nb, true);
}
int register_netdevice_notifier_dev_net(struct net_device *dev,
					struct notifier_block *nb,
					struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
	if (!err) {
		nn->nb = nb;
		list_add(&nn->list, &dev->net_notifier_list);
	}
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdevice_notifier_dev_net);

int unregister_netdevice_notifier_dev_net(struct net_device *dev,
					  struct notifier_block *nb,
					  struct netdev_net_notifier *nn)
{
	int err;

	rtnl_lock();
	list_del(&nn->list);
	err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
static void move_netdevice_notifiers_dev_net(struct net_device *dev,
					     struct net *net)
{
	struct netdev_net_notifier *nn;

	list_for_each_entry(nn, &dev->net_notifier_list, list)
		__move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
}
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks. Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers_info(unsigned long val,
				  struct netdev_notifier_info *info)
{
	struct net *net = dev_net(info->dev);
	int ret;

	ASSERT_RTNL();

	/* Run per-netns notifier block chain first, then run the global one.
	 * Hopefully, one day, the global one is going to be removed after
	 * all notifier block registrators get converted to be per-netns.
	 */
	ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
	if (ret & NOTIFY_STOP_MASK)
		return ret;
	return raw_notifier_call_chain(&netdev_chain, val, info);
}
/**
 *	call_netdevice_notifiers_info_robust - call per-netns notifier blocks
 *	                                       for and rollback on error
 *	@val_up: value passed unmodified to notifier function
 *	@val_down: value passed unmodified to the notifier function when
 *	           recovering from an error on @val_up
 *	@info: notifier information data
 *
 *	Call all per-netns network notifier blocks, but not notifier blocks on
 *	the global notifier chain. Parameters and return value are as for
 *	raw_notifier_call_chain_robust().
 */

static int
call_netdevice_notifiers_info_robust(unsigned long val_up,
				     unsigned long val_down,
				     struct netdev_notifier_info *info)
{
	struct net *net = dev_net(info->dev);

	ASSERT_RTNL();

	return raw_notifier_call_chain_robust(&net->netdev_chain,
					      val_up, val_down, info);
}
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_info info = {
		.dev = dev,
		.extack = extack,
	};

	return call_netdevice_notifiers_info(val, &info);
}
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks. Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return call_netdevice_notifiers_extack(val, dev, NULL);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
/**
 *	call_netdevice_notifiers_mtu - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@arg: additional u32 argument passed to the notifier function
 *
 *	Call all network notifier blocks. Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
static int call_netdevice_notifiers_mtu(unsigned long val,
					struct net_device *dev, u32 arg)
{
	struct netdev_notifier_info_ext info = {
		.info.dev = dev,
		.ext.mtu = arg,
	};

	BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);

	return call_netdevice_notifiers_info(val, &info.info);
}
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);

void net_inc_ingress_queue(void)
{
	static_branch_inc(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_branch_dec(&ingress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static DEFINE_STATIC_KEY_FALSE(egress_needed_key);

void net_inc_egress_queue(void)
{
	static_branch_inc(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_branch_dec(&egress_needed_key);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
EXPORT_SYMBOL(netstamp_needed_key);
#ifdef CONFIG_JUMP_LABEL
static atomic_t netstamp_needed_deferred;
static atomic_t netstamp_wanted;
static void netstamp_clear(struct work_struct *work)
{
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
	int wanted;

	wanted = atomic_add_return(deferred, &netstamp_wanted);
	if (wanted > 0)
		static_branch_enable(&netstamp_needed_key);
	else
		static_branch_disable(&netstamp_needed_key);
}
static DECLARE_WORK(netstamp_work, netstamp_clear);
#endif

void net_enable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted = atomic_read(&netstamp_wanted);

	while (wanted > 0) {
		if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1))
			return;
	}
	atomic_inc(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_inc(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef CONFIG_JUMP_LABEL
	int wanted = atomic_read(&netstamp_wanted);

	while (wanted > 1) {
		if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1))
			return;
	}
	atomic_dec(&netstamp_needed_deferred);
	schedule_work(&netstamp_work);
#else
	static_branch_dec(&netstamp_needed_key);
#endif
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp = 0;
	skb->mono_delivery_time = 0;
	if (static_branch_unlikely(&netstamp_needed_key))
		skb->tstamp = ktime_get_real();
}

#define net_timestamp_check(COND, SKB)				\
	if (static_branch_unlikely(&netstamp_needed_key)) {	\
		if ((COND) && !(SKB)->tstamp)			\
			(SKB)->tstamp = ktime_get_real();	\
	}
bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
{
	return __is_skb_forwardable(dev, skb, true);
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);
static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
			      bool check_mtu)
{
	int ret = ____dev_forward_skb(dev, skb, check_mtu);

	if (likely(!ret)) {
		skb->protocol = eth_type_trans(skb, dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	return ret;
}

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb2(dev, skb, true);
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
}
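/* Illustrative sketch (not from the original sources): a veth-style driver can
 * implement its ndo_start_xmit() by injecting each skb into its peer device;
 * "my_get_peer()" is a hypothetical helper:
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;	// skb already freed
 *		return NETDEV_TX_OK;
 *	}
 */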
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		return -ENOMEM;
	refcount_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}
static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/**
 * dev_nit_active - return true if any network interface taps are in use
 *
 * @dev: network device to check for the presence of taps
 */
bool dev_nit_active(struct net_device *dev)
{
	return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
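/* Illustrative sketch: a driver that bypasses the core xmit path can still
 * feed packet taps the same way xmit_one() below does:
 *
 *	if (dev_nit_active(dev))
 *		dev_queue_xmit_nit(skb, dev);
 */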
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->ignore_outgoing)
			continue;

		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev) {
		if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
			pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
		else
			kfree_skb(skb2);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid nothing can be done so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				    i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
{
	if (dev->num_tc) {
		struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
		int i;

		/* walk through the TCs and see if it falls into any of them */
		for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
			if ((txq - tc->offset) < tc->count)
				return i;
		}

		/* didn't find it, just return -1 to indicate no match */
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_txq_to_tc);
#ifdef CONFIG_XPS
static struct static_key xps_needed __read_mostly;
static struct static_key xps_rxqs_needed __read_mostly;
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
			     struct xps_dev_maps *old_maps, int tci, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	map = xmap_dereference(dev_maps->attr_map[tci]);
	if (!map)
		return false;

	for (pos = map->len; pos--;) {
		if (map->queues[pos] != index)
			continue;

		if (map->len > 1) {
			map->queues[pos] = map->queues[--map->len];
			break;
		}

		if (old_maps)
			RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
		RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
		kfree_rcu(map, rcu);
		return false;
	}

	return true;
}
static bool remove_xps_queue_cpu(struct net_device *dev,
				 struct xps_dev_maps *dev_maps,
				 int cpu, u16 offset, u16 count)
{
	int num_tc = dev_maps->num_tc;
	bool active = false;
	int tci;

	for (tci = cpu * num_tc; num_tc--; tci++) {
		int i, j;

		for (i = count, j = offset; i--; j++) {
			if (!remove_xps_queue(dev_maps, NULL, tci, j))
				break;
		}

		active |= i < 0;
	}

	return active;
}
static void reset_xps_maps(struct net_device *dev,
			   struct xps_dev_maps *dev_maps,
			   enum xps_map_type type)
{
	static_key_slow_dec_cpuslocked(&xps_needed);
	if (type == XPS_RXQS)
		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);

	RCU_INIT_POINTER(dev->xps_maps[type], NULL);

	kfree_rcu(dev_maps, rcu);
}
static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
			   u16 offset, u16 count)
{
	struct xps_dev_maps *dev_maps;
	bool active = false;
	int i, j;

	dev_maps = xmap_dereference(dev->xps_maps[type]);
	if (!dev_maps)
		return;

	for (j = 0; j < dev_maps->nr_ids; j++)
		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
	if (!active)
		reset_xps_maps(dev, dev_maps, type);

	if (type == XPS_CPUS) {
		for (i = offset + (count - 1); count--; i--)
			netdev_queue_numa_node_write(
				netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
	}
}
static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
				   u16 count)
{
	if (!static_key_false(&xps_needed))
		return;

	cpus_read_lock();
	mutex_lock(&xps_map_mutex);

	if (static_key_false(&xps_rxqs_needed))
		clean_xps_maps(dev, XPS_RXQS, offset, count);

	clean_xps_maps(dev, XPS_CPUS, offset, count);

	mutex_unlock(&xps_map_mutex);
	cpus_read_unlock();
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
}
static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
				      u16 index, bool is_rxqs_map)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add tx-queue to this CPU's/rx-queue's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
	 * map
	 */
	if (is_rxqs_map)
		new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
	else
		new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
				       cpu_to_node(attr_index));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
/* Copy xps maps at a given index */
static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
			      struct xps_dev_maps *new_dev_maps, int index,
			      int tc, bool skip_tc)
{
	int i, tci = index * dev_maps->num_tc;
	struct xps_map *map;

	/* copy maps belonging to foreign traffic classes */
	for (i = 0; i < dev_maps->num_tc; i++, tci++) {
		if (i == tc && skip_tc)
			continue;

		/* fill in the new device map from the old device map */
		map = xmap_dereference(dev_maps->attr_map[tci]);
		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
	}
}
/* Must be called under cpus_read_lock */
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
			  u16 index, enum xps_map_type type)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
	const unsigned long *online_mask = NULL;
	bool active = false, copy = false;
	int i, j, tci, numa_node_id = -2;
	int maps_sz, num_tc = 1, tc = 0;
	struct xps_map *map, *new_map;
	unsigned int nr_ids;

	WARN_ON_ONCE(index >= dev->num_tx_queues);

	if (dev->num_tc) {
		/* Do not allow XPS on subordinate device directly */
		num_tc = dev->num_tc;
		if (num_tc < 0)
			return -EINVAL;

		/* If queue belongs to subordinate dev use its map */
		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;

		tc = netdev_txq_to_tc(dev, index);
		if (tc < 0)
			return -EINVAL;
	}

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps[type]);
	if (type == XPS_RXQS) {
		maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
		nr_ids = dev->num_rx_queues;
	} else {
		maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
		if (num_possible_cpus() > 1)
			online_mask = cpumask_bits(cpu_online_mask);
		nr_ids = nr_cpu_ids;
	}

	if (maps_sz < L1_CACHE_BYTES)
		maps_sz = L1_CACHE_BYTES;

	/* The old dev_maps could be larger or smaller than the one we're
	 * setting up now, as dev->num_tc or nr_ids could have been updated in
	 * between. We could try to be smart, but let's be safe instead and only
	 * copy foreign traffic classes if the two map sizes match.
	 */
	if (dev_maps &&
	    dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
		copy = true;

	/* allocate memory for queue storage */
	for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
	     j < nr_ids;) {
		if (!new_dev_maps) {
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
			if (!new_dev_maps) {
				mutex_unlock(&xps_map_mutex);
				return -ENOMEM;
			}

			new_dev_maps->nr_ids = nr_ids;
			new_dev_maps->num_tc = num_tc;
		}

		tci = j * num_tc + tc;
		map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;

		map = expand_xps_map(map, j, index, type == XPS_RXQS);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	if (!dev_maps) {
		/* Increment static keys at most once per type */
		static_key_slow_inc_cpuslocked(&xps_needed);
		if (type == XPS_RXQS)
			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
	}

	for (j = 0; j < nr_ids; j++) {
		bool skip_tc = false;

		tci = j * num_tc + tc;
		if (netif_attr_test_mask(j, mask, nr_ids) &&
		    netif_attr_test_online(j, online_mask, nr_ids)) {
			/* add tx-queue to CPU/rx-queue maps */
			int pos = 0;

			skip_tc = true;

			map = xmap_dereference(new_dev_maps->attr_map[tci]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (type == XPS_CPUS) {
				if (numa_node_id == -2)
					numa_node_id = cpu_to_node(j);
				else if (numa_node_id != cpu_to_node(j))
					numa_node_id = -1;
			}
#endif
		}

		if (copy)
			xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
					  skip_tc);
	}

	rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);

	/* Cleanup old maps */
	if (!dev_maps)
		goto out_no_old_maps;

	for (j = 0; j < dev_maps->nr_ids; j++) {
		for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
			map = xmap_dereference(dev_maps->attr_map[tci]);
			if (!map)
				continue;

			if (copy) {
				new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
				if (map == new_map)
					continue;
			}

			RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
			kfree_rcu(map, rcu);
		}
	}

	old_dev_maps = dev_maps;

out_no_old_maps:
	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	if (type == XPS_CPUS)
		/* update Tx queue numa node */
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
					     (numa_node_id >= 0) ?
					     numa_node_id : NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes tx-queue from unused CPUs/rx-queues */
	for (j = 0; j < dev_maps->nr_ids; j++) {
		tci = j * dev_maps->num_tc;

		for (i = 0; i < dev_maps->num_tc; i++, tci++) {
			if (i == tc &&
			    netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
			    netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
				continue;

			active |= remove_xps_queue(dev_maps,
						   copy ? old_dev_maps : NULL,
						   tci, index);
		}
	}

	if (old_dev_maps)
		kfree_rcu(old_dev_maps, rcu);

	/* free map if not active */
	if (!active)
		reset_xps_maps(dev, dev_maps, type);

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for (j = 0; j < nr_ids; j++) {
		for (i = num_tc, tci = j * num_tc; i--; tci++) {
			new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
			map = copy ?
			      xmap_dereference(dev_maps->attr_map[tci]) :
			      NULL;
			if (new_map && new_map != map)
				kfree(new_map);
		}
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	int ret;

	cpus_read_lock();
	ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
	cpus_read_unlock();

	return ret;
}
EXPORT_SYMBOL(netif_set_xps_queue);
#endif /* CONFIG_XPS */
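/* Illustrative sketch (hypothetical driver code): a multiqueue driver often
 * spreads its TX queues across CPUs at open time; this assumes CPU ids are
 * contiguous and online:
 *
 *	for (i = 0; i < dev->real_num_tx_queues; i++)
 *		netif_set_xps_queue(dev, cpumask_of(i % num_online_cpus()), i);
 */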
static void netdev_unbind_all_sb_channels(struct net_device *dev)
{
	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];

	/* Unbind any subordinate channels */
	while (txq-- != &dev->_tx[0]) {
		if (txq->sb_dev)
			netdev_unbind_sb_channel(dev, txq->sb_dev);
	}
}

void netdev_reset_tc(struct net_device *dev)
{
#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	/* Reset TC configuration of device */
	dev->num_tc = 0;
	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
}
EXPORT_SYMBOL(netdev_reset_tc);
int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
{
	if (tc >= dev->num_tc)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues(dev, offset, count);
#endif
	dev->tc_to_txq[tc].count = count;
	dev->tc_to_txq[tc].offset = offset;
	return 0;
}
EXPORT_SYMBOL(netdev_set_tc_queue);
int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
{
	if (num_tc > TC_MAX_QUEUE)
		return -EINVAL;

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(dev, 0);
#endif
	netdev_unbind_all_sb_channels(dev);

	dev->num_tc = num_tc;
	return 0;
}
EXPORT_SYMBOL(netdev_set_num_tc);
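/* Illustrative sketch (hypothetical configuration): a driver exposing two
 * traffic classes over four TX queues would map TC0 to queues 0-1 and TC1 to
 * queues 2-3:
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 2, 0);
 *	netdev_set_tc_queue(dev, 1, 2, 2);
 */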
void netdev_unbind_sb_channel(struct net_device *dev,
			      struct net_device *sb_dev)
{
	struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];

#ifdef CONFIG_XPS
	netif_reset_xps_queues_gt(sb_dev, 0);
#endif
	memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
	memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));

	while (txq-- != &dev->_tx[0]) {
		if (txq->sb_dev == sb_dev)
			txq->sb_dev = NULL;
	}
}
EXPORT_SYMBOL(netdev_unbind_sb_channel);
int netdev_bind_sb_channel_queue(struct net_device *dev,
				 struct net_device *sb_dev,
				 u8 tc, u16 count, u16 offset)
{
	/* Make certain the sb_dev and dev are already configured */
	if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
		return -EINVAL;

	/* We cannot hand out queues we don't have */
	if ((offset + count) > dev->real_num_tx_queues)
		return -EINVAL;

	/* Record the mapping */
	sb_dev->tc_to_txq[tc].count = count;
	sb_dev->tc_to_txq[tc].offset = offset;

	/* Provide a way for Tx queue to find the tc_to_txq map or
	 * XPS map for itself.
	 */
	while (count--)
		netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;

	return 0;
}
EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
int netdev_set_sb_channel(struct net_device *dev, u16 channel)
{
	/* Do not use a multiqueue device to represent a subordinate channel */
	if (netif_is_multiqueue(dev))
		return -ENODEV;

	/* We allow channels 1 - 32767 to be used for subordinate channels.
	 * Channel 0 is meant to be "native" mode and used only to represent
	 * the main root device. We allow writing 0 to reset the device back
	 * to normal mode after being used as a subordinate channel.
	 */
	if (channel > S16_MAX)
		return -EINVAL;

	dev->num_tc = -channel;

	return 0;
}
EXPORT_SYMBOL(netdev_set_sb_channel);
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	bool disabling;
	int rc;

	disabling = txq < dev->real_num_tx_queues;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		dev_qdisc_change_real_num_tx(dev, txq);

		dev->real_num_tx_queues = txq;

		if (disabling) {
			synchronize_net();
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	} else {
		dev->real_num_tx_queues = txq;
	}

	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
/**
 *	netif_set_real_num_queues - set actual number of RX and TX queues used
 *	@dev: Network device
 *	@txq: Actual number of TX queues
 *	@rxq: Actual number of RX queues
 *
 *	Set the real number of both TX and RX queues.
 *	Does nothing if the number of queues is already correct.
 */
int netif_set_real_num_queues(struct net_device *dev,
			      unsigned int txq, unsigned int rxq)
{
	unsigned int old_rxq = dev->real_num_rx_queues;
	int err;

	if (txq < 1 || txq > dev->num_tx_queues ||
	    rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	/* Start from increases, so the error path only does decreases -
	 * decreases can't fail.
	 */
	if (rxq > dev->real_num_rx_queues) {
		err = netif_set_real_num_rx_queues(dev, rxq);
		if (err)
			return err;
	}
	if (txq > dev->real_num_tx_queues) {
		err = netif_set_real_num_tx_queues(dev, txq);
		if (err)
			goto undo_rx;
	}
	if (rxq < dev->real_num_rx_queues)
		WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
	if (txq < dev->real_num_tx_queues)
		WARN_ON(netif_set_real_num_tx_queues(dev, txq));

	return 0;
undo_rx:
	WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
	return err;
}
EXPORT_SYMBOL(netif_set_real_num_queues);
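/* Illustrative sketch (hypothetical driver code): a driver that negotiated
 * fewer MSI-X vectors ("nvec") than advertised queues could shrink both
 * counts under RTNL:
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_queues(dev, nvec, nvec);
 *	rtnl_unlock();
 */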
/**
 * netif_set_tso_max_size() - set the max size of TSO frames supported
 * @dev:	netdev to update
 * @size:	max skb->len of a TSO frame
 *
 * Set the limit on the size of TSO super-frames the device can handle.
 * Unless explicitly set the stack will assume the value of
 * %GSO_LEGACY_MAX_SIZE.
 */
void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
{
	dev->tso_max_size = min(GSO_MAX_SIZE, size);
	if (size < READ_ONCE(dev->gso_max_size))
		netif_set_gso_max_size(dev, size);
	if (size < READ_ONCE(dev->gso_ipv4_max_size))
		netif_set_gso_ipv4_max_size(dev, size);
}
EXPORT_SYMBOL(netif_set_tso_max_size);
/**
 * netif_set_tso_max_segs() - set the max number of segs supported for TSO
 * @dev:	netdev to update
 * @segs:	max number of TCP segments
 *
 * Set the limit on the number of TCP segments the device can generate from
 * a single TSO super-frame.
 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS.
 */
void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
{
	dev->tso_max_segs = segs;
	if (segs < READ_ONCE(dev->gso_max_segs))
		netif_set_gso_max_segs(dev, segs);
}
EXPORT_SYMBOL(netif_set_tso_max_segs);
/**
 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
 * @to:		netdev to update
 * @from:	netdev from which to copy the limits
 */
void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
{
	netif_set_tso_max_size(to, from->tso_max_size);
	netif_set_tso_max_segs(to, from->tso_max_segs);
}
EXPORT_SYMBOL(netif_inherit_tso_max);
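/* Illustrative sketch (hypothetical hardware limits): a driver whose DMA
 * engine handles at most 64kB per TSO job and 32 descriptors per frame would
 * advertise those caps at probe time:
 *
 *	netif_set_tso_max_size(dev, SZ_64K);
 *	netif_set_tso_max_segs(dev, 32);
 */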
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * Default value is the number of physical cores if there are only 1 or 2, or
 * divided by 2 if there are more.
 */
int netif_get_num_default_rss_queues(void)
{
	cpumask_var_t cpus;
	int cpu, count = 0;

	if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
		return 1;

	cpumask_copy(cpus, cpu_online_mask);
	for_each_cpu(cpu, cpus) {
		++count;
		cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
	}
	free_cpumask_var(cpus);

	return count > 2 ? DIV_ROUND_UP(count, 2) : count;
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
static void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
struct dev_kfree_skb_cb {
	enum skb_drop_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}
void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!netif_xmit_stopped(txq)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
	unsigned long flags;

	if (unlikely(!skb))
		return;

	if (likely(refcount_read(&skb->users) == 1)) {
		smp_rmb();
		refcount_set(&skb->users, 0);
	} else if (likely(!refcount_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dev_kfree_skb_irq_reason);

void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
	if (in_hardirq() || irqs_disabled())
		dev_kfree_skb_irq_reason(skb, reason);
	else
		kfree_skb_reason(skb, reason);
}
EXPORT_SYMBOL(dev_kfree_skb_any_reason);
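/* Illustrative sketch: code that may run in hard interrupt context (for
 * example a TX cleanup path dropping a bad descriptor) must use the
 * context-agnostic variant rather than kfree_skb():
 *
 *	dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
 */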
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
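/* Illustrative sketch (hypothetical driver code): PM callbacks typically pair
 * these helpers; "my_hw_suspend()" stands in for device-specific work:
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		return my_hw_suspend(dev);
 *	}
 */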
/*
 * Returns a Tx hash based on the given packet descriptor and a Tx queues'
 * number to be used as a distribution range.
 */
static u16 skb_tx_hash(const struct net_device *dev,
		       const struct net_device *sb_dev,
		       struct sk_buff *skb)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = dev->real_num_tx_queues;

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = sb_dev->tc_to_txq[tc].offset;
		qcount = sb_dev->tc_to_txq[tc].count;
		if (unlikely(!qcount)) {
			net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
					     sb_dev->name, qoffset, tc);
			qoffset = 0;
			qcount = dev->real_num_tx_queues;
		}
	}

	if (skb_rx_queue_recorded(skb)) {
		DEBUG_NET_WARN_ON_ONCE(qcount == 0);
		hash = skb_get_rx_queue(skb);
		if (hash >= qoffset)
			hash -= qoffset;
		while (unlikely(hash >= qcount))
			hash -= qcount;
		return hash + qoffset;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	skb_dump(KERN_WARNING, skb, false);
	WARN(1, "%s: caps=(%pNF, %pNF)\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_is_gso(skb))) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	ret = -EINVAL;
	if (unlikely(offset >= skb_headlen(skb))) {
		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
		WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
			  offset, skb_headlen(skb));
		goto out;
	}
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
		WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
			  offset + sizeof(__sum16), skb_headlen(skb));
		goto out;
	}
	ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
	if (ret)
		goto out;

	*(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
int skb_crc32c_csum_help(struct sk_buff *skb)
{
	__le32 crc32c_csum;
	int ret = 0, offset, start;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	if (unlikely(skb_is_gso(skb)))
		goto out;

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (unlikely(skb_has_shared_frag(skb))) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}
	start = skb_checksum_start_offset(skb);
	offset = start + offsetof(struct sctphdr, checksum);
	if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
		ret = -EINVAL;
		goto out;
	}

	ret = skb_ensure_writable(skb, offset + sizeof(__le32));
	if (ret)
		goto out;

	crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
						  skb->len - start, ~(__u32)0,
						  crc32c_csum_stub));
	*(__le32 *)(skb->data + offset) = crc32c_csum;
	skb_reset_csum_not_inet(skb);
out:
	return ret;
}
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb->data;
		type = eth->h_proto;
	}

	return vlan_get_protocol_and_depth(skb, type, depth);
}
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
	netdev_err(dev, "hw csum failure\n");
	skb_dump(KERN_ERR, skb, true);
	dump_stack();
}

void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
	DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* XXX: check that highmem exists at all on the given machine. */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}
#endif
	return 0;
}
/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	__be16 type;

	type = skb_network_protocol(skb, NULL);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}
	if (illegal_highdma(skb->dev, skb))
		features &= ~NETIF_F_SG;

	return features;
}
passthru_features_check(struct sk_buff
*skb
,
3480 struct net_device
*dev
,
3481 netdev_features_t features
)
3485 EXPORT_SYMBOL(passthru_features_check
);
3487 static netdev_features_t
dflt_features_check(struct sk_buff
*skb
,
3488 struct net_device
*dev
,
3489 netdev_features_t features
)
3491 return vlan_features_check(skb
, features
);
static netdev_features_t gso_features_check(const struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > READ_ONCE(dev->gso_max_segs))
		return features & ~NETIF_F_GSO_MASK;

	if (!skb_shinfo(skb)->gso_type) {
		skb_warn_bad_offload(skb);
		return features & ~NETIF_F_GSO_MASK;
	}

	/* Support for GSO partial features requires software
	 * intervention before we can actually process the packets
	 * so we need to strip support for any partial features now
	 * and we can pull them back in after we have partially
	 * segmented the frame.
	 */
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
		features &= ~dev->gso_partial_features;

	/* Make sure to clear the IPv4 ID mangling feature if the
	 * IPv4 header has the potential to be fragmented.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		struct iphdr *iph = skb->encapsulation ?
				    inner_ip_hdr(skb) : ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_DF)))
			features &= ~NETIF_F_TSO_MANGLEID;
	}

	return features;
}
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	if (skb_is_gso(skb))
		features = gso_features_check(skb, dev, features);

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
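/* Illustrative sketch (hypothetical driver code): a device that cannot
 * checksum encapsulated traffic could mask those bits from its own
 * .ndo_features_check implementation:
 *
 *	static netdev_features_t my_features_check(struct sk_buff *skb,
 *						   struct net_device *dev,
 *						   netdev_features_t features)
 *	{
 *		if (skb->encapsulation)
 *			features &= ~NETIF_F_CSUM_MASK;
 *		return features;
 *	}
 */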
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (dev_nit_active(dev))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}
struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb_mark_not_on_list(skb);
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_tx_queue_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}
int skb_csum_hwoffload_help(struct sk_buff *skb,
			    const netdev_features_t features)
{
	if (unlikely(skb_csum_is_sctp(skb)))
		return !!(features & NETIF_F_SCTP_CRC) ? 0 :
			skb_crc32c_csum_help(skb);

	if (features & NETIF_F_HW_CSUM)
		return 0;

	if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		switch (skb->csum_offset) {
		case offsetof(struct tcphdr, check):
		case offsetof(struct udphdr, check):
			return 0;
		}
	}

	return skb_checksum_help(skb);
}
EXPORT_SYMBOL(skb_csum_hwoffload_help);
static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	skb = sk_validate_xmit_skb(skb, dev);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (skb_csum_hwoffload_help(skb, features))
				goto out_kfree_skb;
		}
	}

	skb = validate_xmit_xfrm(skb, features, again);

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	dev_core_stats_tx_dropped_inc(dev);
	return NULL;
}
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb_mark_not_on_list(skb);

		/* in case skb wont be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev, again);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
		u16 gso_segs = shinfo->gso_segs;
		unsigned int hdr_len;

		/* mac layer + network layer */
		hdr_len = skb_transport_offset(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
			const struct tcphdr *th;
			struct tcphdr _tcphdr;

			th = skb_header_pointer(skb, hdr_len,
						sizeof(_tcphdr), &_tcphdr);
			if (likely(th))
				hdr_len += __tcp_hdrlen(th);
		} else {
			struct udphdr _udphdr;

			if (skb_header_pointer(skb, hdr_len,
					       sizeof(_udphdr), &_udphdr))
				hdr_len += sizeof(struct udphdr);
		}

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
			     struct sk_buff **to_free,
			     struct netdev_queue *txq)
{
	int rc;

	rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
	if (rc == NET_XMIT_SUCCESS)
		trace_qdisc_enqueue(q, txq, skb);
	return rc;
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	struct sk_buff *to_free = NULL;
	bool contended;
	int rc;

	qdisc_calculate_pkt_len(skb, q);

	if (q->flags & TCQ_F_NOLOCK) {
		if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
		    qdisc_run_begin(q)) {
			/* Retest nolock_qdisc_is_empty() within the protection
			 * of q->seqlock to protect from racing with requeuing.
			 */
			if (unlikely(!nolock_qdisc_is_empty(q))) {
				rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
				__qdisc_run(q);
				qdisc_run_end(q);

				goto no_lock_out;
			}

			qdisc_bstats_cpu_update(q, skb);
			if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
			    !nolock_qdisc_is_empty(q))
				__qdisc_run(q);

			qdisc_run_end(q);
			return NET_XMIT_SUCCESS;
		}

		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
		qdisc_run(q);

no_lock_out:
		if (unlikely(to_free))
			kfree_skb_list_reason(to_free,
					      SKB_DROP_REASON_QDISC_DROP);
		return rc;
	}

	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits qdisc->running owner to get the lock more
	 * often and dequeue packets faster.
	 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
	 * and then other tasks will only enqueue packets. The packets will be
	 * sent after the qdisc owner is scheduled again. To prevent this
	 * scenario the task always serialize on the lock.
	 */
	contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		__qdisc_drop(skb, &to_free);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}

		qdisc_run_end(q);
		rc = NET_XMIT_SUCCESS;
	} else {
		rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
			qdisc_run_end(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(to_free))
		kfree_skb_list_reason(to_free, SKB_DROP_REASON_QDISC_DROP);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	const struct netprio_map *map;
	const struct sock *sk;
	unsigned int prioidx;

	if (skb->priority)
		return;
	map = rcu_dereference_bh(skb->dev->priomap);
	if (!map)
		return;
	sk = skb_to_full_sk(skb);
	if (!sk)
		return;

	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);

	if (prioidx < map->priomap_len)
		skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
#endif
/**
 *	dev_loopback_xmit - loop back @skb
 *	@net: network namespace this loopback is happening in
 *	@sk:  sk needed to be a netfilter okfn
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	if (skb->ip_summed == CHECKSUM_NONE)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
#ifdef CONFIG_NET_EGRESS
static struct netdev_queue *
netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
{
	int qm = skb_get_queue_mapping(skb);

	return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
}

static bool netdev_xmit_txqueue_skipped(void)
{
	return __this_cpu_read(softnet_data.xmit.skip_txqueue);
}

void netdev_xmit_skip_txqueue(bool skip)
{
	__this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
}
EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
#endif /* CONFIG_NET_EGRESS */
#ifdef CONFIG_NET_XGRESS
static int tc_run(struct tcx_entry *entry, struct sk_buff *skb)
{
	int ret = TC_ACT_UNSPEC;
#ifdef CONFIG_NET_CLS_ACT
	struct mini_Qdisc *miniq = rcu_dereference_bh(entry->miniq);
	struct tcf_result res;

	if (!miniq)
		return ret;

	tc_skb_cb(skb)->mru = 0;
	tc_skb_cb(skb)->post_ct = false;

	mini_qdisc_bstats_cpu_update(miniq, skb);
	ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false);
	/* Only tcf related quirks below. */
	switch (ret) {
	case TC_ACT_SHOT:
		mini_qdisc_qstats_cpu_drop(miniq);
		break;
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(res.classid);
		break;
	}
#endif /* CONFIG_NET_CLS_ACT */
	return ret;
}

static DEFINE_STATIC_KEY_FALSE(tcx_needed_key);

void tcx_inc(void)
{
	static_branch_inc(&tcx_needed_key);
}

void tcx_dec(void)
{
	static_branch_dec(&tcx_needed_key);
}
static __always_inline enum tcx_action_base
tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
	const bool needs_mac)
{
	const struct bpf_mprog_fp *fp;
	const struct bpf_prog *prog;
	int ret = TCX_NEXT;

	if (needs_mac)
		__skb_push(skb, skb->mac_len);
	bpf_mprog_foreach_prog(entry, fp, prog) {
		bpf_compute_data_pointers(skb);
		ret = bpf_prog_run(prog, skb);
		if (ret != TCX_NEXT)
			break;
	}
	if (needs_mac)
		__skb_pull(skb, skb->mac_len);
	return tcx_action_code(skb, ret);
}
static __always_inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
		   struct net_device *orig_dev, bool *another)
{
	struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
	int sch_ret;

	if (!entry)
		return skb;
	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	tcx_set_ingress(skb, true);

	if (static_branch_unlikely(&tcx_needed_key)) {
		sch_ret = tcx_run(entry, skb, true);
		if (sch_ret != TC_ACT_UNSPEC)
			goto ingress_verdict;
	}
	sch_ret = tc_run(tcx_entry(entry), skb);
ingress_verdict:
	switch (sch_ret) {
	case TC_ACT_REDIRECT:
		/* skb_mac_header check was done by BPF, so we can safely
		 * push the L2 header back before redirecting to another
		 * netdev.
		 */
		__skb_push(skb, skb->mac_len);
		if (skb_do_redirect(skb) == -EAGAIN) {
			__skb_pull(skb, skb->mac_len);
			*another = true;
			break;
		}
		*ret = NET_RX_SUCCESS;
		return NULL;
	case TC_ACT_SHOT:
		kfree_skb_reason(skb, SKB_DROP_REASON_TC_INGRESS);
		*ret = NET_RX_DROP;
		return NULL;
	/* used by tc_run */
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		consume_skb(skb);
		fallthrough;
	case TC_ACT_CONSUMED:
		*ret = NET_RX_SUCCESS;
		return NULL;
	}

	return skb;
}
static __always_inline struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
	int sch_ret;

	if (!entry)
		return skb;

	/* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
	 * already set by the caller.
	 */
	if (static_branch_unlikely(&tcx_needed_key)) {
		sch_ret = tcx_run(entry, skb, false);
		if (sch_ret != TC_ACT_UNSPEC)
			goto egress_verdict;
	}
	sch_ret = tc_run(tcx_entry(entry), skb);
egress_verdict:
	switch (sch_ret) {
	case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
		skb_do_redirect(skb);
		*ret = NET_XMIT_SUCCESS;
		return NULL;
	case TC_ACT_SHOT:
		kfree_skb_reason(skb, SKB_DROP_REASON_TC_EGRESS);
		*ret = NET_XMIT_DROP;
		return NULL;
	/* used by tc_run */
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		consume_skb(skb);
		fallthrough;
	case TC_ACT_CONSUMED:
		*ret = NET_XMIT_SUCCESS;
		return NULL;
	}

	return skb;
}
#else
static __always_inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
		   struct net_device *orig_dev, bool *another)
{
	return skb;
}

static __always_inline struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	return skb;
}
#endif /* CONFIG_NET_XGRESS */
#ifdef CONFIG_XPS
static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
			       struct xps_dev_maps *dev_maps, unsigned int tci)
{
	int tc = netdev_get_prio_tc_map(dev, skb->priority);
	struct xps_map *map;
	int queue_index = -1;

	if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
		return queue_index;

	tci *= dev_maps->num_tc;
	tci += tc;

	map = rcu_dereference(dev_maps->attr_map[tci]);
	if (map) {
		if (map->len == 1)
			queue_index = map->queues[0];
		else
			queue_index = map->queues[reciprocal_scale(
						skb_get_hash(skb), map->len)];
		if (unlikely(queue_index >= dev->real_num_tx_queues))
			queue_index = -1;
	}
	return queue_index;
}
#endif
static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
			 struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct sock *sk = skb->sk;
	int queue_index = -1;

	if (!static_key_false(&xps_needed))
		return -1;

	rcu_read_lock();
	if (!static_key_false(&xps_rxqs_needed))
		goto get_cpus_map;

	dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
	if (dev_maps) {
		int tci = sk_rx_queue_get(sk);

		if (tci >= 0)
			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
							  tci);
	}

get_cpus_map:
	if (queue_index < 0) {
		dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
		if (dev_maps) {
			unsigned int tci = skb->sender_cpu - 1;

			queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
							  tci);
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
		     struct net_device *sb_dev)
{
	return 0;
}
EXPORT_SYMBOL(dev_pick_tx_zero);

u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
}
EXPORT_SYMBOL(dev_pick_tx_cpu_id);
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
		   struct net_device *sb_dev)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	sb_dev = sb_dev ? : dev;

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, sb_dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, sb_dev, skb);

		if (queue_index != new_index && sk &&
		    sk_fullsock(sk) &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
EXPORT_SYMBOL(netdev_pick_tx);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
					 struct sk_buff *skb,
					 struct net_device *sb_dev)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	u32 sender_cpu = skb->sender_cpu - 1;

	if (sender_cpu >= (u32)NR_CPUS)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
		else
			queue_index = netdev_pick_tx(dev, skb, sb_dev);

		queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
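/* Illustrative sketch (hypothetical driver code): a driver steering control
 * traffic to its last queue could layer .ndo_select_queue on the default
 * picker:
 *
 *	static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
 *				   struct net_device *sb_dev)
 *	{
 *		if (unlikely(skb->priority == TC_PRIO_CONTROL))
 *			return dev->real_num_tx_queues - 1;
 *		return netdev_pick_tx(dev, skb, sb_dev);
 *	}
 */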
/**
 * __dev_queue_xmit() - transmit a buffer
 * @skb:	buffer to transmit
 * @sb_dev:	subordinate device used for L2 forwarding offload
 *
 * Queue a buffer for transmission to a network device. The caller must
 * have set the device and priority and built the buffer before calling
 * this function. The function can be called from an interrupt.
 *
 * When calling this method, interrupts MUST be enabled. This is because
 * the BH enable code must have IRQs enabled so that it will not deadlock.
 *
 * Regardless of the return value, the skb is consumed, so it is currently
 * difficult to retry a send to this method. (You can bump the ref count
 * before sending to hold a reference for retry if you are careful.)
 *
 * Return:
 * * 0				- buffer successfully transmitted
 * * positive qdisc return code	- NET_XMIT_DROP etc.
 * * negative errno		- other errors
 */
int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq = NULL;
	struct Qdisc *q;
	int rc = -ENOMEM;
	bool again = false;

	skb_reset_mac_header(skb);
	skb_assert_len(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	qdisc_pkt_len_init(skb);
	tcx_set_ingress(skb, false);
#ifdef CONFIG_NET_EGRESS
	if (static_branch_unlikely(&egress_needed_key)) {
		if (nf_hook_egress_active()) {
			skb = nf_hook_egress(skb, &rc, dev);
			if (!skb)
				goto out;
		}

		netdev_xmit_skip_txqueue(false);

		nf_skip_egress(skb, true);
		skb = sch_handle_egress(skb, &rc, dev);
		if (!skb)
			goto out;
		nf_skip_egress(skb, false);

		if (netdev_xmit_txqueue_skipped())
			txq = netdev_tx_queue_mapping(dev, skb);
	}
#endif
	/* If device/qdisc don't need skb->dst, release it right now while
	 * its hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	if (!txq)
		txq = netdev_core_pick_tx(dev, skb, sb_dev);

	q = rcu_dereference_bh(txq->qdisc);

	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...

	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible, that they rely on protection
	 * made by us here.

	 * Check this and shot the lock. It is not prone from deadlocks.
	 * Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		/* Other cpus might concurrently change txq->xmit_lock_owner
		 * to -1 or to their cpu id, but not to our id.
		 */
		if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
			if (dev_xmit_recursion())
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev, &again);
			if (!skb)
				goto out;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				dev_xmit_recursion_inc();
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				dev_xmit_recursion_dec();
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	dev_core_stats_tx_dropped_inc(dev);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(__dev_queue_xmit);
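/* Illustrative sketch: most callers go through the dev_queue_xmit() wrapper;
 * a tunnel or forwarding path that has built an skb for another device
 * typically does:
 *
 *	skb->dev = target_dev;
 *	ret = dev_queue_xmit(skb);
 */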
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *orig_skb = skb;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	skb = validate_xmit_skb_list(skb, dev, &again);
	if (skb != orig_skb)
		goto drop;

	skb_set_queue_mapping(skb, queue_id);
	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	dev_xmit_recursion_inc();
	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);
	dev_xmit_recursion_dec();

	local_bh_enable();
	return ret;
drop:
	dev_core_stats_tx_dropped_inc(dev);
	kfree_skb_list(skb);
	return NET_XMIT_DROP;
}
EXPORT_SYMBOL(__dev_direct_xmit);
/*************************************************************************
 *			Receiver routines
 *************************************************************************/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
unsigned int sysctl_skb_defer_max __read_mostly = 64;
int netdev_budget __read_mostly = 300;
/* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
int weight_p __read_mostly = 64;           /* old backlog weight */
int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
int dev_rx_weight __read_mostly = 64;
int dev_tx_weight __read_mostly = 64;
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	struct task_struct *thread;

	lockdep_assert_irqs_disabled();

	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
		/* Paired with smp_mb__before_atomic() in
		 * napi_enable()/dev_set_threaded().
		 * Use READ_ONCE() to guarantee a complete
		 * read on napi->thread. Only call
		 * wake_up_process() when it's not NULL.
		 */
		thread = READ_ONCE(napi->thread);
		if (thread) {
			/* Avoid doing set_bit() if the thread is in
			 * INTERRUPTIBLE state, cause napi_thread_wait()
			 * makes sure to proceed with napi polling
			 * if the thread is explicitly woken from here.
			 */
			if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
				set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
			wake_up_process(thread);
			return;
		}
	}

	list_add_tail(&napi->poll_list, &sd->poll_list);
	WRITE_ONCE(napi->list_owner, smp_processor_id());
	/* If not called from net_rx_action()
	 * we have to raise NET_RX_SOFTIRQ.
	 */
	if (!sd->in_net_rx_action)
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);

struct static_key_false rps_needed __read_mostly;
EXPORT_SYMBOL(rps_needed);
struct static_key_false rfs_needed __read_mostly;
EXPORT_SYMBOL(rfs_needed);
static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	const struct rps_sock_flow_table *sock_flow_table;
	struct netdev_rx_queue *rxqueue = dev->_rx;
	struct rps_dev_flow_table *flow_table;
	struct rps_map *map;
	int cpu = -1;
	u32 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue += index;
	}

	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	map = rcu_dereference(rxqueue->rps_map);
	if (!flow_table && !map)
		goto done;

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		struct rps_dev_flow *rflow;
		u32 next_cpu;
		u32 ident;

		/* First check into global flow table if there is a match.
		 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
		 */
		ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
		if ((ident ^ hash) & ~rps_cpu_mask)
			goto try_rps;

		next_cpu = ident & rps_cpu_mask;

		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (>= nr_cpu_ids).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

try_rps:

	if (map) {
		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = READ_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
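/* Illustrative sketch, not kernel code: per the kernel-doc above, a driver
 * implementing ndo_rx_flow_steer() is expected to scan its installed
 * filters periodically and remove those rps_may_expire_flow() reports as
 * stale. The "foo" device, its filter table and foo_remove_filter() are
 * hypothetical stand-ins for driver-private state.
 *
 *	static void foo_rfs_expire(struct foo_nic *nic)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < nic->n_filters; i++) {
 *			struct foo_filter *f = &nic->filters[i];
 *
 *			if (!f->in_use)
 *				continue;
 *			if (rps_may_expire_flow(nic->netdev, f->rxq_index,
 *						f->flow_id, i))
 *				foo_remove_filter(nic, f);
 *		}
 *	}
 */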
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */
/* Called from hardirq (IPI) context */
static void trigger_rx_softirq(void *data)
{
	struct softnet_data *sd = data;

	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	smp_store_release(&sd->defer_ipi_scheduled, 0);
}
/*
 * After we queued a packet into sd->input_pkt_queue,
 * we need to make sure this queue is serviced soon.
 *
 * - If this is another cpu queue, link it to our rps_ipi_list,
 *   and make sure we will process rps_ipi_list from net_rx_action().
 *
 * - If this is our own queue, NAPI schedule our backlog.
 *   Note that this also raises NET_RX_SOFTIRQ.
 */
static void napi_schedule_rps(struct softnet_data *sd)
{
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

#ifdef CONFIG_RPS
	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		/* If not called from net_rx_action() or napi_threaded_poll()
		 * we have to raise NET_RX_SOFTIRQ.
		 */
		if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return;
	}
#endif /* CONFIG_RPS */
	__napi_schedule_irqoff(&mysd->backlog);
}
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	enum skb_drop_reason reason;
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	reason = SKB_DROP_REASON_NOT_SPECIFIED;
	sd = &per_cpu(softnet_data, cpu);

	rps_lock_irqsave(sd, &flags);
	if (!netif_running(skb->dev))
		goto drop;
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock_irq_restore(sd, &flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
			napi_schedule_rps(sd);
		goto enqueue;
	}
	reason = SKB_DROP_REASON_CPU_BACKLOG;

drop:
	sd->dropped++;
	rps_unlock_irq_restore(sd, &flags);

	dev_core_stats_rx_dropped_inc(skb->dev);
	kfree_skb_reason(skb, reason);
	return NET_RX_DROP;
}
static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_rx_queue *rxqueue;

	rxqueue = dev->_rx;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);

			return rxqueue; /* Return first rxqueue */
		}
		rxqueue += index;
	}
	return rxqueue;
}
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
			     struct bpf_prog *xdp_prog)
{
	void *orig_data, *orig_data_end, *hard_start;
	struct netdev_rx_queue *rxqueue;
	bool orig_bcast, orig_host;
	u32 mac_len, frame_sz;
	__be16 orig_eth_type;
	struct ethhdr *eth;
	u32 metalen, act;
	int off;

	/* The XDP program wants to see the packet starting at the MAC
	 * header.
	 */
	mac_len = skb->data - skb_mac_header(skb);
	hard_start = skb->data - skb_headroom(skb);

	/* SKB "head" area always have tailroom for skb_shared_info */
	frame_sz = (void *)skb_end_pointer(skb) - hard_start;
	frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	rxqueue = netif_get_rxqueue(skb);
	xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
	xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
			 skb_headlen(skb) + mac_len, true);

	orig_data_end = xdp->data_end;
	orig_data = xdp->data;
	eth = (struct ethhdr *)xdp->data;
	orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
	orig_eth_type = eth->h_proto;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	/* check if bpf_xdp_adjust_head was used */
	off = xdp->data - orig_data;
	if (off) {
		if (off > 0)
			__skb_pull(skb, off);
		else if (off < 0)
			__skb_push(skb, -off);

		skb->mac_header += off;
		skb_reset_network_header(skb);
	}

	/* check if bpf_xdp_adjust_tail was used */
	off = xdp->data_end - orig_data_end;
	if (off != 0) {
		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
		skb->len += off; /* positive on grow, negative on shrink */
	}

	/* check if XDP changed eth hdr such SKB needs update */
	eth = (struct ethhdr *)xdp->data;
	if ((orig_eth_type != eth->h_proto) ||
	    (orig_host != ether_addr_equal_64bits(eth->h_dest,
						  skb->dev->dev_addr)) ||
	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
		__skb_push(skb, ETH_HLEN);
		skb->pkt_type = PACKET_HOST;
		skb->protocol = eth_type_trans(skb, skb->dev);
	}

	/* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
	 * before calling us again on redirect path. We do not call do_redirect
	 * as we leave that up to the caller.
	 *
	 * Caller is responsible for managing lifetime of skb (i.e. calling
	 * kfree_skb in response to actions it cannot handle/XDP_DROP).
	 */
	switch (act) {
	case XDP_REDIRECT:
	case XDP_TX:
		__skb_push(skb, mac_len);
		break;
	case XDP_PASS:
		metalen = xdp->data - xdp->data_meta;
		if (metalen)
			skb_metadata_set(skb, metalen);
		break;
	}

	return act;
}
static u32 netif_receive_generic_xdp(struct sk_buff *skb,
				     struct xdp_buff *xdp,
				     struct bpf_prog *xdp_prog)
{
	u32 act = XDP_DROP;

	/* Reinjected packets coming from act_mirred or similar should
	 * not get XDP generic processing.
	 */
	if (skb_is_redirected(skb))
		return XDP_PASS;

	/* XDP packets must be linear and must have sufficient headroom
	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
	 * native XDP provides, thus we need to do it here as well.
	 */
	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
		int troom = skb->tail + skb->data_len - skb->end;

		/* In case we have to go down the path and also linearize,
		 * then lets do the pskb_expand_head() work just once here.
		 */
		if (pskb_expand_head(skb,
				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
			goto do_drop;
		if (skb_linearize(skb))
			goto do_drop;
	}

	act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
	switch (act) {
	case XDP_REDIRECT:
	case XDP_TX:
	case XDP_PASS:
		break;
	default:
		bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(skb->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
	do_drop:
		kfree_skb(skb);
		break;
	}

	return act;
}
/* When doing generic XDP we have to bypass the qdisc layer and the
 * network taps in order to match in-driver-XDP behavior. This also means
 * that XDP packets are able to starve other packets going through a qdisc,
 * and DDOS attacks will be more effective. In-driver-XDP use dedicated TX
 * queues, so they do not have this starvation issue.
 */
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	bool free_skb = true;
	int cpu, rc;

	txq = netdev_core_pick_tx(dev, skb, NULL);
	cpu = smp_processor_id();
	HARD_TX_LOCK(dev, txq, cpu);
	if (!netif_xmit_frozen_or_drv_stopped(txq)) {
		rc = netdev_start_xmit(skb, dev, txq, 0);
		if (dev_xmit_complete(rc))
			free_skb = false;
	}
	HARD_TX_UNLOCK(dev, txq);
	if (free_skb) {
		trace_xdp_exception(dev, xdp_prog, XDP_TX);
		dev_core_stats_tx_dropped_inc(dev);
		kfree_skb(skb);
	}
}
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);

int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
{
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;
		int err;

		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
		if (act != XDP_PASS) {
			switch (act) {
			case XDP_REDIRECT:
				err = xdp_do_generic_redirect(skb->dev, skb,
							      &xdp, xdp_prog);
				if (err)
					goto out_redir;
				break;
			case XDP_TX:
				generic_xdp_tx(skb, xdp_prog);
				break;
			}
			return XDP_DROP;
		}
	}
	return XDP_PASS;
out_redir:
	kfree_skb_reason(skb, SKB_DROP_REASON_XDP);
	return XDP_DROP;
}
EXPORT_SYMBOL_GPL(do_xdp_generic);
static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);

	trace_netif_rx(skb);

#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
	} else
#endif
	{
		unsigned int qtail;

		ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
	}
	return ret;
}
/**
 *	__netif_rx	-	Slightly optimized version of netif_rx
 *	@skb: buffer to post
 *
 *	This behaves as netif_rx except that it does not disable bottom halves.
 *	As a result this function may only be invoked from the interrupt context
 *	(either hard or soft interrupt).
 */
int __netif_rx(struct sk_buff *skb)
{
	int ret;

	lockdep_assert_once(hardirq_count() | softirq_count());

	trace_netif_rx_entry(skb);
	ret = netif_rx_internal(skb);
	trace_netif_rx_exit(ret);
	return ret;
}
EXPORT_SYMBOL(__netif_rx);
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process via the backlog NAPI device. It
 *	always succeeds. The buffer may be dropped during processing for
 *	congestion control or by the protocol layers.
 *	The network buffer is passed via the backlog NAPI device. Modern NIC
 *	drivers should use NAPI and GRO.
 *	This function can be used from interrupt and from process context. The
 *	caller from process context must not disable interrupts before invoking
 *	this function.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 */
int netif_rx(struct sk_buff *skb)
{
	bool need_bh_off = !(hardirq_count() | softirq_count());
	int ret;

	if (need_bh_off)
		local_bh_disable();
	trace_netif_rx_entry(skb);
	ret = netif_rx_internal(skb);
	trace_netif_rx_exit(ret);
	if (need_bh_off)
		local_bh_enable();
	return ret;
}
EXPORT_SYMBOL(netif_rx);
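/* Illustrative sketch, not kernel code: the classic non-NAPI receive path,
 * a driver handing a frame to the stack from its interrupt handler. All
 * "foo" names are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct sk_buff *skb = foo_dequeue_rx_frame(dev);
 *
 *		if (skb) {
 *			skb->protocol = eth_type_trans(skb, dev);
 *			netif_rx(skb);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */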
static __latent_entropy void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;

			clist = clist->next;

			WARN_ON(refcount_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
				trace_consume_skb(skb, net_tx_action);
			else
				trace_kfree_skb(skb, net_tx_action,
						get_kfree_skb_cb(skb)->reason);

			if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
				__kfree_skb(skb);
			else
				__napi_kfree_skb(skb,
						 get_kfree_skb_cb(skb)->reason);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		rcu_read_lock();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock = NULL;

			head = head->next_sched;

			/* We need to make sure head->next_sched is read
			 * before clearing __QDISC_STATE_SCHED
			 */
			smp_mb__before_atomic();

			if (!(q->flags & TCQ_F_NOLOCK)) {
				root_lock = qdisc_lock(q);
				spin_lock(root_lock);
			} else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
						     &q->state))) {
				/* There is a synchronize_net() between
				 * STATE_DEACTIVATED flag being set and
				 * qdisc_reset()/some_qdisc_is_busy() in
				 * dev_deactivate(), so we can safely bail out
				 * early here to avoid data race between
				 * qdisc_deactivate() and some_qdisc_is_busy()
				 * for lockless qdisc.
				 */
				clear_bit(__QDISC_STATE_SCHED, &q->state);
				continue;
			}

			clear_bit(__QDISC_STATE_SCHED, &q->state);
			qdisc_run(q);
			if (root_lock)
				spin_unlock(root_lock);
		}

		rcu_read_unlock();
	}

	xfrm_dev_backlog(sd);
}
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
/**
 * netdev_is_rx_handler_busy - check if receive handler is registered
 * @dev: device to check
 *
 * Check if a receive handler is already registered for a given device.
 * Return true if there is one.
 *
 * The caller must hold the rtnl_mutex.
 */
bool netdev_is_rx_handler_busy(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev && rtnl_dereference(dev->rx_handler);
}
EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
/**
 * netdev_rx_handler_register - register receive handler
 * @dev: device to register a handler for
 * @rx_handler: receive handler to register
 * @rx_handler_data: data pointer that is used by rx handler
 *
 * Register a receive handler for a device. This handler will then be
 * called from __netif_receive_skb. A negative errno code is returned
 * on a failure.
 *
 * The caller must hold the rtnl_mutex.
 *
 * For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	if (dev->priv_flags & IFF_NO_RX_HANDLER)
		return -EINVAL;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
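/* Illustrative sketch, not kernel code: how a virtual device attaches
 * itself to a lower device's receive path, in the style of the bridge
 * (br_handle_frame) or bonding (bond_handle_frame). All "foo" names are
 * hypothetical.
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct foo_port *port =
 *			rcu_dereference((*pskb)->dev->rx_handler_data);
 *
 *		(*pskb)->dev = port->upper_dev;
 *		return RX_HANDLER_ANOTHER;	// reprocess on the upper dev
 *	}
 *
 *	// under rtnl_lock():
 *	err = netdev_rx_handler_register(lower_dev, foo_handle_frame, port);
 */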
/**
 * netdev_rx_handler_unregister - unregister receive handler
 * @dev: device to unregister a handler from
 *
 * Unregister a receive handler from a device.
 *
 * The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}
static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
			     int *ret, struct net_device *orig_dev)
{
	if (nf_hook_ingress_active(skb)) {
		int ingress_retval;

		if (*pt_prev) {
			*ret = deliver_skb(skb, *pt_prev, orig_dev);
			*pt_prev = NULL;
		}

		rcu_read_lock();
		ingress_retval = nf_hook_ingress(skb);
		rcu_read_unlock();
		return ingress_retval;
	}
	return 0;
}
static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
				    struct packet_type **ppt_prev)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct sk_buff *skb = *pskb;
	struct net_device *orig_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (static_branch_unlikely(&generic_xdp_needed_key)) {
		int ret2;

		migrate_disable();
		ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
		migrate_enable();

		if (ret2 != XDP_PASS) {
			ret = NET_RX_DROP;
			goto out;
		}
	}

	if (eth_type_vlan(skb->protocol)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	if (skb_skip_tc_classify(skb))
		goto skip_classify;

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_INGRESS
	if (static_branch_unlikely(&ingress_needed_key)) {
		bool another = false;

		nf_skip_egress(skb, true);
		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
					 &another);
		if (another)
			goto another_round;
		if (!skb)
			goto out;

		nf_skip_egress(skb, false);
		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
			goto out;
	}
#endif
	skb_reset_redirect(skb);
skip_classify:
	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
			break;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
check_vlan_id:
		if (skb_vlan_tag_get_id(skb)) {
			/* Vlan id is non 0 and vlan_do_receive() above couldn't
			 * find vlan device.
			 */
			skb->pkt_type = PACKET_OTHERHOST;
		} else if (eth_type_vlan(skb->protocol)) {
			/* Outer header is 802.1P with vlan 0, inner header is
			 * 802.1Q or 802.1AD and vlan_do_receive() above could
			 * not find vlan dev for vlan id 0.
			 */
			__vlan_hwaccel_clear_tag(skb);
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				goto out;
			if (vlan_do_receive(&skb))
				/* After stripping off 802.1P header with vlan 0
				 * vlan dev is found for inner header.
				 */
				goto another_round;
			else if (unlikely(!skb))
				goto out;
			else
				/* We have stripped outer 802.1P vlan 0 header.
				 * But could not find vlan dev.
				 * check again for vlan id to set OTHERHOST.
				 */
				goto check_vlan_id;
		}
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		__vlan_hwaccel_clear_tag(skb);
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
			goto drop;
		*ppt_prev = pt_prev;
	} else {
drop:
		if (!deliver_exact)
			dev_core_stats_rx_dropped_inc(skb->dev);
		else
			dev_core_stats_rx_nohandler_inc(skb->dev);
		kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	/* The invariant here is that if *ppt_prev is not NULL
	 * then skb should also be non-NULL.
	 *
	 * Apparently *ppt_prev assignment above holds this invariant due to
	 * skb dereferencing near it.
	 */
	*pskb = skb;
	return ret;
}
static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct net_device *orig_dev = skb->dev;
	struct packet_type *pt_prev = NULL;
	int ret;

	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
	if (pt_prev)
		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
					 skb->dev, pt_prev, orig_dev);
	return ret;
}
/**
 *	netif_receive_skb_core - special purpose version of netif_receive_skb
 *	@skb: buffer to process
 *
 *	More direct receive version of netif_receive_skb(). It should
 *	only be used by callers that have a need to skip RPS and Generic XDP.
 *	Caller must also take care of handling if ``(page_is_)pfmemalloc``.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb_core(struct sk_buff *skb)
{
	int ret;

	rcu_read_lock();
	ret = __netif_receive_skb_one_core(skb, false);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb_core);
static inline void __netif_receive_skb_list_ptype(struct list_head *head,
						  struct packet_type *pt_prev,
						  struct net_device *orig_dev)
{
	struct sk_buff *skb, *next;

	if (!pt_prev)
		return;
	if (list_empty(head))
		return;
	if (pt_prev->list_func != NULL)
		INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
				   ip_list_rcv, head, pt_prev, orig_dev);
	else
		list_for_each_entry_safe(skb, next, head, list) {
			skb_list_del_init(skb);
			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
		}
}
static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
{
	/* Fast-path assumptions:
	 * - There is no RX handler.
	 * - Only one packet_type matches.
	 * If either of these fails, we will end up doing some per-packet
	 * processing in-line, then handling the 'last ptype' for the whole
	 * sublist. This can't cause out-of-order delivery to any single ptype,
	 * because the 'last ptype' must be constant across the sublist, and all
	 * other ptypes are handled per-packet.
	 */
	/* Current (common) ptype of sublist */
	struct packet_type *pt_curr = NULL;
	/* Current (common) orig_dev of sublist */
	struct net_device *od_curr = NULL;
	struct list_head sublist;
	struct sk_buff *skb, *next;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *orig_dev = skb->dev;
		struct packet_type *pt_prev = NULL;

		skb_list_del_init(skb);
		__netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
		if (!pt_prev)
			continue;
		if (pt_curr != pt_prev || od_curr != orig_dev) {
			/* dispatch old sublist */
			__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			pt_curr = pt_prev;
			od_curr = orig_dev;
		}
		list_add_tail(&skb->list, &sublist);
	}

	/* dispatch final sublist */
	__netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
}
static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned int noreclaim_flag;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		noreclaim_flag = memalloc_noreclaim_save();
		ret = __netif_receive_skb_one_core(skb, true);
		memalloc_noreclaim_restore(noreclaim_flag);
	} else
		ret = __netif_receive_skb_one_core(skb, false);

	return ret;
}
static void __netif_receive_skb_list(struct list_head *head)
{
	unsigned long noreclaim_flag = 0;
	struct sk_buff *skb, *next;
	bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */

	list_for_each_entry_safe(skb, next, head, list) {
		if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
			struct list_head sublist;

			/* Handle the previous sublist */
			list_cut_before(&sublist, head, &skb->list);
			if (!list_empty(&sublist))
				__netif_receive_skb_list_core(&sublist, pfmemalloc);
			pfmemalloc = !pfmemalloc;
			/* See comments in __netif_receive_skb */
			if (pfmemalloc)
				noreclaim_flag = memalloc_noreclaim_save();
			else
				memalloc_noreclaim_restore(noreclaim_flag);
		}
	}
	/* Handle the remaining sublist */
	if (!list_empty(head))
		__netif_receive_skb_list_core(head, pfmemalloc);
	/* Restore pflags */
	if (pfmemalloc)
		memalloc_noreclaim_restore(noreclaim_flag);
}
static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
	struct bpf_prog *new = xdp->prog;
	int ret = 0;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rcu_assign_pointer(dev->xdp_prog, new);
		if (old)
			bpf_prog_put(old);

		if (old && !new) {
			static_branch_dec(&generic_xdp_needed_key);
		} else if (new && !old) {
			static_branch_inc(&generic_xdp_needed_key);
			dev_disable_lro(dev);
			dev_disable_gro_hw(dev);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int netif_receive_skb_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
	}
#endif
	ret = __netif_receive_skb(skb);
	rcu_read_unlock();
	return ret;
}
void netif_receive_skb_list_internal(struct list_head *head)
{
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
		skb_list_del_init(skb);
		if (!skb_defer_rx_timestamp(skb))
			list_add_tail(&skb->list, &sublist);
	}
	list_splice_init(&sublist, head);

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_branch_unlikely(&rps_needed)) {
		list_for_each_entry_safe(skb, next, head, list) {
			struct rps_dev_flow voidflow, *rflow = &voidflow;
			int cpu = get_rps_cpu(skb->dev, skb, &rflow);

			if (cpu >= 0) {
				/* Will be handled, remove from list */
				skb_list_del_init(skb);
				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			}
		}
	}
#endif
	__netif_receive_skb_list(head);
	rcu_read_unlock();
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	trace_netif_receive_skb_entry(skb);

	ret = netif_receive_skb_internal(skb);
	trace_netif_receive_skb_exit(ret);

	return ret;
}
EXPORT_SYMBOL(netif_receive_skb);
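/* Illustrative sketch, not kernel code: a NAPI driver hands packets to the
 * stack from its poll routine. napi_gro_receive() is usually preferred;
 * the plain netif_receive_skb() path is shown here. "foo" names are
 * hypothetical.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = foo_next_rx_skb(ring))) {
 *			skb->protocol = eth_type_trans(skb, ring->netdev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */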
/**
 *	netif_receive_skb_list - process many receive buffers from network
 *	@head: list of skbs to process.
 *
 *	Since return value of netif_receive_skb() is normally ignored, and
 *	wouldn't be meaningful for a list, this function returns void.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 */
void netif_receive_skb_list(struct list_head *head)
{
	struct sk_buff *skb;

	if (list_empty(head))
		return;
	if (trace_netif_receive_skb_list_entry_enabled()) {
		list_for_each_entry(skb, head, list)
			trace_netif_receive_skb_list_entry(skb);
	}
	netif_receive_skb_list_internal(head);
	trace_netif_receive_skb_list_exit(0);
}
EXPORT_SYMBOL(netif_receive_skb_list);
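/* Illustrative sketch, not kernel code: batching received skbs and handing
 * them to the stack in one call, as list-based drivers do. "foo" names
 * are hypothetical.
 *
 *	LIST_HEAD(rx_list);
 *	struct sk_buff *skb;
 *
 *	while ((skb = foo_next_rx_skb(ring))) {
 *		skb->protocol = eth_type_trans(skb, ring->netdev);
 *		list_add_tail(&skb->list, &rx_list);
 *	}
 *	netif_receive_skb_list(&rx_list);
 */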
static DEFINE_PER_CPU(struct work_struct, flush_works);

/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
	struct sk_buff *skb, *tmp;
	struct softnet_data *sd;

	local_bh_disable();
	sd = this_cpu_ptr(&softnet_data);

	rps_lock_irq_disable(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			dev_kfree_skb_irq(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock_irq_enable(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	local_bh_enable();
}
static bool flush_required(int cpu)
{
#if IS_ENABLED(CONFIG_RPS)
	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
	bool do_flush;

	rps_lock_irq_disable(sd);

	/* as insertion into process_queue happens with the rps lock held,
	 * process_queue access may race only with dequeue
	 */
	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
		   !skb_queue_empty_lockless(&sd->process_queue);
	rps_unlock_irq_enable(sd);

	return do_flush;
#endif
	/* without RPS we can't safely check input_pkt_queue: during a
	 * concurrent remote skb_queue_splice() we can detect as empty both
	 * input_pkt_queue and process_queue even if the latter could end-up
	 * containing a lot of packets.
	 */
	return true;
}
static void flush_all_backlogs(void)
{
	static cpumask_t flush_cpus;
	unsigned int cpu;

	/* since we are under rtnl lock protection we can use static data
	 * for the cpumask and avoid allocating on stack the possibly
	 * large mask
	 */
	ASSERT_RTNL();

	cpus_read_lock();

	cpumask_clear(&flush_cpus);
	for_each_online_cpu(cpu) {
		if (flush_required(cpu)) {
			queue_work_on(cpu, system_highpri_wq,
				      per_cpu_ptr(&flush_works, cpu));
			cpumask_set_cpu(cpu, &flush_cpus);
		}
	}

	/* we can have in flight packet[s] on the cpus we are not flushing,
	 * synchronize_net() in unregister_netdevice_many() will take care of
	 * them
	 */
	for_each_cpu(cpu, &flush_cpus)
		flush_work(per_cpu_ptr(&flush_works, cpu));

	cpus_read_unlock();
}
static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
	while (remsd) {
		struct softnet_data *next = remsd->rps_ipi_next;

		if (cpu_online(remsd->cpu))
			smp_call_function_single_async(remsd->cpu, &remsd->csd);
		remsd = next;
	}
#endif
}

/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		net_rps_send_ipi(remsd);
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}
static int process_backlog(struct napi_struct *napi, int quota)
{
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
	bool again = true;
	int work = 0;

	/* Check if we have pending ipi, it's better to send them now,
	 * not waiting net_rx_action() end.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = READ_ONCE(dev_rx_weight);
	while (again) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			rcu_read_lock();
			__netif_receive_skb(skb);
			rcu_read_unlock();
			input_queue_head_incr(sd);
			if (++work >= quota)
				return work;
		}

		rps_lock_irq_disable(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we don't need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			again = false;
		} else {
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
		}
		rps_unlock_irq_enable(sd);
	}

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
/**
 * napi_schedule_prep - check if napi can be scheduled
 * @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running. This is used as a condition variable to
 * ensure only one NAPI poll instance runs. We also make
 * sure there is no pending NAPI disable.
 */
bool napi_schedule_prep(struct napi_struct *n)
{
	unsigned long new, val = READ_ONCE(n->state);

	do {
		if (unlikely(val & NAPIF_STATE_DISABLE))
			return false;
		new = val | NAPIF_STATE_SCHED;

		/* Sets STATE_MISSED bit if STATE_SCHED was already set
		 * This was suggested by Alexander Duyck, as compiler
		 * emits better code than :
		 * if (val & NAPIF_STATE_SCHED)
		 *     new |= NAPIF_STATE_MISSED;
		 */
		new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
						   NAPIF_STATE_MISSED;
	} while (!try_cmpxchg(&n->state, &val, new));

	return !(val & NAPIF_STATE_SCHED);
}
EXPORT_SYMBOL(napi_schedule_prep);
/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked.
 *
 * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
 * because the interrupt disabled assumption might not be true
 * due to force-threaded interrupts and spinlock substitution.
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		____napi_schedule(this_cpu_ptr(&softnet_data), n);
	else
		__napi_schedule(n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
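/* Illustrative sketch, not kernel code: the prep/schedule pair as a driver
 * would use it from a hard interrupt handler, equivalent to what
 * napi_schedule() does internally. "foo" names are hypothetical.
 *
 *	static irqreturn_t foo_msix_handler(int irq, void *data)
 *	{
 *		struct foo_ring *ring = data;
 *
 *		foo_disable_ring_irq(ring);	// hypothetical helper
 *		if (napi_schedule_prep(&ring->napi))
 *			__napi_schedule_irqoff(&ring->napi);
 *		return IRQ_HANDLED;
 *	}
 */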
bool napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags, val, new, timeout = 0;
	bool ret = true;

	/*
	 * 1) Don't let napi dequeue from the cpu poll list
	 *    just in case its running on a different cpu.
	 * 2) If we are busy polling, do nothing here, we have
	 *    the guarantee we will be called later.
	 */
	if (unlikely(n->state & (NAPIF_STATE_NPSVC |
				 NAPIF_STATE_IN_BUSY_POLL)))
		return false;

	if (work_done) {
		if (n->gro_bitmask)
			timeout = READ_ONCE(n->dev->gro_flush_timeout);
		n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
	}
	if (n->defer_hard_irqs_count > 0) {
		n->defer_hard_irqs_count--;
		timeout = READ_ONCE(n->dev->gro_flush_timeout);
		if (timeout)
			ret = false;
	}
	if (n->gro_bitmask) {
		/* When the NAPI instance uses a timeout and keeps postponing
		 * it, we need to bound somehow the time packets are kept in
		 * the GRO layer.
		 */
		napi_gro_flush(n, !!timeout);
	}

	gro_normal_list(n);

	if (unlikely(!list_empty(&n->poll_list))) {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		list_del_init(&n->poll_list);
		local_irq_restore(flags);
	}
	WRITE_ONCE(n->list_owner, -1);

	val = READ_ONCE(n->state);
	do {
		WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));

		new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
			      NAPIF_STATE_SCHED_THREADED |
			      NAPIF_STATE_PREFER_BUSY_POLL);

		/* If STATE_MISSED was set, leave STATE_SCHED set,
		 * because we will call napi->poll() one more time.
		 * This C code was suggested by Alexander Duyck to help gcc.
		 */
		new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
						    NAPIF_STATE_SCHED;
	} while (!try_cmpxchg(&n->state, &val, new));

	if (unlikely(val & NAPIF_STATE_MISSED)) {
		__napi_schedule(n);
		return false;
	}

	if (timeout)
		hrtimer_start(&n->timer, ns_to_ktime(timeout),
			      HRTIMER_MODE_REL_PINNED);
	return ret;
}
EXPORT_SYMBOL(napi_complete_done);
/* must be called under rcu_read_lock(), as we don't take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
#if defined(CONFIG_NET_RX_BUSY_POLL)

static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
{
	if (!skip_schedule) {
		gro_normal_list(napi);
		__napi_schedule(napi);
		return;
	}

	if (napi->gro_bitmask) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(napi, HZ >= 1000);
	}

	gro_normal_list(napi);
	clear_bit(NAPI_STATE_SCHED, &napi->state);
}
static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
			   u16 budget)
{
	bool skip_schedule = false;
	unsigned long timeout;
	int rc;

	/* Busy polling means there is a high chance device driver hard irq
	 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
	 * set in napi_schedule_prep().
	 * Since we are about to call napi->poll() once more, we can safely
	 * clear NAPI_STATE_MISSED.
	 *
	 * Note: x86 could use a single "lock and ..." instruction
	 * to perform these two clear_bit()
	 */
	clear_bit(NAPI_STATE_MISSED, &napi->state);
	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);

	local_bh_disable();

	if (prefer_busy_poll) {
		napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
		timeout = READ_ONCE(napi->dev->gro_flush_timeout);
		if (napi->defer_hard_irqs_count && timeout) {
			hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
			skip_schedule = true;
		}
	}

	/* All we really want here is to re-enable device interrupts.
	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
	 */
	rc = napi->poll(napi, budget);
	/* We can't gro_normal_list() here, because napi->poll() might have
	 * rearmed the napi (napi_complete_done()) in which case it could
	 * already be running on another CPU.
	 */
	trace_napi_poll(napi, rc, budget);
	netpoll_poll_unlock(have_poll_lock);
	if (rc == budget)
		__busy_poll_stop(napi, skip_schedule);
	local_bh_enable();
}
void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg, bool prefer_busy_poll, u16 budget)
{
	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
	int (*napi_poll)(struct napi_struct *napi, int budget);
	void *have_poll_lock = NULL;
	struct napi_struct *napi;

restart:
	napi_poll = NULL;

	rcu_read_lock();

	napi = napi_by_id(napi_id);
	if (!napi)
		goto out;

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	for (;;) {
		int work = 0;

		local_bh_disable();
		if (!napi_poll) {
			unsigned long val = READ_ONCE(napi->state);

			/* If multiple threads are competing for this napi,
			 * we avoid dirtying napi->state as much as we can.
			 */
			if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
				   NAPIF_STATE_IN_BUSY_POLL)) {
				if (prefer_busy_poll)
					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
				goto count;
			}
			if (cmpxchg(&napi->state, val,
				    val | NAPIF_STATE_IN_BUSY_POLL |
					  NAPIF_STATE_SCHED) != val) {
				if (prefer_busy_poll)
					set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
				goto count;
			}
			have_poll_lock = netpoll_poll_lock(napi);
			napi_poll = napi->poll;
		}
		work = napi_poll(napi, budget);
		trace_napi_poll(napi, work, budget);
		gro_normal_list(napi);
count:
		if (work > 0)
			__NET_ADD_STATS(dev_net(napi->dev),
					LINUX_MIB_BUSYPOLLRXPACKETS, work);
		local_bh_enable();

		if (!loop_end || loop_end(loop_end_arg, start_time))
			break;

		if (unlikely(need_resched())) {
			if (napi_poll)
				busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
			if (!IS_ENABLED(CONFIG_PREEMPT_RT))
				preempt_enable();
			rcu_read_unlock();
			cond_resched();
			if (loop_end(loop_end_arg, start_time))
				return;
			goto restart;
		}
		cpu_relax();
	}
	if (napi_poll)
		busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(napi_busy_loop);

#endif /* CONFIG_NET_RX_BUSY_POLL */
static void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
			napi_gen_id = MIN_NAPI_ID;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}
/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
static void napi_hash_del(struct napi_struct *napi)
{
	spin_lock(&napi_hash_lock);

	hlist_del_init_rcu(&napi->napi_hash_node);

	spin_unlock(&napi_hash_lock);
}
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);

	/* Note : we use a relaxed variant of napi_schedule_prep() not setting
	 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
	 */
	if (!napi_disable_pending(napi) &&
	    !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
		clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
		__napi_schedule_irqoff(napi);
	}

	return HRTIMER_NORESTART;
}
static void init_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		INIT_LIST_HEAD(&napi->gro_hash[i].list);
		napi->gro_hash[i].count = 0;
	}
	napi->gro_bitmask = 0;
}
int dev_set_threaded(struct net_device *dev, bool threaded)
{
	struct napi_struct *napi;
	int err = 0;

	if (dev->threaded == threaded)
		return 0;

	if (threaded) {
		list_for_each_entry(napi, &dev->napi_list, dev_list) {
			if (!napi->thread) {
				err = napi_kthread_create(napi);
				if (err) {
					threaded = false;
					break;
				}
			}
		}
	}

	dev->threaded = threaded;

	/* Make sure kthread is created before THREADED bit
	 * is set.
	 */
	smp_mb__before_atomic();

	/* Setting/unsetting threaded mode on a napi might not immediately
	 * take effect, if the current napi instance is actively being
	 * polled. In this case, the switch between threaded mode and
	 * softirq mode will happen in the next round of napi_schedule().
	 * This should not cause hiccups/stalls to the live traffic.
	 */
	list_for_each_entry(napi, &dev->napi_list, dev_list)
		assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);

	return err;
}
EXPORT_SYMBOL(dev_set_threaded);
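/* Note (illustrative, not kernel code): besides drivers calling
 * dev_set_threaded() directly, threaded NAPI can be toggled from user
 * space; writing the per-device sysfs attribute ends up here, e.g.:
 *
 *	echo 1 > /sys/class/net/eth0/threaded
 *
 * "eth0" is just an example interface name.
 */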
void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
			   int (*poll)(struct napi_struct *, int), int weight)
{
	if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
		return;

	INIT_LIST_HEAD(&napi->poll_list);
	INIT_HLIST_NODE(&napi->napi_hash_node);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	init_gro_hash(napi);
	napi->skb = NULL;
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
				weight);
	napi->weight = weight;
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	napi->poll_owner = -1;
#endif
	napi->list_owner = -1;
	set_bit(NAPI_STATE_SCHED, &napi->state);
	set_bit(NAPI_STATE_NPSVC, &napi->state);
	list_add_rcu(&napi->dev_list, &dev->napi_list);
	napi_hash_add(napi);
	napi_get_frags_check(napi);
	/* Create kthread for this napi if dev->threaded is set.
	 * Clear dev->threaded if kthread creation failed so that
	 * threaded mode will not be enabled in napi_enable().
	 */
	if (dev->threaded && napi_kthread_create(napi))
		dev->threaded = 0;
}
EXPORT_SYMBOL(netif_napi_add_weight);
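/* Illustrative sketch, not kernel code: typical NAPI wiring in a driver.
 * netif_napi_add() is the common wrapper that supplies the default
 * weight. All "foo" names are hypothetical.
 *
 *	// probe:
 *	netif_napi_add(dev, &ring->napi, foo_poll);
 *
 *	// open:
 *	napi_enable(&ring->napi);
 *
 *	// interrupt handler:
 *	napi_schedule(&ring->napi);
 *
 *	// poll routine:
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = foo_clean_rx(napi, budget);	// hypothetical
 *
 *		if (work < budget && napi_complete_done(napi, work))
 *			foo_enable_ring_irq(napi);	// hypothetical
 *		return work;
 *	}
 *
 *	// stop/teardown:
 *	napi_disable(&ring->napi);
 *	netif_napi_del(&ring->napi);
 */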
void napi_disable(struct napi_struct *n)
{
	unsigned long val, new;

	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	val = READ_ONCE(n->state);
	do {
		while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
			usleep_range(20, 200);
			val = READ_ONCE(n->state);
		}

		new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
		new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
	} while (!try_cmpxchg(&n->state, &val, new));

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);
/**
 * napi_enable - enable NAPI scheduling
 * @n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
void napi_enable(struct napi_struct *n)
{
	unsigned long new, val = READ_ONCE(n->state);

	do {
		BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));

		new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
		if (n->dev->threaded && n->thread)
			new |= NAPIF_STATE_THREADED;
	} while (!try_cmpxchg(&n->state, &val, new));
}
EXPORT_SYMBOL(napi_enable);
static void flush_gro_hash(struct napi_struct *napi)
{
	int i;

	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
		struct sk_buff *skb, *n;

		list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
			kfree_skb(skb);
		napi->gro_hash[i].count = 0;
	}
}
/* Must be called in process context */
void __netif_napi_del(struct napi_struct *napi)
{
	if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
		return;

	napi_hash_del(napi);
	list_del_rcu(&napi->dev_list);
	napi_free_frags(napi);

	flush_gro_hash(napi);
	napi->gro_bitmask = 0;

	if (napi->thread) {
		kthread_stop(napi->thread);
		napi->thread = NULL;
	}
}
EXPORT_SYMBOL(__netif_napi_del);
static int __napi_poll(struct napi_struct *n, bool *repoll)
{
	int work, weight;

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi(). Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call. Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n, work, weight);
	}

	if (unlikely(work > weight))
		netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
				n->poll, work, weight);

	if (likely(work < weight))
		return work;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight. In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		return work;
	}

	/* The NAPI context has more processing work, but busy-polling
	 * is preferred. Exit early.
	 */
	if (napi_prefer_busy_poll(n)) {
		if (napi_complete_done(n, work)) {
			/* If timeout is not set, we need to make sure
			 * that the NAPI is re-scheduled.
			 */
			napi_schedule(n);
		}
		return work;
	}

	if (n->gro_bitmask) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	gro_normal_list(n);

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		return work;
	}

	*repoll = true;

	return work;
}
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	bool do_repoll = false;
	void *have;
	int work;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	work = __napi_poll(n, &do_repoll);

	if (do_repoll)
		list_add_tail(&n->poll_list, repoll);

	netpoll_poll_unlock(have);

	return work;
}
static int napi_thread_wait(struct napi_struct *napi)
{
	bool woken = false;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		/* Testing SCHED_THREADED bit here to make sure the current
		 * kthread owns this napi and could poll on this napi.
		 * Testing SCHED bit is not enough because SCHED bit might be
		 * set by some other busy poll thread or by napi_disable().
		 */
		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
			WARN_ON(!list_empty(&napi->poll_list));
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		schedule();
		/* woken being true indicates this thread owns this napi. */
		woken = true;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return -1;
}
static void skb_defer_free_flush(struct softnet_data *sd)
{
	struct sk_buff *skb, *next;

	/* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
	if (!READ_ONCE(sd->defer_list))
		return;

	spin_lock(&sd->defer_lock);
	skb = sd->defer_list;
	sd->defer_list = NULL;
	sd->defer_count = 0;
	spin_unlock(&sd->defer_lock);

	while (skb != NULL) {
		next = skb->next;
		napi_consume_skb(skb, 1);
		skb = next;
	}
}
static int napi_threaded_poll(void *data)
{
	struct napi_struct *napi = data;
	struct softnet_data *sd;
	void *have;

	while (!napi_thread_wait(napi)) {
		for (;;) {
			bool repoll = false;

			local_bh_disable();
			sd = this_cpu_ptr(&softnet_data);
			sd->in_napi_threaded_poll = true;

			have = netpoll_poll_lock(napi);
			__napi_poll(napi, &repoll);
			netpoll_poll_unlock(have);

			sd->in_napi_threaded_poll = false;
			barrier();

			if (sd_has_rps_ipi_waiting(sd)) {
				local_irq_disable();
				net_rps_action_and_irq_enable(sd);
			}
			skb_defer_free_flush(sd);
			local_bh_enable();

			if (!repoll)
				break;

			cond_resched();
		}
	}
	return 0;
}
static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
		usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
	int budget = READ_ONCE(netdev_budget);
	LIST_HEAD(list);
	LIST_HEAD(repoll);

begin:
	sd->in_net_rx_action = true;
	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		skb_defer_free_flush(sd);

		if (list_empty(&list)) {
			if (list_empty(&repoll)) {
				sd->in_net_rx_action = false;
				barrier();
				/* We need to check if ____napi_schedule()
				 * had refilled poll_list while
				 * sd->in_net_rx_action was true.
				 */
				if (!list_empty(&sd->poll_list))
					goto begin;
				if (!sd_has_rps_ipi_waiting(sd))
					goto end;
			}
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	else
		sd->in_net_rx_action = false;

	net_rps_action_and_irq_enable(sd);
end:;
}
struct netdev_adjacent {
	struct net_device *dev;
	netdevice_tracker dev_tracker;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* lookup ignore flag */
	bool ignore;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};
static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}
static int ____netdev_has_upper_dev(struct net_device *upper_dev,
				    struct netdev_nested_priv *priv)
{
	struct net_device *dev = (struct net_device *)priv->data;

	return upper_dev == dev;
}
/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)upper_dev,
	};

	ASSERT_RTNL();

	return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
					     &priv);
}
EXPORT_SYMBOL(netdev_has_upper_dev);
/**
 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks the entire upper device chain.
 * The caller must hold rcu lock.
 */
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
				  struct net_device *upper_dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)upper_dev,
	};

	return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
					       &priv);
}
EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_any_upper_dev);
/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);
static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master) && !upper->ignore)
		return upper->dev;
	return NULL;
}
/**
 * netdev_has_any_lower_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to a lower device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_lower_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->adj_list.lower);
}
void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);
/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
6973 static struct net_device
*__netdev_next_upper_dev(struct net_device
*dev
,
6974 struct list_head
**iter
,
6977 struct netdev_adjacent
*upper
;
6979 upper
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
6981 if (&upper
->list
== &dev
->adj_list
.upper
)
6984 *iter
= &upper
->list
;
6985 *ignore
= upper
->ignore
;
6990 static struct net_device
*netdev_next_upper_dev_rcu(struct net_device
*dev
,
6991 struct list_head
**iter
)
6993 struct netdev_adjacent
*upper
;
6995 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6997 upper
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
6999 if (&upper
->list
== &dev
->adj_list
.upper
)
7002 *iter
= &upper
->list
;

static int __netdev_walk_all_upper_dev(struct net_device *dev,
				       int (*fn)(struct net_device *dev,
						 struct netdev_nested_priv *priv),
				       struct netdev_nested_priv *priv)
{
	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;
	bool ignore;

	now = dev;
	iter = &dev->adj_list.upper;

	while (1) {
		if (now != dev) {
			ret = fn(now, priv);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			udev = __netdev_next_upper_dev(now, &iter, &ignore);
			if (!udev)
				break;
			if (ignore)
				continue;

			next = udev;
			niter = &udev->adj_list.upper;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}

int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    struct netdev_nested_priv *priv),
				  struct netdev_nested_priv *priv)
{
	struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;

	now = dev;
	iter = &dev->adj_list.upper;

	while (1) {
		if (now != dev) {
			ret = fn(now, priv);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			udev = netdev_next_upper_dev_rcu(now, &iter);
			if (!udev)
				break;

			next = udev;
			niter = &udev->adj_list.upper;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
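
/* Usage sketch (illustrative only, not part of this file): a caller passes
 * a callback that is invoked for every device in the upper chain together
 * with a struct netdev_nested_priv cursor; returning non-zero stops the
 * walk. The callback name and the use of priv->data are hypothetical.
 *
 *	static int example_count_uppers(struct net_device *dev,
 *					struct netdev_nested_priv *priv)
 *	{
 *		(*(int *)priv->data)++;
 *		return 0;	// keep walking
 *	}
 *
 *	// under rcu_read_lock():
 *	int n = 0;
 *	struct netdev_nested_priv priv = { .data = &n };
 *	netdev_walk_all_upper_dev_rcu(dev, example_count_uppers, &priv);
 */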

static bool __netdev_has_upper_dev(struct net_device *dev,
				   struct net_device *upper_dev)
{
	struct netdev_nested_priv priv = {
		.flags = 0,
		.data = (void *)upper_dev,
	};

	ASSERT_RTNL();

	return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
					   &priv);
}

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or use its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU
 *				       variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);

static struct net_device *netdev_next_lower_dev(struct net_device *dev,
						struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}

static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
						  struct list_head **iter,
						  bool *ignore)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;
	*ignore = lower->ignore;

	return lower->dev;
}

int netdev_walk_all_lower_dev(struct net_device *dev,
			      int (*fn)(struct net_device *dev,
					struct netdev_nested_priv *priv),
			      struct netdev_nested_priv *priv)
{
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;

	now = dev;
	iter = &dev->adj_list.lower;

	while (1) {
		if (now != dev) {
			ret = fn(now, priv);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			ldev = netdev_next_lower_dev(now, &iter);
			if (!ldev)
				break;

			next = ldev;
			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);

static int __netdev_walk_all_lower_dev(struct net_device *dev,
				       int (*fn)(struct net_device *dev,
						 struct netdev_nested_priv *priv),
				       struct netdev_nested_priv *priv)
{
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;
	bool ignore;

	now = dev;
	iter = &dev->adj_list.lower;

	while (1) {
		if (now != dev) {
			ret = fn(now, priv);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			ldev = __netdev_next_lower_dev(now, &iter, &ignore);
			if (!ldev)
				break;
			if (ignore)
				continue;

			next = ldev;
			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}

struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
					     struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_next_lower_dev_rcu);

static u8 __netdev_upper_depth(struct net_device *dev)
{
	struct net_device *udev;
	struct list_head *iter;
	u8 max_depth = 0;
	bool ignore;

	for (iter = &dev->adj_list.upper,
	     udev = __netdev_next_upper_dev(dev, &iter, &ignore);
	     udev;
	     udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
		if (ignore)
			continue;
		if (max_depth < udev->upper_level)
			max_depth = udev->upper_level;
	}

	return max_depth;
}

static u8 __netdev_lower_depth(struct net_device *dev)
{
	struct net_device *ldev;
	struct list_head *iter;
	u8 max_depth = 0;
	bool ignore;

	for (iter = &dev->adj_list.lower,
	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
	     ldev;
	     ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
		if (ignore)
			continue;
		if (max_depth < ldev->lower_level)
			max_depth = ldev->lower_level;
	}

	return max_depth;
}

static int __netdev_update_upper_level(struct net_device *dev,
				       struct netdev_nested_priv *__unused)
{
	dev->upper_level = __netdev_upper_depth(dev) + 1;
	return 0;
}

#ifdef CONFIG_LOCKDEP
static LIST_HEAD(net_unlink_list);

static void net_unlink_todo(struct net_device *dev)
{
	if (list_empty(&dev->unlink_list))
		list_add_tail(&dev->unlink_list, &net_unlink_list);
}
#endif

static int __netdev_update_lower_level(struct net_device *dev,
				       struct netdev_nested_priv *priv)
{
	dev->lower_level = __netdev_lower_depth(dev) + 1;

#ifdef CONFIG_LOCKDEP
	if (!priv)
		return 0;

	if (priv->flags & NESTED_SYNC_IMM)
		dev->nested_level = dev->lower_level - 1;
	if (priv->flags & NESTED_SYNC_TODO)
		net_unlink_todo(dev);
#endif
	return 0;
}

int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
				  int (*fn)(struct net_device *dev,
					    struct netdev_nested_priv *priv),
				  struct netdev_nested_priv *priv)
{
	struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
	struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
	int ret, cur = 0;

	now = dev;
	iter = &dev->adj_list.lower;

	while (1) {
		if (now != dev) {
			ret = fn(now, priv);
			if (ret)
				return ret;
		}

		next = NULL;
		while (1) {
			ldev = netdev_next_lower_dev_rcu(now, &iter);
			if (!ldev)
				break;

			next = ldev;
			niter = &ldev->adj_list.lower;
			dev_stack[cur] = now;
			iter_stack[cur++] = iter;
			break;
		}

		if (!next) {
			if (!cur)
				return 0;
			next = dev_stack[--cur];
			niter = iter_stack[cur];
		}

		now = next;
		iter = niter;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU
 *					variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr += 1;
		pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
			 dev->name, adj_dev->name, adj->ref_nr);

		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	adj->ignore = false;
	netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL);

	pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
		 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	netdev_put(adj_dev, &adj->dev_tracker);
	kfree(adj);

	return ret;
}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 u16 ref_nr,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
		 dev->name, adj_dev->name, ref_nr);

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("Adjacency does not exist for device %s from %s\n",
		       dev->name, adj_dev->name);
		WARN_ON(1);
		return;
	}

	if (adj->ref_nr > ref_nr) {
		pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
			 dev->name, adj_dev->name, ref_nr,
			 adj->ref_nr - ref_nr);
		adj->ref_nr -= ref_nr;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	netdev_put(adj_dev, &adj->dev_tracker);
	kfree_rcu(adj, rcu);
}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
					   private, master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
					   private, false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       u16 ref_nr,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->adj_list.upper,
						&upper_dev->adj_list.lower,
						private, master);
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info,
				   struct netdev_nested_priv *priv,
				   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_changeupper_info changeupper_info = {
		.info = {
			.dev = dev,
			.extack = extack,
		},
		.upper_dev = upper_dev,
		.master = master,
		.linking = true,
		.upper_info = upper_info,
	};
	struct net_device *master_dev;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (__netdev_has_upper_dev(upper_dev, dev))
		return -EBUSY;

	if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
		return -EMLINK;

	if (!master) {
		if (__netdev_has_upper_dev(dev, upper_dev))
			return -EEXIST;
	} else {
		master_dev = __netdev_master_upper_dev_get(dev);
		if (master_dev)
			return master_dev == upper_dev ? -EEXIST : -EBUSY;
	}

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);
	if (ret)
		return ret;

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		goto rollback;

	__netdev_update_upper_level(dev, NULL);
	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);

	__netdev_update_lower_level(upper_dev, priv);
	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
				    priv);

	return 0;

rollback:
	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev,
			  struct netlink_ext_ack *extack)
{
	struct netdev_nested_priv priv = {
		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
		.data = NULL,
	};

	return __netdev_upper_dev_link(dev, upper_dev, false,
				       NULL, NULL, &priv, extack);
}
EXPORT_SYMBOL(netdev_upper_dev_link);
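
/* Usage sketch (illustrative only, not part of this file): a VLAN-like
 * driver links its real device as a lower device under RTNL and undoes
 * the link on teardown. Device names and error handling are hypothetical.
 *
 *	ASSERT_RTNL();
 *	err = netdev_upper_dev_link(real_dev, vlan_dev, extack);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(real_dev, vlan_dev);
 */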

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 * @extack: netlink extended ack
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info,
				 struct netlink_ext_ack *extack)
{
	struct netdev_nested_priv priv = {
		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
		.data = NULL,
	};

	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info, &priv, extack);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);

static void __netdev_upper_dev_unlink(struct net_device *dev,
				      struct net_device *upper_dev,
				      struct netdev_nested_priv *priv)
{
	struct netdev_notifier_changeupper_info changeupper_info = {
		.info = {
			.dev = dev,
		},
		.upper_dev = upper_dev,
		.linking = false,
	};

	ASSERT_RTNL();

	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
				      &changeupper_info.info);

	__netdev_update_upper_level(dev, NULL);
	__netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);

	__netdev_update_lower_level(upper_dev, priv);
	__netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
				    priv);
}

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_nested_priv priv = {
		.flags = NESTED_SYNC_TODO,
		.data = NULL,
	};

	__netdev_upper_dev_unlink(dev, upper_dev, &priv);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);

static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
				      struct net_device *lower_dev,
				      bool val)
{
	struct netdev_adjacent *adj;

	adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
	if (adj)
		adj->ignore = val;

	adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
	if (adj)
		adj->ignore = val;
}

static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
					struct net_device *lower_dev)
{
	__netdev_adjacent_dev_set(upper_dev, lower_dev, true);
}

static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
				       struct net_device *lower_dev)
{
	__netdev_adjacent_dev_set(upper_dev, lower_dev, false);
}

int netdev_adjacent_change_prepare(struct net_device *old_dev,
				   struct net_device *new_dev,
				   struct net_device *dev,
				   struct netlink_ext_ack *extack)
{
	struct netdev_nested_priv priv = {
		.flags = 0,
		.data = NULL,
	};
	int err;

	if (!new_dev)
		return 0;

	if (old_dev && new_dev != old_dev)
		netdev_adjacent_dev_disable(dev, old_dev);
	err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
				      extack);
	if (err) {
		if (old_dev && new_dev != old_dev)
			netdev_adjacent_dev_enable(dev, old_dev);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(netdev_adjacent_change_prepare);

void netdev_adjacent_change_commit(struct net_device *old_dev,
				   struct net_device *new_dev,
				   struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
		.data = NULL,
	};

	if (!new_dev || !old_dev)
		return;

	if (new_dev == old_dev)
		return;

	netdev_adjacent_dev_enable(dev, old_dev);
	__netdev_upper_dev_unlink(old_dev, dev, &priv);
}
EXPORT_SYMBOL(netdev_adjacent_change_commit);

void netdev_adjacent_change_abort(struct net_device *old_dev,
				  struct net_device *new_dev,
				  struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.flags = 0,
		.data = NULL,
	};

	if (!new_dev)
		return;

	if (old_dev && new_dev != old_dev)
		netdev_adjacent_dev_enable(dev, old_dev);

	__netdev_upper_dev_unlink(new_dev, dev, &priv);
}
EXPORT_SYMBOL(netdev_adjacent_change_abort);

/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info = {
		.info.dev = dev,
	};

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);
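
/* Usage sketch (illustrative only, not part of this file): a bonding-like
 * master fills a struct netdev_bonding_info on the stack and dispatches it
 * for one slave; the field assignment shown is a hypothetical example.
 *
 *	struct netdev_bonding_info info = {};
 *
 *	info.slave.state = BOND_STATE_ACTIVE;
 *	netdev_bonding_info_change(slave_dev, &info);
 */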

static int netdev_offload_xstats_enable_l3(struct net_device *dev,
					   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_offload_xstats_info info = {
		.info.dev = dev,
		.info.extack = extack,
		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
	};
	int err;
	int rc;

	dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
					 GFP_KERNEL);
	if (!dev->offload_xstats_l3)
		return -ENOMEM;

	rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE,
						  NETDEV_OFFLOAD_XSTATS_DISABLE,
						  &info.info);
	err = notifier_to_errno(rc);
	if (err)
		goto free_stats;

	return 0;

free_stats:
	kfree(dev->offload_xstats_l3);
	dev->offload_xstats_l3 = NULL;
	return err;
}

int netdev_offload_xstats_enable(struct net_device *dev,
				 enum netdev_offload_xstats_type type,
				 struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();

	if (netdev_offload_xstats_enabled(dev, type))
		return -EALREADY;

	switch (type) {
	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
		return netdev_offload_xstats_enable_l3(dev, extack);
	}

	WARN_ON(1);
	return -EINVAL;
}
EXPORT_SYMBOL(netdev_offload_xstats_enable);

static void netdev_offload_xstats_disable_l3(struct net_device *dev)
{
	struct netdev_notifier_offload_xstats_info info = {
		.info.dev = dev,
		.type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
	};

	call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE,
				      &info.info);
	kfree(dev->offload_xstats_l3);
	dev->offload_xstats_l3 = NULL;
}

int netdev_offload_xstats_disable(struct net_device *dev,
				  enum netdev_offload_xstats_type type)
{
	ASSERT_RTNL();

	if (!netdev_offload_xstats_enabled(dev, type))
		return -EALREADY;

	switch (type) {
	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
		netdev_offload_xstats_disable_l3(dev);
		return 0;
	}

	WARN_ON(1);
	return -EINVAL;
}
EXPORT_SYMBOL(netdev_offload_xstats_disable);

static void netdev_offload_xstats_disable_all(struct net_device *dev)
{
	netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
}

static struct rtnl_hw_stats64 *
netdev_offload_xstats_get_ptr(const struct net_device *dev,
			      enum netdev_offload_xstats_type type)
{
	switch (type) {
	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
		return dev->offload_xstats_l3;
	}

	WARN_ON(1);
	return NULL;
}

bool netdev_offload_xstats_enabled(const struct net_device *dev,
				   enum netdev_offload_xstats_type type)
{
	ASSERT_RTNL();

	return netdev_offload_xstats_get_ptr(dev, type);
}
EXPORT_SYMBOL(netdev_offload_xstats_enabled);

struct netdev_notifier_offload_xstats_ru {
	bool used;
};

struct netdev_notifier_offload_xstats_rd {
	struct rtnl_hw_stats64 stats;
	bool used;
};

static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
				  const struct rtnl_hw_stats64 *src)
{
	dest->rx_packets	+= src->rx_packets;
	dest->tx_packets	+= src->tx_packets;
	dest->rx_bytes		+= src->rx_bytes;
	dest->tx_bytes		+= src->tx_bytes;
	dest->rx_errors		+= src->rx_errors;
	dest->tx_errors		+= src->tx_errors;
	dest->rx_dropped	+= src->rx_dropped;
	dest->tx_dropped	+= src->tx_dropped;
	dest->multicast		+= src->multicast;
}

static int netdev_offload_xstats_get_used(struct net_device *dev,
					  enum netdev_offload_xstats_type type,
					  bool *p_used,
					  struct netlink_ext_ack *extack)
{
	struct netdev_notifier_offload_xstats_ru report_used = {};
	struct netdev_notifier_offload_xstats_info info = {
		.info.dev = dev,
		.info.extack = extack,
		.type = type,
		.report_used = &report_used,
	};
	int rc;

	WARN_ON(!netdev_offload_xstats_enabled(dev, type));
	rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED,
					   &info.info);
	*p_used = report_used.used;
	return notifier_to_errno(rc);
}

static int netdev_offload_xstats_get_stats(struct net_device *dev,
					   enum netdev_offload_xstats_type type,
					   struct rtnl_hw_stats64 *p_stats,
					   bool *p_used,
					   struct netlink_ext_ack *extack)
{
	struct netdev_notifier_offload_xstats_rd report_delta = {};
	struct netdev_notifier_offload_xstats_info info = {
		.info.dev = dev,
		.info.extack = extack,
		.type = type,
		.report_delta = &report_delta,
	};
	struct rtnl_hw_stats64 *stats;
	int rc;

	stats = netdev_offload_xstats_get_ptr(dev, type);
	if (WARN_ON(!stats))
		return -EINVAL;

	rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
					   &info.info);

	/* Cache whatever we got, even if there was an error, otherwise the
	 * successful stats retrievals would get lost.
	 */
	netdev_hw_stats64_add(stats, &report_delta.stats);

	if (p_stats)
		*p_stats = *stats;
	*p_used = report_delta.used;

	return notifier_to_errno(rc);
}

int netdev_offload_xstats_get(struct net_device *dev,
			      enum netdev_offload_xstats_type type,
			      struct rtnl_hw_stats64 *p_stats, bool *p_used,
			      struct netlink_ext_ack *extack)
{
	ASSERT_RTNL();

	if (p_stats)
		return netdev_offload_xstats_get_stats(dev, type, p_stats,
						       p_used, extack);
	else
		return netdev_offload_xstats_get_used(dev, type, p_used,
						      extack);
}
EXPORT_SYMBOL(netdev_offload_xstats_get);

void
netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
				   const struct rtnl_hw_stats64 *stats)
{
	report_delta->used = true;
	netdev_hw_stats64_add(&report_delta->stats, stats);
}
EXPORT_SYMBOL(netdev_offload_xstats_report_delta);

void
netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
{
	report_used->used = true;
}
EXPORT_SYMBOL(netdev_offload_xstats_report_used);
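
/* Usage sketch (illustrative only, not part of this file): a driver's
 * notifier handler for NETDEV_OFFLOAD_XSTATS_REPORT_DELTA reports the
 * hardware counters it has accumulated since the last report;
 * example_hw_read() is a hypothetical helper.
 *
 *	struct netdev_notifier_offload_xstats_info *info = ptr;
 *	struct rtnl_hw_stats64 stats = {};
 *
 *	example_hw_read(&stats);
 *	netdev_offload_xstats_report_delta(info->report_delta, &stats);
 */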

void netdev_offload_xstats_push_delta(struct net_device *dev,
				      enum netdev_offload_xstats_type type,
				      const struct rtnl_hw_stats64 *p_stats)
{
	struct rtnl_hw_stats64 *stats;

	ASSERT_RTNL();

	stats = netdev_offload_xstats_get_ptr(dev, type);
	if (WARN_ON(!stats))
		return;

	netdev_hw_stats64_add(stats, p_stats);
}
EXPORT_SYMBOL(netdev_offload_xstats_push_delta);

/**
 * netdev_get_xmit_slave - Get the xmit slave of master device
 * @dev: device
 * @skb: The packet
 * @all_slaves: assume all the slaves are active
 *
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 * %NULL is returned if no slave is found.
 */
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
					 struct sk_buff *skb,
					 bool all_slaves)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_xmit_slave)
		return NULL;
	return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
}
EXPORT_SYMBOL(netdev_get_xmit_slave);
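
/* Usage sketch (illustrative only, not part of this file): under
 * rcu_read_lock(), a caller resolves which slave a master device would
 * use to transmit a given skb. No reference is taken on the result, so
 * it must only be used within the RCU read-side section.
 *
 *	rcu_read_lock();
 *	slave = netdev_get_xmit_slave(bond_dev, skb, false);
 *	if (slave)
 *		... // use slave while still under rcu_read_lock()
 *	rcu_read_unlock();
 */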

static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
						  struct sock *sk)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_sk_get_lower_dev)
		return NULL;
	return ops->ndo_sk_get_lower_dev(dev, sk);
}

/**
 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
 * @dev: device
 * @sk: the socket
 *
 * %NULL is returned if no lower device is found.
 */
struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
					    struct sock *sk)
{
	struct net_device *lower;

	lower = netdev_sk_get_lower_dev(dev, sk);
	while (lower) {
		dev = lower;
		lower = netdev_sk_get_lower_dev(dev, sk);
	}

	return dev;
}
EXPORT_SYMBOL(netdev_sk_get_lowest_dev);

static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
{
	struct netdev_notifier_changelowerstate_info changelowerstate_info = {
		.info.dev = lower_dev,
	};

	ASSERT_RTNL();
	changelowerstate_info.lower_state_info = lower_state_info;
	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
				      &changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		netdev_info(dev, "%s promiscuous mode\n",
			    dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(audit_context(), GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL);
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
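
/* Usage sketch (illustrative only, not part of this file): a capture-like
 * user bumps the promiscuity count while active and drops it when done;
 * the count nests correctly across multiple concurrent users.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	// enter promiscuous mode
 *	...
 *	dev_set_promiscuity(dev, -1);		// leave when the last user exits
 *	rtnl_unlock();
 */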

static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		netdev_info(dev, "%s allmulticast mode\n",
			    dev->flags & IFF_ALLMULTI ? "entered" : "left");
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags, 0, NULL);
	}
	return 0;
}

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
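
/* Usage sketch (illustrative only, not part of this file): a routing
 * daemon helper that needs every multicast frame takes a reference on
 * allmulti and releases it symmetrically:
 *
 *	err = dev_set_allmulti(dev, 1);
 *	if (err)
 *		return err;
 *	...
 *	dev_set_allmulti(dev, -1);
 */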

/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);

int __dev_change_flags(struct net_device *dev, unsigned int flags,
		       struct netlink_ext_ack *extack)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {
		if (old_flags & IFF_UP)
			__dev_close(dev);
		else
			ret = __dev_open(dev, extack);
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC, when
	 * IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges, u32 portid,
			const struct nlmsghdr *nlh)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (gchanges)
		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info = {
			.info = {
				.dev = dev,
			},
			.flags_changed = changes,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
	}
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 * @extack: netlink extended ack
 *
 * Change settings on a device based on state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags,
		     struct netlink_ext_ack *extack)
{
	int ret;
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags, extack);
	if (ret < 0)
		return ret;

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes, 0, NULL);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
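
/* Usage sketch (illustrative only, not part of this file): bringing an
 * interface administratively up from kernel code mirrors what
 * "ip link set dev X up" triggers via rtnetlink:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
 *	rtnl_unlock();
 */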

int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	/* Pairs with all the lockless reads of dev->mtu in the stack */
	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}
EXPORT_SYMBOL(__dev_set_mtu);

int dev_validate_mtu(struct net_device *dev, int new_mtu,
		     struct netlink_ext_ack *extack)
{
	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
		return -EINVAL;
	}

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
		return -EINVAL;
	}
	return 0;
}

/**
 * dev_set_mtu_ext - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 * @extack: netlink extended ack
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
		    struct netlink_ext_ack *extack)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	err = dev_validate_mtu(dev, new_mtu, extack);
	if (err)
		return err;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						   orig_mtu);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						     new_mtu);
		}
	}
	return err;
}

int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	struct netlink_ext_ack extack;
	int err;

	memset(&extack, 0, sizeof(extack));
	err = dev_set_mtu_ext(dev, new_mtu, &extack);
	if (err && extack._msg)
		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
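
/* Usage sketch (illustrative only, not part of this file): a tunnel-like
 * driver shrinking its lower device's MTU to leave room for encapsulation
 * overhead; the EXAMPLE_HDR_LEN constant is hypothetical.
 *
 *	err = dev_set_mtu(lower_dev, lower_dev->mtu - EXAMPLE_HDR_LEN);
 *	if (err)
 *		netdev_warn(lower_dev, "failed to adjust MTU\n");
 */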

/**
 * dev_change_tx_queue_len - Change TX queue length of a netdevice
 * @dev: device
 * @new_len: new tx queue length
 */
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	unsigned int orig_len = dev->tx_queue_len;
	int res;

	if (new_len != (unsigned int)new_len)
		return -ERANGE;

	if (new_len != orig_len) {
		dev->tx_queue_len = new_len;
		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
		res = notifier_to_errno(res);
		if (res)
			goto err_rollback;
		res = dev_qdisc_change_tx_queue_len(dev);
		if (res)
			goto err_rollback;
	}

	return 0;

err_rollback:
	netdev_err(dev, "refused to change device tx_queue_len\n");
	dev->tx_queue_len = orig_len;
	return res;
}

/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}

/**
 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
 * @dev: device
 * @addr: new address
 * @extack: netlink extended ack
 */
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
			      struct netlink_ext_ack *extack)
{
	struct netdev_notifier_pre_changeaddr_info info = {
		.info.dev = dev,
		.info.extack = extack,
		.dev_addr = addr,
	};
	int rc;

	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
	return notifier_to_errno(rc);
}
EXPORT_SYMBOL(dev_pre_changeaddr_notify);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 * @extack: netlink extended ack
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
			struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
	if (err)
		return err;
	if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) {
		err = ops->ndo_set_mac_address(dev, sa);
		if (err)
			return err;
	}
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);

static DECLARE_RWSEM(dev_addr_sem);

int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
			     struct netlink_ext_ack *extack)
{
	int ret;

	down_write(&dev_addr_sem);
	ret = dev_set_mac_address(dev, sa, extack);
	up_write(&dev_addr_sem);
	return ret;
}
EXPORT_SYMBOL(dev_set_mac_address_user);

int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
{
	size_t size = sizeof(sa->sa_data_min);
	struct net_device *dev;
	int ret = 0;

	down_read(&dev_addr_sem);
	rcu_read_lock();

	dev = dev_get_by_name_rcu(net, dev_name);
	if (!dev) {
		ret = -ENODEV;
		goto unlock;
	}
	if (!dev->addr_len)
		memset(sa->sa_data, 0, size);
	else
		memcpy(sa->sa_data, dev->dev_addr,
		       min_t(size_t, size, dev->addr_len));
	sa->sa_family = dev->type;

unlock:
	rcu_read_unlock();
	up_read(&dev_addr_sem);
	return ret;
}
EXPORT_SYMBOL(dev_get_mac_address);

/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}

/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port ID
 *
 * Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}

/**
 * dev_get_phys_port_name - Get device physical port name
 * @dev: device
 * @name: port name
 * @len: limit of bytes to copy to name
 *
 * Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (ops->ndo_get_phys_port_name) {
		err = ops->ndo_get_phys_port_name(dev, name, len);
		if (err != -EOPNOTSUPP)
			return err;
	}
	return devlink_compat_phys_port_name_get(dev, name, len);
}

/**
 * dev_get_port_parent_id - Get the device's port parent identifier
 * @dev: network device
 * @ppid: pointer to a storage for the port's parent identifier
 * @recurse: allow/disallow recursion to lower devices
 *
 * Get the device's port parent identifier
 */
int dev_get_port_parent_id(struct net_device *dev,
			   struct netdev_phys_item_id *ppid,
			   bool recurse)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct netdev_phys_item_id first = { };
	struct net_device *lower_dev;
	struct list_head *iter;
	int err;

	if (ops->ndo_get_port_parent_id) {
		err = ops->ndo_get_port_parent_id(dev, ppid);
		if (err != -EOPNOTSUPP)
			return err;
	}

	err = devlink_compat_switch_id_get(dev, ppid);
	if (!recurse || err != -EOPNOTSUPP)
		return err;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = dev_get_port_parent_id(lower_dev, ppid, true);
		if (err)
			break;
		if (!first.id_len)
			first = *ppid;
		else if (memcmp(&first, ppid, sizeof(*ppid)))
			return -EOPNOTSUPP;
	}

	return err;
}
EXPORT_SYMBOL(dev_get_port_parent_id);

/**
 * netdev_port_same_parent_id - Indicate if two network devices have
 * the same port parent identifier
 * @a: first network device
 * @b: second network device
 */
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
{
	struct netdev_phys_item_id a_id = { };
	struct netdev_phys_item_id b_id = { };

	if (dev_get_port_parent_id(a, &a_id, true) ||
	    dev_get_port_parent_id(b, &b_id, true))
		return false;

	return netdev_phys_item_id_same(&a_id, &b_id);
}
EXPORT_SYMBOL(netdev_port_same_parent_id);
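
/* Usage sketch (illustrative only, not part of this file): switchdev-style
 * code can test whether two netdevs hang off the same physical switch
 * before attempting to offload forwarding between them:
 *
 *	if (netdev_port_same_parent_id(dev_a, dev_b))
 *		... // both ports share one switch ASIC; offload is possible
 */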

/**
 * dev_change_proto_down - set carrier according to proto_down.
 * @dev: device
 * @proto_down: new value
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	if (proto_down)
		netif_carrier_off(dev);
	else
		netif_carrier_on(dev);
	dev->proto_down = proto_down;
	return 0;
}

/**
 * dev_change_proto_down_reason - proto down reason
 * @dev: device
 * @mask: proto down mask
 * @value: proto down value
 */
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
				  u32 value)
{
	int b;

	if (!mask) {
		dev->proto_down_reason = value;
	} else {
		for_each_set_bit(b, &mask, 32) {
			if (value & (1 << b))
				dev->proto_down_reason |= BIT(b);
			else
				dev->proto_down_reason &= ~BIT(b);
		}
	}
}

struct bpf_xdp_link {
	struct bpf_link link;
	struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
	int flags;
};

static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
{
	if (flags & XDP_FLAGS_HW_MODE)
		return XDP_MODE_HW;
	if (flags & XDP_FLAGS_DRV_MODE)
		return XDP_MODE_DRV;
	if (flags & XDP_FLAGS_SKB_MODE)
		return XDP_MODE_SKB;
	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
}

static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
{
	switch (mode) {
	case XDP_MODE_SKB:
		return generic_xdp_install;
	case XDP_MODE_DRV:
	case XDP_MODE_HW:
		return dev->netdev_ops->ndo_bpf;
	default:
		return NULL;
	}
}

static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
					 enum bpf_xdp_mode mode)
{
	return dev->xdp_state[mode].link;
}

static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
				     enum bpf_xdp_mode mode)
{
	struct bpf_xdp_link *link = dev_xdp_link(dev, mode);

	if (link)
		return link->link.prog;
	return dev->xdp_state[mode].prog;
}

u8 dev_xdp_prog_count(struct net_device *dev)
{
	u8 count = 0;
	int i;

	for (i = 0; i < __MAX_XDP_MODE; i++)
		if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
			count++;
	return count;
}
EXPORT_SYMBOL_GPL(dev_xdp_prog_count);

u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
{
	struct bpf_prog *prog = dev_xdp_prog(dev, mode);

	return prog ? prog->aux->id : 0;
}
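
/* Usage sketch (illustrative only, not part of this file): given
 * user-supplied XDP_FLAGS_* bits, resolve the effective attach mode and
 * query the id of the program currently attached in that mode (0 if none):
 *
 *	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
 *	u32 prog_id = dev_xdp_prog_id(dev, mode);
 */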

static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
			     struct bpf_xdp_link *link)
{
	dev->xdp_state[mode].link = link;
	dev->xdp_state[mode].prog = NULL;
}

static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
			     struct bpf_prog *prog)
{
	dev->xdp_state[mode].link = NULL;
	dev->xdp_state[mode].prog = prog;
}

static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
			   bpf_op_t bpf_op, struct netlink_ext_ack *extack,
			   u32 flags, struct bpf_prog *prog)
{
	struct netdev_bpf xdp;
	int err;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
	xdp.extack = extack;
	xdp.flags = flags;
	xdp.prog = prog;

	/* Drivers assume refcnt is already incremented (i.e, prog pointer is
	 * "moved" into driver), so they don't increment it on their own, but
	 * they do decrement refcnt when program is detached or replaced.
	 * Given net_device also owns link/prog, we need to bump refcnt here
	 * to prevent drivers from underflowing it.
	 */
	if (prog)
		bpf_prog_inc(prog);
	err = bpf_op(dev, &xdp);
	if (err) {
		if (prog)
			bpf_prog_put(prog);
		return err;
	}

	if (mode != XDP_MODE_HW)
		bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);

	return 0;
}

static void dev_xdp_uninstall(struct net_device *dev)
{
	struct bpf_xdp_link *link;
	struct bpf_prog *prog;
	enum bpf_xdp_mode mode;
	bpf_op_t bpf_op;

	ASSERT_RTNL();

	for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
		prog = dev_xdp_prog(dev, mode);
		if (!prog)
			continue;

		bpf_op = dev_xdp_bpf_op(dev, mode);
		if (!bpf_op)
			continue;

		WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));

		/* auto-detach link from net device */
		link = dev_xdp_link(dev, mode);
		if (link)
			link->dev = NULL;
		else
			bpf_prog_put(prog);

		dev_xdp_set_link(dev, mode, NULL);
	}
}

static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
			  struct bpf_xdp_link *link, struct bpf_prog *new_prog,
			  struct bpf_prog *old_prog, u32 flags)
{
	unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
	struct bpf_prog *cur_prog;
	struct net_device *upper;
	struct list_head *iter;
	enum bpf_xdp_mode mode;
	bpf_op_t bpf_op;
	int err;

	ASSERT_RTNL();

	/* either link or prog attachment, never both */
	if (link && (new_prog || old_prog))
		return -EINVAL;
	/* link supports only XDP mode flags */
	if (link && (flags & ~XDP_FLAGS_MODES)) {
		NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
		return -EINVAL;
	}
	/* just one XDP mode bit should be set, zero defaults to drv/skb mode */
	if (num_modes > 1) {
		NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
		return -EINVAL;
	}
	/* avoid ambiguity if offload + drv/skb mode progs are both loaded */
	if (!num_modes && dev_xdp_prog_count(dev) > 1) {
		NL_SET_ERR_MSG(extack,
			       "More than one program loaded, unset mode is ambiguous");
		return -EINVAL;
	}
	/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
	if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
		NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
		return -EINVAL;
	}

	mode = dev_xdp_mode(dev, flags);
	/* can't replace attached link */
	if (dev_xdp_link(dev, mode)) {
		NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
		return -EBUSY;
	}

	/* don't allow if an upper device already has a program */
	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
		if (dev_xdp_prog_count(upper) > 0) {
			NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
			return -EEXIST;
		}
	}

	cur_prog = dev_xdp_prog(dev, mode);
	/* can't replace attached prog with link */
	if (link && cur_prog) {
		NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
		return -EBUSY;
	}
	if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
		NL_SET_ERR_MSG(extack, "Active program does not match expected");
		return -EEXIST;
	}

	/* put effective new program into new_prog */
	if (link)
		new_prog = link->link.prog;

	if (new_prog) {
		bool offload = mode == XDP_MODE_HW;
		enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
					       ? XDP_MODE_DRV : XDP_MODE_SKB;

		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
			NL_SET_ERR_MSG(extack, "XDP program already attached");
			return -EBUSY;
		}
		if (!offload && dev_xdp_prog(dev, other_mode)) {
			NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
			return -EEXIST;
		}
		if (!offload && bpf_prog_is_offloaded(new_prog->aux)) {
			NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported");
			return -EINVAL;
		}
		if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) {
			NL_SET_ERR_MSG(extack, "Program bound to different device");
			return -EINVAL;
		}
		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
			NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
			return -EINVAL;
		}
		if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
			NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
			return -EINVAL;
		}
	}

	/* don't call drivers if the effective program didn't change */
	if (new_prog != cur_prog) {
		bpf_op = dev_xdp_bpf_op(dev, mode);
		if (!bpf_op) {
			NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
			return -EOPNOTSUPP;
		}

		err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
		if (err)
			return err;
	}

	if (link)
		dev_xdp_set_link(dev, mode, link);
	else
		dev_xdp_set_prog(dev, mode, new_prog);
	if (cur_prog)
		bpf_prog_put(cur_prog);

	return 0;
}

static int dev_xdp_attach_link(struct net_device *dev,
			       struct netlink_ext_ack *extack,
			       struct bpf_xdp_link *link)
{
	return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
}

static int dev_xdp_detach_link(struct net_device *dev,
			       struct netlink_ext_ack *extack,
			       struct bpf_xdp_link *link)
{
	enum bpf_xdp_mode mode;
	bpf_op_t bpf_op;

	ASSERT_RTNL();

	mode = dev_xdp_mode(dev, link->flags);
	if (dev_xdp_link(dev, mode) != link)
		return -EINVAL;

	bpf_op = dev_xdp_bpf_op(dev, mode);
	WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
	dev_xdp_set_link(dev, mode, NULL);
	return 0;
}

static void bpf_xdp_link_release(struct bpf_link *link)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);

	rtnl_lock();

	/* if racing with net_device's tear down, xdp_link->dev might be
	 * already NULL, in which case link was already auto-detached
	 */
	if (xdp_link->dev) {
		WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
		xdp_link->dev = NULL;
	}

	rtnl_unlock();
}

static int bpf_xdp_link_detach(struct bpf_link *link)
{
	bpf_xdp_link_release(link);
	return 0;
}

static void bpf_xdp_link_dealloc(struct bpf_link *link)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);

	kfree(xdp_link);
}

static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
				     struct seq_file *seq)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
	u32 ifindex = 0;

	rtnl_lock();
	if (xdp_link->dev)
		ifindex = xdp_link->dev->ifindex;
	rtnl_unlock();

	seq_printf(seq, "ifindex:\t%u\n", ifindex);
}

static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
				       struct bpf_link_info *info)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
	u32 ifindex = 0;

	rtnl_lock();
	if (xdp_link->dev)
		ifindex = xdp_link->dev->ifindex;
	rtnl_unlock();

	info->xdp.ifindex = ifindex;
	return 0;
}
9451 static int bpf_xdp_link_update(struct bpf_link
*link
, struct bpf_prog
*new_prog
,
9452 struct bpf_prog
*old_prog
)
9454 struct bpf_xdp_link
*xdp_link
= container_of(link
, struct bpf_xdp_link
, link
);
9455 enum bpf_xdp_mode mode
;
9461 /* link might have been auto-released already, so fail */
9462 if (!xdp_link
->dev
) {
9467 if (old_prog
&& link
->prog
!= old_prog
) {
9471 old_prog
= link
->prog
;
9472 if (old_prog
->type
!= new_prog
->type
||
9473 old_prog
->expected_attach_type
!= new_prog
->expected_attach_type
) {
9478 if (old_prog
== new_prog
) {
9479 /* no-op, don't disturb drivers */
9480 bpf_prog_put(new_prog
);
9484 mode
= dev_xdp_mode(xdp_link
->dev
, xdp_link
->flags
);
9485 bpf_op
= dev_xdp_bpf_op(xdp_link
->dev
, mode
);
9486 err
= dev_xdp_install(xdp_link
->dev
, mode
, bpf_op
, NULL
,
9487 xdp_link
->flags
, new_prog
);
9491 old_prog
= xchg(&link
->prog
, new_prog
);
9492 bpf_prog_put(old_prog
);
9499 static const struct bpf_link_ops bpf_xdp_link_lops
= {
9500 .release
= bpf_xdp_link_release
,
9501 .dealloc
= bpf_xdp_link_dealloc
,
9502 .detach
= bpf_xdp_link_detach
,
9503 .show_fdinfo
= bpf_xdp_link_show_fdinfo
,
9504 .fill_link_info
= bpf_xdp_link_fill_link_info
,
9505 .update_prog
= bpf_xdp_link_update
,
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_link_primer link_primer;
	struct netlink_ext_ack extack = {};
	struct bpf_xdp_link *link;
	struct net_device *dev;
	int err, fd;

	rtnl_lock();
	dev = dev_get_by_index(net, attr->link_create.target_ifindex);
	if (!dev) {
		rtnl_unlock();
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto unlock;
	}

	bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
	link->dev = dev;
	link->flags = attr->link_create.flags;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto unlock;
	}

	err = dev_xdp_attach_link(dev, &extack, link);
	rtnl_unlock();

	if (err) {
		link->dev = NULL;
		bpf_link_cleanup(&link_primer);
		trace_bpf_xdp_link_attach_failed(extack._msg);
		goto out_put_dev;
	}

	fd = bpf_link_settle(&link_primer);
	/* link itself doesn't hold dev's refcnt to not complicate shutdown */
	dev_put(dev);
	return fd;

unlock:
	rtnl_unlock();

out_put_dev:
	dev_put(dev);
	return err;
}
/**
 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
 * @dev: device
 * @extack: netlink extended ack
 * @fd: new program fd or negative value to clear
 * @expected_fd: old program fd that userspace expects to replace or clear
 * @flags: xdp-related flags
 *
 * Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags)
{
	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
	struct bpf_prog *new_prog = NULL, *old_prog = NULL;
	int err;

	ASSERT_RTNL();

	if (fd >= 0) {
		new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
						 mode != XDP_MODE_SKB);
		if (IS_ERR(new_prog))
			return PTR_ERR(new_prog);
	}

	if (expected_fd >= 0) {
		old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
						 mode != XDP_MODE_SKB);
		if (IS_ERR(old_prog)) {
			err = PTR_ERR(old_prog);
			old_prog = NULL;
			goto err_out;
		}
	}

	err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);

err_out:
	if (err && new_prog)
		bpf_prog_put(new_prog);
	if (old_prog)
		bpf_prog_put(old_prog);
	return err;
}
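/* Example (editor's sketch, not part of the original file): how an
 * RTNL-holding caller might drive dev_change_xdp_fd().  The fd would
 * normally arrive from userspace via the rtnetlink IFLA_XDP attributes;
 * attach_generic_xdp() is a hypothetical helper:
 *
 *	int attach_generic_xdp(struct net_device *dev, int prog_fd)
 *	{
 *		struct netlink_ext_ack extack = {};
 *
 *		ASSERT_RTNL();
 *		// XDP_FLAGS_SKB_MODE forces the generic (skb) path;
 *		// expected_fd is ignored unless XDP_FLAGS_REPLACE is set.
 *		return dev_change_xdp_fd(dev, &extack, prog_fd, -1,
 *					 XDP_FLAGS_SKB_MODE);
 *	}
 *
 * Passing fd == -1 with the same flags would detach the program again.
 */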
/**
 * dev_index_reserve() - allocate an ifindex in a namespace
 * @net: the applicable net namespace
 * @ifindex: requested ifindex, pass %0 to get one allocated
 *
 * Allocate a ifindex for a new device. Caller must either use the ifindex
 * to store the device (via list_netdevice()) or call dev_index_release()
 * to give the index up.
 *
 * Return: a suitable unique value for a new device interface number or -errno.
 */
static int dev_index_reserve(struct net *net, u32 ifindex)
{
	int err;

	if (ifindex > INT_MAX) {
		DEBUG_NET_WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (!ifindex)
		err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL,
				      xa_limit_31b, &net->ifindex, GFP_KERNEL);
	else
		err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL);
	if (err < 0)
		return err;

	return ifindex;
}

static void dev_index_release(struct net *net, int ifindex)
{
	/* Expect only unused indexes, unlist_netdevice() removes the used */
	WARN_ON(xa_erase(&net->dev_by_index, ifindex));
}
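/* Example (editor's sketch): the reserve/release contract above.  A caller
 * either publishes the device under the reserved index via list_netdevice()
 * or must hand the index back; "some_later_step_fails" is a hypothetical
 * placeholder for whatever can go wrong in between:
 *
 *	int ifindex = dev_index_reserve(net, 0);	// 0 = allocate one
 *
 *	if (ifindex < 0)
 *		return ifindex;
 *	dev->ifindex = ifindex;
 *	if (some_later_step_fails)
 *		dev_index_release(net, dev->ifindex);	// give the index up
 *	else
 *		list_netdevice(dev);	// stores dev under the index
 */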
/* Delayed registration/unregistration */
LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	atomic_inc(&dev_net(dev)->dev_unreg_count);
}
static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(upper->wanted_features & feature)
		    && (features & feature)) {
			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
				   &feature, upper->name);
			features &= ~feature;
		}
	}

	return features;
}

static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(features & feature) && (lower->features & feature)) {
			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
				   &feature, lower->name);
			lower->wanted_features &= ~feature;
			__netdev_update_features(lower);

			if (unlikely(lower->features & feature))
				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
					    &feature, lower->name);
			else
				netdev_features_change(lower);
		}
	}
}
static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
					!(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
					 !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
		features &= ~NETIF_F_TSO_MANGLEID;

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* GSO partial features require GSO partial be set */
	if ((features & dev->gso_partial_features) &&
	    !(features & NETIF_F_GSO_PARTIAL)) {
		netdev_dbg(dev,
			   "Dropping partially supported GSO features since no GSO partial.\n");
		features &= ~dev->gso_partial_features;
	}

	if (!(features & NETIF_F_RXCSUM)) {
		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
		 * successfully merged by hardware must also have the
		 * checksum verified by hardware.  If the user does not
		 * want to enable RXCSUM, logically, we should disable GRO_HW.
		 */
		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	/* LRO/HW-GRO features cannot be combined with RX-FCS */
	if (features & NETIF_F_RXFCS) {
		if (features & NETIF_F_LRO) {
			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_LRO;
		}

		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
		netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
		features &= ~NETIF_F_LRO;
	}

	if (features & NETIF_F_HW_TLS_TX) {
		bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
			(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		bool hw_csum = features & NETIF_F_HW_CSUM;

		if (!ip_csum && !hw_csum) {
			netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
			features &= ~NETIF_F_HW_TLS_TX;
		}
	}

	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
		netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
		features &= ~NETIF_F_HW_TLS_RX;
	}

	return features;
}
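/* Example (editor's illustration): netdev_fix_features() enforces the
 * dependency chain above, so a request asking for TSO/GSO without
 * scatter-gather is quietly trimmed.  The values are only meant to show
 * the masking, not a real driver configuration:
 *
 *	netdev_features_t wanted = NETIF_F_TSO | NETIF_F_GSO;	// no NETIF_F_SG
 *	netdev_features_t fixed = netdev_fix_features(dev, wanted);
 *
 *	// fixed now has NETIF_F_ALL_TSO and NETIF_F_GSO cleared, because
 *	// both depend on NETIF_F_SG being present.
 */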
int __netdev_update_features(struct net_device *dev)
{
	struct net_device *upper, *lower;
	netdev_features_t features;
	struct list_head *iter;
	int err = -1;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	/* some features can't be enabled if they're off on an upper device */
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		features = netdev_sync_upper_features(dev, upper, features);

	if (dev->features == features)
		goto sync_lower;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);
	else
		err = 0;

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
		return -1;
	}

sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err) {
		netdev_features_t diff = features ^ dev->features;

		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
			/* udp_tunnel_{get,drop}_rx_info both need
			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
			 * device, or they won't do anything.
			 * Thus we need to update dev->features
			 * *before* calling udp_tunnel_get_rx_info,
			 * but *after* calling udp_tunnel_drop_rx_info.
			 */
			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
				dev->features = features;
				udp_tunnel_get_rx_info(dev);
			} else {
				udp_tunnel_drop_rx_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_ctag_filter_info(dev);
			} else {
				vlan_drop_rx_ctag_filter_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_stag_filter_info(dev);
			} else {
				vlan_drop_rx_stag_filter_info(dev);
			}
		}

		dev->features = features;
	}

	return err < 0 ? 0 : 1;
}
/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);

/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);
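/* Example (editor's sketch): a driver would typically call
 * netdev_update_features() under RTNL after an external condition that
 * feeds its ndo_fix_features() callback changes, e.g. after an MTU
 * change.  foo_change_mtu() is a hypothetical ndo_change_mtu handler:
 *
 *	static int foo_change_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		dev->mtu = new_mtu;
 *		// re-evaluate: ndo_fix_features() may now allow or
 *		// forbid LRO/TSO at this frame size
 *		netdev_update_features(dev);
 *		return 0;
 *	}
 */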
/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (rootdev->operstate == IF_OPER_TESTING)
		netif_testing_on(dev);
	else
		netif_testing_off(dev);

	if (netif_carrier_ok(rootdev))
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;
	size_t sz = count * sizeof(*rx);
	int err = 0;

	BUG_ON(count < 1);

	rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	if (!rx)
		return -ENOMEM;

	dev->_rx = rx;

	for (i = 0; i < count; i++) {
		rx[i].dev = dev;

		/* XDP RX-queue setup */
		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
		if (err < 0)
			goto err_rxq_info;
	}
	return 0;

err_rxq_info:
	/* Rollback successful reg's and free other resources */
	while (i--)
		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
	kvfree(dev->_rx);
	dev->_rx = NULL;
	return err;
}

static void netif_free_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;

	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
	if (!dev->_rx)
		return;

	for (i = 0; i < count; i++)
		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);

	kvfree(dev->_rx);
}
static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	if (count < 1 || count > 0xffff)
		return -EINVAL;

	tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	if (!tx)
		return -ENOMEM;

	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		netif_tx_stop_queue(txq);
	}
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);
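/* Example (editor's sketch): netif_tx_stop_all_queues() is typically the
 * first step of a driver's down path, before TX resources are reclaimed.
 * foo_down() and foo_free_tx_rings() are hypothetical:
 *
 *	static void foo_down(struct net_device *dev)
 *	{
 *		netif_tx_stop_all_queues(dev);	// no new xmits after this
 *		netif_carrier_off(dev);
 *		foo_free_tx_rings(dev);		// now safe to reclaim
 *	}
 */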
/**
 * register_netdevice() - register a network device
 * @dev: device to register
 *
 * Take a prepared network device structure and make it externally accessible.
 * A %NETDEV_REGISTER message is sent to the netdev notifier chain.
 * Callers must hold the rtnl lock - you may want register_netdev()
 * instead of this.
 */
int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
		     NETDEV_FEATURE_COUNT);
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	ret = ethtool_check_ops(dev->ethtool_ops);
	if (ret)
		return ret;

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	ret = -ENOMEM;
	dev->name_node = netdev_name_node_head_alloc(dev);
	if (!dev->name_node)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto err_free_name;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = dev_index_reserve(net, dev->ifindex);
	if (ret < 0)
		goto err_uninit;
	dev->ifindex = ret;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
	dev->features |= NETIF_F_SOFT_FEATURES;

	if (dev->udp_tunnel_nic_info) {
		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
	}

	dev->wanted_features = dev->features & dev->hw_features;

	if (!(dev->flags & IFF_LOOPBACK))
		dev->hw_features |= NETIF_F_NOCACHE_COPY;

	/* If IPv4 TCP segmentation offload is supported we should also
	 * allow the device to enable segmenting the frame with the option
	 * of ignoring a static IP ID value.  This doesn't enable the
	 * feature itself but allows the user to enable it later.
	 */
	if (dev->hw_features & NETIF_F_TSO)
		dev->hw_features |= NETIF_F_TSO_MANGLEID;
	if (dev->vlan_features & NETIF_F_TSO)
		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
	if (dev->mpls_features & NETIF_F_TSO)
		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
	if (dev->hw_enc_features & NETIF_F_TSO)
		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_ifindex_release;

	ret = netdev_register_kobject(dev);
	write_lock(&dev_base_lock);
	dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
	write_unlock(&dev_base_lock);
	if (ret)
		goto err_uninit_notify;

	__netdev_update_features(dev);

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);

	netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
	list_netdevice(dev);

	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		/* Expect explicit free_netdev() on failure */
		dev->needs_free_netdev = false;
		unregister_netdevice_queue(dev, NULL);
		goto out;
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);

out:
	return ret;

err_uninit_notify:
	call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
err_ifindex_release:
	dev_index_release(net, dev->ifindex);
err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
err_free_name:
	netdev_name_node_free(dev->name_node);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);
/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initialize the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* napi_busy_loop stats accounting wants this */
	dev_net_set(dev, &init_net);

	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	if (rtnl_lock_killable())
		return -EINTR;
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
int netdev_refcnt_read(const struct net_device *dev)
{
#ifdef CONFIG_PCPU_DEV_REFCNT
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
#else
	return refcount_read(&dev->dev_refcnt);
#endif
}
EXPORT_SYMBOL(netdev_refcnt_read);
int netdev_unregister_timeout_secs __read_mostly = 10;

#define WAIT_REFS_MIN_MSECS 1
#define WAIT_REFS_MAX_MSECS 250
/**
 * netdev_wait_allrefs_any - wait until all references are gone.
 * @list: list of net_devices to wait on
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
{
	unsigned long rebroadcast_time, warning_time;
	struct net_device *dev;
	int wait = 0;

	rebroadcast_time = warning_time = jiffies;

	list_for_each_entry(dev, list, todo_list)
		if (netdev_refcnt_read(dev) == 1)
			return dev;

	while (true) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			list_for_each_entry(dev, list, todo_list)
				call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			list_for_each_entry(dev, list, todo_list)
				if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
					     &dev->state)) {
					/* We must not have linkwatch events
					 * pending on unregister. If this
					 * happens, we simply run the queue
					 * unscheduled, resulting in a noop
					 * for this device.
					 */
					linkwatch_run_queue();
					break;
				}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		if (!wait) {
			rcu_barrier();
			wait = WAIT_REFS_MIN_MSECS;
		} else {
			msleep(wait);
			wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
		}

		list_for_each_entry(dev, list, todo_list)
			if (netdev_refcnt_read(dev) == 1)
				return dev;

		if (time_after(jiffies, warning_time +
			       READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
			list_for_each_entry(dev, list, todo_list) {
				pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
					 dev->name, netdev_refcnt_read(dev));
				ref_tracker_dir_print(&dev->refcnt_tracker, 10);
			}

			warning_time = jiffies;
		}
	}
}
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct net_device *dev, *tmp;
	struct list_head list;
#ifdef CONFIG_LOCKDEP
	struct list_head unlink_list;

	list_replace_init(&net_unlink_list, &unlink_list);

	while (!list_empty(&unlink_list)) {
		struct net_device *dev = list_first_entry(&unlink_list,
							  struct net_device,
							  unlink_list);
		list_del_init(&dev->unlink_list);
		dev->nested_level = dev->lower_level - 1;
	}
#endif

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	list_for_each_entry_safe(dev, tmp, &list, todo_list) {
		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			netdev_WARN(dev, "run_todo but not unregistering\n");
			list_del(&dev->todo_list);
			continue;
		}

		write_lock(&dev_base_lock);
		dev->reg_state = NETREG_UNREGISTERED;
		write_unlock(&dev_base_lock);
		linkwatch_forget_dev(dev);
	}

	while (!list_empty(&list)) {
		dev = netdev_wait_allrefs_any(&list);
		list_del(&dev->todo_list);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev) != 1);
		BUG_ON(!list_empty(&dev->ptype_all));
		BUG_ON(!list_empty(&dev->ptype_specific));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));

		if (dev->priv_destructor)
			dev->priv_destructor(dev);
		if (dev->needs_free_netdev)
			free_netdev(dev);

		if (atomic_dec_and_test(&dev_net(dev)->dev_unreg_count))
			wake_up(&netdev_unregistering_wq);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
	size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
	const atomic_long_t *src = (atomic_long_t *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = (unsigned long)atomic_long_read(&src[i]);
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + n * sizeof(u64), 0,
	       sizeof(*stats64) - n * sizeof(u64));
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev)
{
	struct net_device_core_stats __percpu *p;

	p = alloc_percpu_gfp(struct net_device_core_stats,
			     GFP_ATOMIC | __GFP_NOWARN);

	if (p && cmpxchg(&dev->core_stats, NULL, p))
		free_percpu(p);

	/* This READ_ONCE() pairs with the cmpxchg() above */
	return READ_ONCE(dev->core_stats);
}
EXPORT_SYMBOL(netdev_core_stats_alloc);
/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	const struct net_device_core_stats __percpu *p;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}

	/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
	p = READ_ONCE(dev->core_stats);
	if (p) {
		const struct net_device_core_stats *core_stats;
		int i;

		for_each_possible_cpu(i) {
			core_stats = per_cpu_ptr(p, i);
			storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
			storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
			storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
			storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped);
		}
	}
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
/**
 *	dev_fetch_sw_netstats - get per-cpu network device statistics
 *	@s: place to store stats
 *	@netstats: per-cpu network stats to read from
 *
 *	Read per-cpu network statistics and populate the related fields in @s.
 */
void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
			   const struct pcpu_sw_netstats __percpu *netstats)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		const struct pcpu_sw_netstats *stats;
		unsigned int start;

		stats = per_cpu_ptr(netstats, cpu);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			rx_packets = u64_stats_read(&stats->rx_packets);
			rx_bytes   = u64_stats_read(&stats->rx_bytes);
			tx_packets = u64_stats_read(&stats->tx_packets);
			tx_bytes   = u64_stats_read(&stats->tx_bytes);
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		s->rx_packets += rx_packets;
		s->rx_bytes   += rx_bytes;
		s->tx_packets += tx_packets;
		s->tx_bytes   += tx_bytes;
	}
}
EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);

/**
 *	dev_get_tstats64 - ndo_get_stats64 implementation
 *	@dev: device to get statistics from
 *	@s: place to store stats
 *
 *	Populate @s from dev->stats and dev->tstats. Can be used as
 *	ndo_get_stats64() callback.
 */
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
{
	netdev_stats_to_stats64(s, &dev->stats);
	dev_fetch_sw_netstats(s, dev->tstats);
}
EXPORT_SYMBOL_GPL(dev_get_tstats64);
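/* Example (editor's sketch): a driver that keeps per-cpu counters in
 * dev->tstats can point its stats hook straight at dev_get_tstats64().
 * foo_netdev_ops is hypothetical:
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		...
 *		.ndo_get_stats64 = dev_get_tstats64,
 *	};
 *
 * with dev->tstats allocated before registration, e.g.:
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 */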
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
	RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}
static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
/**
 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default
 * @dev: netdev to enable the IRQ coalescing on
 *
 * Sets a conservative default for SW IRQ coalescing. Users can use
 * sysfs attributes to override the default values.
 */
void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
{
	WARN_ON(dev->reg_state == NETREG_REGISTERED);

	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
		dev->gro_flush_timeout = 20000;
		dev->napi_defer_hard_irqs = 1;
	}
}
EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on);
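/* Example (editor's sketch): drivers opt in before registration, per the
 * WARN_ON above, so the defaults can still be overridden via sysfs later:
 *
 *	dev = alloc_etherdev(sizeof(*priv));	// priv is hypothetical
 *	netdev_sw_irq_coalesce_default_on(dev);	// sets gro_flush_timeout etc.
 *	err = register_netdev(dev);		// must come after the call
 */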
void netdev_freemem(struct net_device *dev)
{
	char *addr = (char *)dev - dev->padded;

	kvfree(addr);
}
/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization.  Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		unsigned char name_assign_type,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	unsigned int alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
#ifdef CONFIG_PCPU_DEV_REFCNT
	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;
	__dev_hold(dev);
#else
	refcount_set(&dev->dev_refcnt, 1);
#endif

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
	dev->xdp_zc_max_segs = 1;
	dev->gso_max_segs = GSO_MAX_SEGS;
	dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
	dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE;
	dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE;
	dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
	dev->tso_max_segs = TSO_MAX_SEGS;
	dev->upper_level = 1;
	dev->lower_level = 1;
#ifdef CONFIG_LOCKDEP
	dev->nested_level = 0;
	INIT_LIST_HEAD(&dev->unlink_list);
#endif

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->ptype_all);
	INIT_LIST_HEAD(&dev->ptype_specific);
	INIT_LIST_HEAD(&dev->net_notifier_list);
#ifdef CONFIG_NET_SCHED
	hash_init(dev->qdisc_hash);
#endif
	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
	setup(dev);

	if (!dev->tx_queue_len) {
		dev->priv_flags |= IFF_NO_QUEUE;
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	}

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;

	strcpy(dev->name, name);
	dev->name_assign_type = name_assign_type;
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;

	nf_hook_netdev_init(dev);

	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
#ifdef CONFIG_PCPU_DEV_REFCNT
	free_percpu(dev->pcpu_refcnt);
free_dev:
#endif
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
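/* Example (editor's sketch): allocating a multi-queue device directly via
 * alloc_netdev_mqs().  ether_setup() is the standard Ethernet initializer;
 * the name, private struct and queue counts are illustrative only:
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *			       NET_NAME_ENUM, ether_setup,
 *			       8, 8);	// 8 TX and 8 RX queues
 *	if (!dev)
 *		return -ENOMEM;
 *
 * The alloc_etherdev()/alloc_etherdev_mqs() helpers are thin wrappers
 * around this call.
 */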
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released. If this
 * is the last reference then it will be freed. Must be called in process
 * context.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	might_sleep();

	/* When called immediately after register_netdevice() failed the unwind
	 * handling may still be dismantling the device. Handle that case by
	 * deferring the free.
	 */
	if (dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();
		dev->needs_free_netdev = true;
		return;
	}

	netif_free_tx_queues(dev);
	netif_free_rx_queues(dev);

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	ref_tracker_dir_exit(&dev->refcnt_tracker);
#ifdef CONFIG_PCPU_DEV_REFCNT
	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;
#endif
	free_percpu(dev->core_stats);
	dev->core_stats = NULL;
	free_percpu(dev->xdp_bulkq);
	dev->xdp_bulkq = NULL;

	/*  Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore.  You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		unregister_netdevice_many(&single);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
void unregister_netdevice_many_notify(struct list_head *head,
				      u32 portid, const struct nlmsghdr *nlh)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	if (list_empty(head))
		return;

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		write_lock(&dev_base_lock);
		unlist_netdevice(dev, false);
		dev->reg_state = NETREG_UNREGISTERING;
		write_unlock(&dev_base_lock);
	}
	flush_all_backlogs();

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);
		dev_tcx_uninstall(dev);
		dev_xdp_uninstall(dev);
		bpf_dev_bound_netdev_unregister(dev);

		netdev_offload_xstats_disable_all(dev);

		/* Notify protocols, that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL, NULL, 0,
						     portid, nlh);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		netdev_name_node_alt_flush(dev);
		netdev_name_node_free(dev->name_node);

		call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		netdev_put(dev, &dev->dev_registered_tracker);
		net_set_todo(dev);
	}

	list_del(head);
}
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 *
 *  Note: As most callers use a stack allocated list_head,
 *  we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	unregister_netdevice_many_notify(head, 0, NULL);
}
EXPORT_SYMBOL(unregister_netdevice_many);

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
/**
 *	__dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *	@new_ifindex: If not zero, specifies device index in the target
 *	              namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
			       const char *pat, int new_ifindex)
{
	struct netdev_name_node *name_node;
	struct net *net_old = dev_net(dev);
	char new_name[IFNAMSIZ] = {};
	int err, new_nsid;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(net_old, net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (netdev_name_in_use(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		err = dev_prep_valid_name(net, dev, pat, new_name);
		if (err < 0)
			goto out;
	}
	/* Check that none of the altnames conflicts. */
	err = -EEXIST;
	netdev_for_each_altname(dev, name_node)
		if (netdev_name_in_use(net, name_node->name))
			goto out;

	/* Check that new_ifindex isn't used yet. */
	if (new_ifindex) {
		err = dev_index_reserve(net, new_ifindex);
		if (err < 0)
			goto out;
	} else {
		/* If there is an ifindex conflict assign a new one */
		err = dev_index_reserve(net, dev->ifindex);
		if (err == -EBUSY)
			err = dev_index_reserve(net, 0);
		if (err < 0)
			goto out;
		new_ifindex = err;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	unlist_netdevice(dev, true);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();

	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);

	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
			    new_ifindex);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Move per-net netdevice notifiers that are following the netdevice */
	move_netdevice_notifiers_dev_net(dev, net);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);
	dev->ifindex = new_ifindex;

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	if (new_name[0]) /* Rename the netdev to prepared name */
		strscpy(dev->name, new_name, IFNAMSIZ);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Adapt owner in case owning user namespace of target network
	 * namespace is different from the original one.
	 */
	err = netdev_change_owner(dev, net_old, net);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
static int dev_cpu_dead(unsigned int oldcpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu;
	struct softnet_data *sd, *oldsd, *remsd = NULL;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

#ifdef CONFIG_RPS
	remsd = oldsd->rps_ipi_list;
	oldsd->rps_ipi_list = NULL;
#endif
	/* send out pending IPI's on offline CPU */
	net_rps_send_ipi(remsd);

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return 0;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
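/* Example (editor's illustration): stacked drivers (bonding, team, bridge)
 * fold each lower device's feature set into the master with this helper,
 * one slave at a time.  The slave list and the starting value below are
 * hypothetical stand-ins for the driver's own bookkeeping:
 *
 *	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
 *
 *	list_for_each_entry(slave, &master_slave_list, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     master->features);
 */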
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	BUILD_BUG_ON(GRO_HASH_BUCKETS >
		     8 * sizeof_field(struct napi_struct, gro_bitmask));

	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1);

	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}
static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);
define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
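/* Example (editor's sketch): the generated helpers take the netdev first,
 * so messages are automatically prefixed with driver, bus address and
 * interface name.  "tries" and "fw_name" are hypothetical locals:
 *
 *	netdev_warn(dev, "link lost, resetting (attempt %d)\n", tries);
 *	netdev_info(dev, "firmware %s loaded\n", fw_name);
 *
 * which prints e.g. "foo 0000:01:00.0 eth0: link lost, ..." when the
 * device has a parent on a bus.
 */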
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	xa_destroy(&net->dev_by_index);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit_net(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	ASSERT_RTNL();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		if (netdev_name_in_use(&init_net, fb_name))
			snprintf(fb_name, IFNAMSIZ, "dev%%d");
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		default_device_exit_net(net);
		cond_resched();
	}

	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit_batch = default_device_exit_batch,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *       This is called single threaded during boot, so no need
 *       to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
		sd->cpu = i;
#endif
		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
		spin_lock_init(&sd->defer_lock);

		init_gro_hash(&sd->backlog);
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices. Ensure the loopback device
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);