// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
#include <linux/kconfig.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);
#define USEC_PER_POLL	50

#define MAX_SKB_SIZE				\
	(sizeof(struct ethhdr) +		\
	 sizeof(struct iphdr) +			\
	 sizeof(struct udphdr) +		\
	 MAX_UDP_CHUNK)
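/*
 * Rough sizing check: with the common 14-byte ethhdr, 20-byte iphdr and
 * 8-byte udphdr, MAX_SKB_SIZE comes to 14 + 20 + 8 + 1460 = 1502 bytes,
 * so a pool skb carrying a full MAX_UDP_CHUNK payload yields an IP
 * packet of 20 + 8 + 1460 = 1488 bytes, within a standard 1500-byte
 * Ethernet MTU.
 */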
static void zap_completion_queue(void);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct netdev_queue *txq)
{
	netdev_tx_t status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;
		unsigned int q_index;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		/* check if skb->queue_mapping is still valid */
		q_index = skb_get_queue_mapping(skb);
		if (unlikely(q_index >= dev->real_num_tx_queues)) {
			q_index = q_index % dev->real_num_tx_queues;
			skb_set_queue_mapping(skb, q_index);
		}
		txq = netdev_get_tx_queue(dev, q_index);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}
static int netif_local_xmit_active(struct net_device *dev)
{
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
			return 1;
	}

	return 0;
}
static void poll_one_napi(struct napi_struct *napi)
{
	int work;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int cpu = smp_processor_id();

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
			poll_one_napi(napi);
			smp_store_release(&napi->poll_owner, -1);
		}
	}
}
void netpoll_poll_dev(struct net_device *dev)
{
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	const struct net_device_ops *ops;

	/* Don't do any rx activity if the dev_lock mutex is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (!ni || down_trylock(&ni->dev_lock))
		return;

	/* Some drivers will take the same locks in poll and xmit,
	 * we can't poll if local CPU is already in xmit.
	 */
	if (!netif_running(dev) || netif_local_xmit_active(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);
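/*
 * Driver-side sketch of the ndo_poll_controller hook invoked above; the
 * names mydrv_netpoll/mydrv_interrupt are hypothetical, but this mirrors
 * the pattern many NIC drivers use under CONFIG_NET_POLL_CONTROLLER:
 *
 *	#ifdef CONFIG_NET_POLL_CONTROLLER
 *	static void mydrv_netpoll(struct net_device *dev)
 *	{
 *		// run the IRQ handler by hand, with the IRQ line masked
 *		disable_irq(dev->irq);
 *		mydrv_interrupt(dev->irq, dev);
 *		enable_irq(dev->irq);
 *	}
 *	#endif
 *
 *	// in the driver's struct net_device_ops:
 *	//	.ndo_poll_controller = mydrv_netpoll,
 */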
void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);
void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;

			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				refcount_set(&skb->users, 1);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	refcount_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
/* call with IRQ disabled */
static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	netdev_tx_t status = NETDEV_TX_BUSY;
	struct net_device *dev;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	lockdep_assert_irqs_disabled();

	dev = np->dev;
	npinfo = rcu_dereference_bh(dev->npinfo);

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return NET_XMIT_DROP;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_core_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (dev_xmit_complete(status))
					break;
			}

			/* tickle device, maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);
	}

	if (!dev_xmit_complete(status)) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
	return NETDEV_TX_OK;
}
netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;
	netdev_tx_t ret;

	if (unlikely(!np)) {
		dev_kfree_skb_irq(skb);
		ret = NET_XMIT_DROP;
	} else {
		local_irq_save(flags);
		ret = __netpoll_send_skb(np, skb);
		local_irq_restore(flags);
	}
	return ret;
}
EXPORT_SYMBOL(netpoll_send_skb);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		*(unsigned char *)ip6h = 0x60;
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		*(unsigned char *)iph = 0x45;
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
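/*
 * Usage sketch for the exported send paths; "mylogger" and the addresses
 * are hypothetical, but this mirrors how netconsole drives this API:
 * configure once from process context, then transmit from almost any
 * context (IRQ, OOM), since netpoll_send_udp() allocates from the
 * emergency pool and spins rather than sleeps:
 *
 *	static struct netpoll np = {
 *		.name        = "mylogger",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.remote_mac  = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	// once, from process context (may sleep):
 *	np.remote_ip.ip = htonl(0x0a000001);	// 10.0.0.1
 *	if (netpoll_setup(&np))
 *		goto fail;
 *
 *	// later, e.g. from a console write handler:
 *	netpoll_send_udp(&np, msg, msg_len);
 */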
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strscpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
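/*
 * Example option strings for the parser above (this is the same syntax
 * netconsole documents for its "netconsole=" parameter):
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@[tgt-ip]/[tgt-mac]
 *
 *	"6665@10.0.0.2/eth0,6666@10.0.0.1/00:11:22:33:44:55"
 *	"@/eth0,@10.0.0.1/"	// empty fields keep the caller's presets
 */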
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strscpy(np->dev_name, ndev->name, IFNAMSIZ);

	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		refcount_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		refcount_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;

		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	netdev_hold(ndev, &np->dev_tracker, GFP_KERNEL);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev, NULL);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			const struct in_ifaddr *ifa;

			in_dev = __in_dev_get_rtnl(ndev);
			if (!in_dev)
				goto put_noaddr;

			ifa = rtnl_dereference(in_dev->ifa_list);
			if (!ifa) {
put_noaddr:
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = ifa->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
					    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;
	rtnl_unlock();
	return 0;

put:
	netdev_put(ndev, &np->dev_tracker);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
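/*
 * Lifecycle sketch (illustrative): a successful netpoll_setup() should
 * eventually be balanced by netpoll_cleanup(), which drops the device
 * reference taken via netdev_hold() above; callers that already hold
 * RTNL and manage the device themselves pair __netpoll_setup() with
 * __netpoll_free() instead:
 *
 *	err = netpoll_setup(&np);
 *	if (err)
 *		return err;
 *	...
 *	netpoll_cleanup(&np);
 */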
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);

	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);

	kfree(npinfo);
}
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (refcount_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
void __netpoll_free(struct netpoll *np)
{
	ASSERT_RTNL();

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu();
	__netpoll_cleanup(np);
	kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);
void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	netdev_put(np->dev, &np->dev_tracker);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);