// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
#include <net/netdev_rx_queue.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128
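
/* With the usual 14-byte Ethernet header, 4-byte VLAN tag and 1500-byte MTU,
 * GOOD_PACKET_LEN above works out to 14 + 4 + 1500 = 1518 bytes.
 */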

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

#define VIRTIO_XDP_FLAG	BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)
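
/* DECLARE_EWMA(pkt_len, 0, 64) generates the ewma_pkt_len helpers used below;
 * the weight reciprocal of 64 means each new sample contributes roughly 1/64
 * of the running average, which is what keeps it insensitive to short bursts.
 */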

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GUEST_USO4,
	VIRTIO_NET_F_GUEST_USO6,
	VIRTIO_NET_F_GUEST_HDRLEN
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				   (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_USO6))

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 xdp_tx;
	u64 xdp_tx_drops;
	u64 kicks;
	u64 tx_timeouts;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 xdp_packets;
	u64 xdp_tx;
	u64 xdp_redirects;
	u64 xdp_drops;
	u64 kicks;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",		VIRTNET_SQ_STAT(packets) },
	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",		VIRTNET_RQ_STAT(packets) },
	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
	{ "drops",		VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

struct virtnet_interrupt_coalesce {
	u32 max_packets;
	u32 max_usecs;
};

/* The dma information of pages allocated at a time. */
struct virtnet_rq_dma {
	dma_addr_t addr;
	u32 ref;
	u16 len;
	u16 need_sync;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send _queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[16];

	struct virtnet_sq_stats stats;

	struct virtnet_interrupt_coalesce intr_coal;

	struct napi_struct napi;

	/* Record whether sq is in reset state. */
	bool reset;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	struct virtnet_interrupt_coalesce intr_coal;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[16];

	struct xdp_rxq_info xdp_rxq;

	/* Record the last dma info to free after new pages is allocated. */
	struct virtnet_rq_dma *last_dma;

	/* Do dma by self */
	bool do_dma;
};

/* This structure can contain rss message with maximum settings for indirection table and keysize
 * Note, that default structure that describes RSS configuration virtio_net_rss_config
 * contains same info but can't handle table values.
 * In any case, structure would be passed to virtio hw through sg_buf split by parts
 * because table sizes may differ according to the device configuration.
 */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
struct virtio_net_ctrl_rss {
	u32 hash_types;
	u16 indirection_table_mask;
	u16 unclassified_queue;
	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
	u16 max_tx_vq;
	u8 hash_key_length;
	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
	struct virtio_net_ctrl_rss rss;
	struct virtio_net_ctrl_coal_tx coal_tx;
	struct virtio_net_ctrl_coal_rx coal_rx;
	struct virtio_net_ctrl_coal_vq coal_vq;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
	bool xdp_enabled;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* number of sg entries allocated for big packets */
	unsigned int big_packets_num_skbfrags;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Host supports rss and/or hash report */
	bool has_rss;
	bool has_rss_hash_report;
	u8 rss_key_size;
	u16 rss_indir_table_size;
	u32 rss_hash_types_supported;
	u32 rss_hash_types_saved;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for delayed refilling if we run low on memory. */
	struct delayed_work refill;

	/* Is delayed refill enabled? */
	bool refill_enabled;

	/* The lock to synchronize the access to refill_enabled */
	spinlock_t refill_lock;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	/* Interrupt coalescing settings */
	struct virtnet_interrupt_coalesce intr_coal_tx;
	struct virtnet_interrupt_coalesce intr_coal_rx;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_v1_hash hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[12];
};

struct virtio_net_common_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf	mrg_hdr;
		struct virtio_net_hdr_v1_hash hash_v1_hdr;
	};
};

static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);

static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
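
/* The same virtqueue token slot carries either a plain sk_buff pointer or an
 * xdp_frame pointer. Both are at least word aligned, so bit 0 is always free
 * and is used as the VIRTIO_XDP_FLAG tag that the helpers above set, test and
 * clear when packing and unpacking the pointer.
 */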

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}
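
/* Worked example for the mapping above, with 2 queue pairs: vq 0 is rx0,
 * vq 1 is tx0, vq 2 is rx1, vq 3 is tx1 and vq 4 is the control vq, so
 * txq2vq(1) == 3 and vq2txq() of vq 3 gives back 1.
 */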

static inline struct virtio_net_common_hdr *
skb_vnet_common_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_common_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recent used list in the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void enable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = true;
	spin_unlock_bh(&vi->refill_lock);
}

static void disable_delayed_refill(struct virtnet_info *vi)
{
	spin_lock_bh(&vi->refill_lock);
	vi->refill_enabled = false;
	spin_unlock_bh(&vi->refill_lock);
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
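
/* Example of the packing used above: truesize 1536 with headroom 256 is
 * stored as (256 << 22) | 1536 == 0x40000600, and the two helpers recover
 * 256 and 1536 from that value.
 */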

static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
					 unsigned int headroom,
					 unsigned int len)
{
	struct sk_buff *skb;

	skb = build_skb(buf, buflen);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	skb_put(skb, len);

	return skb;
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   unsigned int headroom)
{
	struct sk_buff *skb;
	struct virtio_net_common_hdr *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	struct page *page_to_free = NULL;
	int tailroom, shinfo_size;
	char *p, *hdr_p, *buf;

	p = page_address(page) + offset;
	hdr_p = p;

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = hdr_len;
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	buf = p - headroom;
	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;
	tailroom = truesize - headroom - hdr_padded_len - len;

	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* copy small packet so we can reuse these pages */
	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
		skb = virtnet_build_skb(buf, truesize, p - buf, len);
		if (unlikely(!skb))
			return NULL;

		page = (struct page *)page->private;
		if (page)
			give_pages(rq, page);
		goto ok;
	}

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	/* Copy all frame if it fits skb->head, otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN;
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			page_to_free = page;
		goto ok;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

ok:
	hdr = skb_vnet_common_hdr(skb);
	memcpy(hdr, hdr_p, hdr_len);
	if (page_to_free)
		put_page(page_to_free);

	return skb;
}

static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
{
	struct page *page = virt_to_head_page(buf);
	struct virtnet_rq_dma *dma;
	void *head;
	int offset;

	head = page_address(page);

	dma = head;

	--dma->ref;

	if (dma->need_sync && len) {
		offset = buf - (head + sizeof(*dma));

		virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
							offset, len,
							DMA_FROM_DEVICE);
	}

	if (dma->ref)
		return;

	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	put_page(page);
}

static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
{
	void *buf;

	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
	if (buf && rq->do_dma)
		virtnet_rq_unmap(rq, buf, *len);

	return buf;
}

static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
{
	void *buf;

	buf = virtqueue_detach_unused_buf(rq->vq);
	if (buf && rq->do_dma)
		virtnet_rq_unmap(rq, buf, 0);

	return buf;
}

static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
{
	struct virtnet_rq_dma *dma;
	dma_addr_t addr;
	u32 offset;
	void *head;

	if (!rq->do_dma) {
		sg_init_one(rq->sg, buf, len);
		return;
	}

	head = page_address(rq->alloc_frag.page);

	offset = buf - head;

	dma = head;

	addr = dma->addr - sizeof(*dma) + offset;

	sg_init_table(rq->sg, 1);
	rq->sg[0].dma_address = addr;
	rq->sg[0].length = len;
}

static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	struct virtnet_rq_dma *dma;
	void *buf, *head;
	dma_addr_t addr;

	if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
		return NULL;

	head = page_address(alloc_frag->page);

	if (rq->do_dma) {
		dma = head;

		/* new pages */
		if (!alloc_frag->offset) {
			if (rq->last_dma) {
				/* Now, the new page is allocated, the last dma
				 * will not be used. So the dma can be unmapped
				 * if the ref is 0.
				 */
				virtnet_rq_unmap(rq, rq->last_dma, 0);
				rq->last_dma = NULL;
			}

			dma->len = alloc_frag->size - sizeof(*dma);

			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
							      dma->len, DMA_FROM_DEVICE, 0);
			if (virtqueue_dma_mapping_error(rq->vq, addr))
				return NULL;

			dma->addr = addr;
			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);

			/* Add a reference to dma to prevent the entire dma from
			 * being released during error handling. This reference
			 * will be freed after the pages are no longer used.
			 */
			get_page(alloc_frag->page);
			dma->ref = 1;
			alloc_frag->offset = sizeof(*dma);

			rq->last_dma = dma;
		}

		++dma->ref;
	}

	buf = head + alloc_frag->offset;

	get_page(alloc_frag->page);
	alloc_frag->offset += size;

	return buf;
}
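
/* Note on the scheme above: the struct virtnet_rq_dma bookkeeping lives at the
 * head of each page-frag page, the remainder of the page is DMA-mapped once,
 * and every buffer carved out of that page afterwards reuses that mapping
 * while dma->ref tracks how many buffers are still outstanding.
 */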

static void virtnet_rq_set_premapped(struct virtnet_info *vi)
{
	int i;

	/* disable for big mode */
	if (!vi->mergeable_rx_bufs && vi->big_packets)
		return;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
			continue;

		vi->rq[i].do_dma = true;
	}
}

static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
	unsigned int len;
	unsigned int packets = 0;
	unsigned int bytes = 0;
	void *ptr;

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(!is_xdp_frame(ptr))) {
			struct sk_buff *skb = ptr;

			pr_debug("Sent skb %p\n", skb);

			bytes += skb->len;
			napi_consume_skb(skb, in_napi);
		} else {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += xdp_get_frame_len(frame);
			xdp_return_frame(frame);
		}
		packets++;
	}

	/* Avoid overhead when no packets have been processed
	 * happens when called speculatively from start_xmit.
	 */
	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	u64_stats_update_end(&sq->stats.syncp);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}

static void check_sq_full_and_disable(struct virtnet_info *vi,
				      struct net_device *dev,
				      struct send_queue *sq)
{
	bool use_napi = sq->napi.weight;
	int qnum;

	qnum = sq - vi->sq;

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2 + MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (use_napi) {
			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
				virtqueue_napi_schedule(&sq->napi, sq->vq);
		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq, false);
			if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}
}
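
/* The 2 + MAX_SKB_FRAGS threshold above is the worst-case descriptor count for
 * one skb: one slot for the virtio header, one for the linear part and up to
 * MAX_SKB_FRAGS slots for page fragments.
 */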

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	struct skb_shared_info *shinfo;
	u8 nr_frags = 0;
	int err, i;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		shinfo = xdp_get_shared_info_from_frame(xdpf);
		nr_frags = shinfo->nr_frags;
	}

	/* In wrapping function virtnet_xdp_xmit(), we need to free
	 * up the pending old buffers, where we need to calculate the
	 * position of skb_shared_info in xdp_get_frame_len() and
	 * xdp_return_frame(), which will involve to xdpf->data and
	 * xdpf->headroom. Therefore, we need to update the value of
	 * headroom synchronously here.
	 */
	xdpf->headroom -= vi->hdr_len;
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_table(sq->sg, nr_frags + 1);
	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &shinfo->frags[i];

		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
	}

	err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
				   xdp_to_ptr(xdpf), GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handle free/refcnt */

	return 0;
}

/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 * the current cpu, so it does not need to be locked.
 *
 * Here we use marco instead of inline functions because we have to deal with
 * three issues at the same time: 1. the choice of sq. 2. judge and execute the
 * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
 * functions to perfectly solve these three problems at the same time.
 */
#define virtnet_xdp_get_sq(vi) ({                                       \
	int cpu = smp_processor_id();                                   \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
	unsigned int qp;                                                \
									\
	if (v->curr_queue_pairs > nr_cpu_ids) {                         \
		qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
		qp += cpu;                                              \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_acquire(txq);                                \
	} else {                                                        \
		qp = cpu % v->curr_queue_pairs;                         \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_lock(txq, cpu);                              \
	}                                                               \
	v->sq + qp;                                                     \
})

#define virtnet_xdp_put_sq(vi, q) {                                     \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
									\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
	if (v->curr_queue_pairs > nr_cpu_ids)                           \
		__netif_tx_release(txq);                                \
	else                                                            \
		__netif_tx_unlock(txq);                                 \
}
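
/* Typical usage, as in virtnet_xdp_xmit() and virtnet_poll() below:
 *	sq = virtnet_xdp_get_sq(vi);
 *	... queue frames on sq ...
 *	virtnet_xdp_put_sq(vi, sq);
 */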

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int packets = 0;
	int bytes = 0;
	int nxmit = 0;
	int kicks = 0;
	void *ptr;
	int ret;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicate XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_access_pointer(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sq = virtnet_xdp_get_sq(vi);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		goto out;
	}

	/* Free up any pending old buffers before queueing new ones. */
	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(is_xdp_frame(ptr))) {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += xdp_get_frame_len(frame);
			xdp_return_frame(frame);
		} else {
			struct sk_buff *skb = ptr;

			bytes += skb->len;
			napi_consume_skb(skb, false);
		}
		packets++;
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
			break;
		nxmit++;
	}
	ret = nxmit;

	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
		check_sq_full_and_disable(vi, dev, sq);

	if (flags & XDP_XMIT_FLUSH) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
			kicks = 1;
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	sq->stats.xdp_tx += n;
	sq->stats.xdp_tx_drops += n - nxmit;
	sq->stats.kicks += kicks;
	u64_stats_update_end(&sq->stats.syncp);

	virtnet_xdp_put_sq(vi, sq);
	return ret;
}

static void put_xdp_frags(struct xdp_buff *xdp)
{
	struct skb_shared_info *shinfo;
	struct page *xdp_page;
	int i;

	if (xdp_buff_has_frags(xdp)) {
		shinfo = xdp_get_shared_info_from_buff(xdp);
		for (i = 0; i < shinfo->nr_frags; i++) {
			xdp_page = skb_frag_page(&shinfo->frags[i]);
			put_page(xdp_page);
		}
	}
}

static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
			       struct net_device *dev,
			       unsigned int *xdp_xmit,
			       struct virtnet_rq_stats *stats)
{
	struct xdp_frame *xdpf;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	stats->xdp_packets++;

	switch (act) {
	case XDP_PASS:
		return act;

	case XDP_TX:
		stats->xdp_tx++;
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			netdev_dbg(dev, "convert buff to frame failed for xdp\n");
			return XDP_DROP;
		}

		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
		if (unlikely(!err)) {
			xdp_return_frame_rx_napi(xdpf);
		} else if (unlikely(err < 0)) {
			trace_xdp_exception(dev, xdp_prog, act);
			return XDP_DROP;
		}
		*xdp_xmit |= VIRTIO_XDP_TX;
		return act;

	case XDP_REDIRECT:
		stats->xdp_redirects++;
		err = xdp_do_redirect(dev, xdp, xdp_prog);
		if (err)
			return XDP_DROP;

		*xdp_xmit |= VIRTIO_XDP_REDIR;
		return act;

	default:
		bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return XDP_DROP;
	}
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * at most queue size packets.
 * Afterwards, the conditions to enable
 * XDP should preclude the underlying device from sending packets
 * across multiple buffers (num_buf > 1), and we make sure buffers
 * have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       int *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page;

	if (page_off + *len + tailroom > PAGE_SIZE)
		return NULL;

	page = alloc_page(GFP_ATOMIC);
	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtnet_rq_get_buf(rq, &buflen, NULL);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packet larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
					       unsigned int xdp_headroom,
					       void *buf,
					       unsigned int len)
{
	unsigned int header_offset;
	unsigned int headroom;
	unsigned int buflen;
	struct sk_buff *skb;

	header_offset = VIRTNET_RX_PAD + xdp_headroom;
	headroom = vi->hdr_len + header_offset;
	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	skb = virtnet_build_skb(buf, buflen, headroom, len);
	if (unlikely(!skb))
		return NULL;

	buf += header_offset;
	memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);

	return skb;
}

static struct sk_buff *receive_small_xdp(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 struct bpf_prog *xdp_prog,
					 void *buf,
					 unsigned int xdp_headroom,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
	struct page *page = virt_to_head_page(buf);
	struct page *xdp_page;
	unsigned int buflen;
	struct xdp_buff xdp;
	struct sk_buff *skb;
	unsigned int metasize = 0;
	u32 act;

	if (unlikely(hdr->hdr.gso_type))
		goto err_xdp;

	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
		int offset = buf - page_address(page) + header_offset;
		unsigned int tlen = len + vi->hdr_len;
		int num_buf = 1;

		xdp_headroom = virtnet_get_headroom(vi);
		header_offset = VIRTNET_RX_PAD + xdp_headroom;
		headroom = vi->hdr_len + header_offset;
		buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		xdp_page = xdp_linearize_page(rq, &num_buf, page,
					      offset, header_offset,
					      &tlen);
		if (!xdp_page)
			goto err_xdp;

		buf = page_address(xdp_page);
		put_page(page);
		page = xdp_page;
	}

	xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
	xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
			 xdp_headroom, len, true);

	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

	switch (act) {
	case XDP_PASS:
		/* Recalculate length in case bpf program changed it */
		len = xdp.data_end - xdp.data;
		metasize = xdp.data - xdp.data_meta;
		break;

	case XDP_TX:
	case XDP_REDIRECT:
		goto xdp_xmit;

	default:
		goto err_xdp;
	}

	skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
	if (unlikely(!skb))
		goto err;

	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;

err_xdp:
	stats->xdp_drops++;
err:
	stats->drops++;
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     struct virtnet_rq_stats *stats)
{
	unsigned int xdp_headroom = (unsigned long)ctx;
	struct page *page = virt_to_head_page(buf);
	struct sk_buff *skb;

	len -= vi->hdr_len;
	stats->bytes += len;

	if (unlikely(len > GOOD_PACKET_LEN)) {
		pr_debug("%s: rx error: len %u exceeds max size %d\n",
			 dev->name, len, GOOD_PACKET_LEN);
		dev->stats.rx_length_errors++;
		goto err;
	}

	if (unlikely(vi->xdp_enabled)) {
		struct bpf_prog *xdp_prog;

		rcu_read_lock();
		xdp_prog = rcu_dereference(rq->xdp_prog);
		if (xdp_prog) {
			skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
						xdp_headroom, len, xdp_xmit,
						stats);
			rcu_read_unlock();
			return skb;
		}
		rcu_read_unlock();
	}

	skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
	if (likely(skb))
		return skb;

err:
	stats->drops++;
	put_page(page);
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len,
				   struct virtnet_rq_stats *stats)
{
	struct page *page = buf;
	struct sk_buff *skb =
		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);

	stats->bytes += len - vi->hdr_len;
	if (unlikely(!skb))
		goto err;

	return skb;

err:
	stats->drops++;
	give_pages(rq, page);
	return NULL;
}

static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
			       struct net_device *dev,
			       struct virtnet_rq_stats *stats)
{
	struct page *page;
	void *buf;
	u32 len;

	while (num_buf-- > 1) {
		buf = virtnet_rq_get_buf(rq, &len, NULL);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		stats->bytes += len;
		page = virt_to_head_page(buf);
		put_page(page);
	}
}

/* Why not use xdp_build_skb_from_frame() ?
 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
 * virtio-net there are 2 points that do not match its requirements:
 *  1. The size of the prefilled buffer is not fixed before xdp is set.
 *  2. xdp_build_skb_from_frame() does more checks that we don't need,
 *     like eth_type_trans() (which virtio-net does in receive_buf()).
 */
static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
					       struct virtnet_info *vi,
					       struct xdp_buff *xdp,
					       unsigned int xdp_frags_truesz)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	unsigned int headroom, data_len;
	struct sk_buff *skb;
	int metasize;
	u8 nr_frags;

	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
		pr_debug("Error building skb as missing reserved tailroom for xdp");
		return NULL;
	}

	if (unlikely(xdp_buff_has_frags(xdp)))
		nr_frags = sinfo->nr_frags;

	skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
	if (unlikely(!skb))
		return NULL;

	headroom = xdp->data - xdp->data_hard_start;
	data_len = xdp->data_end - xdp->data;
	skb_reserve(skb, headroom);
	__skb_put(skb, data_len);

	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (metasize)
		skb_metadata_set(skb, metasize);

	if (unlikely(xdp_buff_has_frags(xdp)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   xdp_frags_truesz,
					   xdp_buff_is_frag_pfmemalloc(xdp));

	return skb;
}

/* TODO: build xdp in big mode */
static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
				      struct virtnet_info *vi,
				      struct receive_queue *rq,
				      struct xdp_buff *xdp,
				      void *buf,
				      unsigned int len,
				      unsigned int frame_sz,
				      int *num_buf,
				      unsigned int *xdp_frags_truesize,
				      struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	unsigned int headroom, tailroom, room;
	unsigned int truesize, cur_frag_size;
	struct skb_shared_info *shinfo;
	unsigned int xdp_frags_truesz = 0;
	struct page *page;
	skb_frag_t *frag;
	int offset;
	void *ctx;

	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
	xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
			 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);

	if (!*num_buf)
		return 0;

	if (*num_buf > 1) {
		/* If we want to build multi-buffer xdp, we need
		 * to specify that the flags of xdp_buff have the
		 * XDP_FLAGS_HAS_FRAG bit.
		 */
		if (!xdp_buff_has_frags(xdp))
			xdp_buff_set_frags_flag(xdp);

		shinfo = xdp_get_shared_info_from_buff(xdp);
		shinfo->nr_frags = 0;
		shinfo->xdp_frags_size = 0;
	}

	if (*num_buf > MAX_SKB_FRAGS + 1)
		return -EINVAL;

	while (--*num_buf > 0) {
		buf = virtnet_rq_get_buf(rq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, *num_buf,
				 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err;
		}

		stats->bytes += len;
		page = virt_to_head_page(buf);
		offset = buf - page_address(page);

		truesize = mergeable_ctx_to_truesize(ctx);
		headroom = mergeable_ctx_to_headroom(ctx);
		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
		room = SKB_DATA_ALIGN(headroom + tailroom);

		cur_frag_size = truesize;
		xdp_frags_truesz += cur_frag_size;
		if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
			put_page(page);
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)(truesize - room));
			dev->stats.rx_length_errors++;
			goto err;
		}

		frag = &shinfo->frags[shinfo->nr_frags++];
		skb_frag_fill_page_desc(frag, page, offset, len);
		if (page_is_pfmemalloc(page))
			xdp_buff_set_frag_pfmemalloc(xdp);

		shinfo->xdp_frags_size += len;
	}

	*xdp_frags_truesize = xdp_frags_truesz;
	return 0;

err:
	put_xdp_frags(xdp);
	return -EINVAL;
}

static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct bpf_prog *xdp_prog,
				   void *ctx,
				   unsigned int *frame_sz,
				   int *num_buf,
				   struct page **page,
				   int offset,
				   unsigned int *len,
				   struct virtio_net_hdr_mrg_rxbuf *hdr)
{
	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	struct page *xdp_page;
	unsigned int xdp_room;

	/* Transient failure which in theory could occur if
	 * in-flight packets from before XDP was enabled reach
	 * the receive path after XDP is loaded.
	 */
	if (unlikely(hdr->hdr.gso_type))
		return NULL;

	/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
	 * with headroom may add hole in truesize, which
	 * make their length exceed PAGE_SIZE. So we disabled the
	 * hole mechanism for xdp. See add_recvbuf_mergeable().
	 */
	*frame_sz = truesize;

	if (likely(headroom >= virtnet_get_headroom(vi) &&
		   (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
		return page_address(*page) + offset;
	}

	/* This happens when headroom is not enough because
	 * of the buffer was prefilled before XDP is set.
	 * This should only happen for the first several packets.
	 * In fact, vq reset can be used here to help us clean up
	 * the prefilled buffers, but many existing devices do not
	 * support it, and we don't want to bother users who are
	 * using xdp normally.
	 */
	if (!xdp_prog->aux->xdp_has_frags) {
		/* linearize data for XDP */
		xdp_page = xdp_linearize_page(rq, num_buf,
					      *page, offset,
					      VIRTIO_XDP_HEADROOM,
					      len);
		if (!xdp_page)
			return NULL;
	} else {
		xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
					  sizeof(struct skb_shared_info));
		if (*len + xdp_room > PAGE_SIZE)
			return NULL;

		xdp_page = alloc_page(GFP_ATOMIC);
		if (!xdp_page)
			return NULL;

		memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
		       page_address(*page) + offset, *len);
	}

	*frame_sz = PAGE_SIZE;

	put_page(*page);

	*page = xdp_page;

	return page_address(*page) + VIRTIO_XDP_HEADROOM;
}

static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
					     struct virtnet_info *vi,
					     struct receive_queue *rq,
					     struct bpf_prog *xdp_prog,
					     void *buf,
					     void *ctx,
					     unsigned int len,
					     unsigned int *xdp_xmit,
					     struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	unsigned int xdp_frags_truesz = 0;
	struct sk_buff *head_skb;
	unsigned int frame_sz;
	struct xdp_buff xdp;
	void *data;
	u32 act;
	int err;

	data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
				     offset, &len, hdr);
	if (unlikely(!data))
		goto err_xdp;

	err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
					 &num_buf, &xdp_frags_truesz, stats);
	if (unlikely(err))
		goto err_xdp;

	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);

	switch (act) {
	case XDP_PASS:
		head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
		if (unlikely(!head_skb))
			break;
		return head_skb;

	case XDP_TX:
	case XDP_REDIRECT:
		return NULL;

	default:
		break;
	}

	put_xdp_frags(&xdp);

err_xdp:
	put_page(page);
	mergeable_buf_free(rq, num_buf, dev, stats);

	stats->xdp_drops++;
	stats->drops++;
	return NULL;
}

static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);

	head_skb = NULL;
	stats->bytes += len - vi->hdr_len;

	if (unlikely(len > truesize - room)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)(truesize - room));
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

	if (unlikely(vi->xdp_enabled)) {
		struct bpf_prog *xdp_prog;

		rcu_read_lock();
		xdp_prog = rcu_dereference(rq->xdp_prog);
		if (xdp_prog) {
			head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
							 len, xdp_xmit, stats);
			rcu_read_unlock();
			return head_skb;
		}
		rcu_read_unlock();
	}

	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		buf = virtnet_rq_get_buf(rq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		stats->bytes += len;
		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		headroom = mergeable_ctx_to_headroom(ctx);
		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
		room = SKB_DATA_ALIGN(headroom + tailroom);
		if (unlikely(len > truesize - room)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)(truesize - room));
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_skb:
	put_page(page);
	mergeable_buf_free(rq, num_buf, dev, stats);

err_buf:
	stats->drops++;
	dev_kfree_skb(head_skb);
	return NULL;
}

static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
				struct sk_buff *skb)
{
	enum pkt_hash_types rss_hash_type;

	if (!hdr_hash || !skb)
		return;

	switch (__le16_to_cpu(hdr_hash->hash_report)) {
	case VIRTIO_NET_HASH_REPORT_TCPv4:
	case VIRTIO_NET_HASH_REPORT_UDPv4:
	case VIRTIO_NET_HASH_REPORT_TCPv6:
	case VIRTIO_NET_HASH_REPORT_UDPv6:
	case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
	case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
		rss_hash_type = PKT_HASH_TYPE_L4;
		break;
	case VIRTIO_NET_HASH_REPORT_IPv4:
	case VIRTIO_NET_HASH_REPORT_IPv6:
	case VIRTIO_NET_HASH_REPORT_IPv6_EX:
		rss_hash_type = PKT_HASH_TYPE_L3;
		break;
	case VIRTIO_NET_HASH_REPORT_NONE:
	default:
		rss_hash_type = PKT_HASH_TYPE_NONE;
	}
	skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
}

static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len, void **ctx,
			unsigned int *xdp_xmit,
			struct virtnet_rq_stats *stats)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_common_hdr *hdr;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		virtnet_rq_free_unused_buf(rq->vq, buf);
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
					stats);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len, stats);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_common_hdr(skb);
	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
		virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb_record_rx_queue(skb, vq2rxq(rq->vq));
	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

/* Unlike mergeable buffers, all buffers are allocated to the
 * same size, except for the headroom. For this reason we do
 * not need to use mergeable_len_to_ctx here - it is enough
 * to store the headroom as the context ignoring the truesize.
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	unsigned int xdp_headroom = virtnet_get_headroom(vi);
	void *ctx = (void *)(unsigned long)xdp_headroom;
	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
	void *buf;
	int err;

	len = SKB_DATA_ALIGN(len) +
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	buf = virtnet_rq_alloc(rq, len, gfp);
	if (unlikely(!buf))
		return -ENOMEM;

	virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
			       vi->hdr_len + GOOD_PACKET_LEN);

	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0) {
		if (rq->do_dma)
			virtnet_rq_unmap(rq, buf, 0);
		put_page(virt_to_head_page(buf));
	}

	return err;
}
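
/* Resulting receive buffer layout for the small-buffer path above:
 *   [ VIRTNET_RX_PAD ][ xdp headroom ][ virtio header ][ up to GOOD_PACKET_LEN
 *   of packet data ][ room for skb_shared_info ]
 * Only the header + data region is handed to the device; the leading padding
 * and the tail room are what later let build_skb() and XDP work in place.
 */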

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);

	/* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
	for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separated rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	const size_t hdr_len = vi->hdr_len;
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
}
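
/* For example, with a 12-byte merge-rx header, no extra room and an EWMA
 * average of 1500 bytes (assuming it falls between rq->min_buf_len and
 * PAGE_SIZE - hdr_len), the function above returns ALIGN(12 + 1500,
 * L1_CACHE_BYTES), i.e. 1536 with 64-byte cache lines.
 */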

static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	unsigned int len, hole;
	void *ctx;
	char *buf;
	int err;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frags coalescing won't work, but consider we've
	 * disabled GSO for XDP, it won't be a big issue.
	 */
	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);

	buf = virtnet_rq_alloc(rq, len + room, gfp);
	if (unlikely(!buf))
		return -ENOMEM;

	buf += headroom; /* advance address leaving hole at front of pkt */
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len + room) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer.
		 * XDP core assumes that frame_size of xdp_buff and the length
		 * of the frag are PAGE_SIZE, so we disable the hole mechanism.
		 */
		if (!headroom)
			len += hole;
		alloc_frag->offset += hole;
	}

	virtnet_rq_init_one_sg(rq, buf, len);

	ctx = mergeable_len_to_ctx(len + room, headroom);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0) {
		if (rq->do_dma)
			virtnet_rq_unmap(rq, buf, 0);
		put_page(virt_to_head_page(buf));
	}

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
		rq->stats.kicks++;
		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
	}

	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	virtqueue_napi_schedule(&rq->napi, rvq);
}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
	napi_enable(napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets now.
	 * Call local_bh_enable after to trigger softIRQ processing.
	 */
	local_bh_disable();
	virtqueue_napi_schedule(napi, vq);
	local_bh_enable();
}

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
				   struct virtqueue *vq,
				   struct napi_struct *napi)
{
	if (!napi->weight)
		return;

	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
	 * enable the feature if this is likely affine with the transmit path.
	 */
	if (!vi->affinity_hint_set) {
		napi->weight = 0;
		return;
	}

	return virtnet_napi_enable(vq, napi);
}

static void virtnet_napi_tx_disable(struct napi_struct *napi)
{
	if (napi->weight)
		napi_disable(napi);
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
		virtnet_napi_enable(rq->vq, &rq->napi);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_receive(struct receive_queue *rq, int budget,
			   unsigned int *xdp_xmit)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct virtnet_rq_stats stats = {};
	unsigned int len;
	void *buf;
	int i;

	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		void *ctx;

		while (stats.packets < budget &&
		       (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
			stats.packets++;
		}
	} else {
		while (stats.packets < budget &&
		       (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
			stats.packets++;
		}
	}

	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
		if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
			spin_lock(&vi->refill_lock);
			if (vi->refill_enabled)
				schedule_delayed_work(&vi->refill, 0);
			spin_unlock(&vi->refill_lock);
		}
	}

	u64_stats_update_begin(&rq->stats.syncp);
	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
		size_t offset = virtnet_rq_stats_desc[i].offset;
		u64 *item;

		item = (u64 *)((u8 *)&rq->stats + offset);
		*item += *(u64 *)((u8 *)&stats + offset);
	}
	u64_stats_update_end(&rq->stats.syncp);

	return stats.packets;
}

static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
		return;

	if (__netif_tx_trylock(txq)) {
		if (sq->reset) {
			__netif_tx_unlock(txq);
			return;
		}

		do {
			virtqueue_disable_cb(sq->vq);
			free_old_xmit_skbs(sq, true);
		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));

		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct send_queue *sq;
	unsigned int received;
	unsigned int xdp_xmit = 0;

	virtnet_poll_cleantx(rq);

	received = virtnet_receive(rq, budget, &xdp_xmit);

	if (xdp_xmit & VIRTIO_XDP_REDIR)
		xdp_do_flush();

	/* Out of packets? */
	if (received < budget)
		virtqueue_napi_complete(napi, rq->vq, received);

	if (xdp_xmit & VIRTIO_XDP_TX) {
		sq = virtnet_xdp_get_sq(vi);
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
			u64_stats_update_begin(&sq->stats.syncp);
			sq->stats.kicks++;
			u64_stats_update_end(&sq->stats.syncp);
		}
		virtnet_xdp_put_sq(vi, sq);
	}

	return received;
}

static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{
	virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
	napi_disable(&vi->rq[qp_index].napi);
	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
}

static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
{
	struct net_device *dev = vi->dev;
	int err;

	err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
			       vi->rq[qp_index].napi.napi_id);
	if (err < 0)
		return err;

	err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
					 MEM_TYPE_PAGE_SHARED, NULL);
	if (err < 0)
		goto err_xdp_reg_mem_model;

	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);

	return 0;

err_xdp_reg_mem_model:
	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
	return err;
}

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i, err;

	enable_delayed_refill(vi);

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		err = virtnet_enable_queue_pair(vi, i);
		if (err < 0)
			goto err_enable_qp;
	}

	return 0;

err_enable_qp:
	disable_delayed_refill(vi);
	cancel_delayed_work_sync(&vi->refill);

	for (i--; i >= 0; i--)
		virtnet_disable_queue_pair(vi, i);
	return err;
}

static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned int index = vq2txq(sq->vq);
	struct netdev_queue *txq;
	int opaque;
	bool done;

	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
		/* We don't need to enable cb for XDP */
		napi_complete_done(napi, 0);
		return 0;
	}

	txq = netdev_get_tx_queue(vi->dev, index);
	__netif_tx_lock(txq, raw_smp_processor_id());
	virtqueue_disable_cb(sq->vq);
	free_old_xmit_skbs(sq, true);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	opaque = virtqueue_enable_cb_prepare(sq->vq);

	done = napi_complete_done(napi, 0);

	if (!done)
		virtqueue_disable_cb(sq->vq);

	__netif_tx_unlock(txq);

	if (done) {
		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
			if (napi_schedule_prep(napi)) {
				__netif_tx_lock(txq, raw_smp_processor_id());
				virtqueue_disable_cb(sq->vq);
				__netif_tx_unlock(txq);
				__napi_schedule(napi);
			}
		}
	}

	return 0;
}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	int num_sg;
	unsigned hdr_len = vi->hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	can_push = vi->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
	else
		hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;

	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
				    virtio_is_little_endian(vi->vdev), false,
				    0))
		return -EPROTO;

	if (vi->mergeable_rx_bufs)
		hdr->num_buffers = 0;

	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		num_sg++;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}
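
/* Note on the two branches above: when the virtio header can be pushed into
 * the skb headroom (can_push), header and data are covered by a single
 * skb_to_sgvec() pass; otherwise the header gets its own sg entry in sq->sg[0]
 * and the packet data starts at sq->sg[1], which costs one extra descriptor.
 */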

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
	bool kick = !netdev_xmit_more();
	bool use_napi = sq->napi.weight;

	/* Free up any pending old buffers before queueing new ones. */
	do {
		if (use_napi)
			virtqueue_disable_cb(sq->vq);

		free_old_xmit_skbs(sq, false);

	} while (use_napi && kick &&
	       unlikely(!virtqueue_enable_cb_delayed(sq->vq)));

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n",
				 qnum, err);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	if (!use_napi) {
		skb_orphan(skb);
		nf_reset_ct(skb);
	}

	check_sq_full_and_disable(vi, dev, sq);

	if (kick || netif_xmit_stopped(txq)) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
			u64_stats_update_begin(&sq->stats.syncp);
			sq->stats.kicks++;
			u64_stats_update_end(&sq->stats.syncp);
		}
	}

	return NETDEV_TX_OK;
}

static int virtnet_rx_resize(struct virtnet_info *vi,
			     struct receive_queue *rq, u32 ring_num)
{
	bool running = netif_running(vi->dev);
	int err, qindex;

	qindex = rq - vi->rq;

	if (running)
		napi_disable(&rq->napi);

	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
	if (err)
		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);

	if (!try_fill_recv(vi, rq, GFP_KERNEL))
		schedule_delayed_work(&vi->refill, 0);

	if (running)
		virtnet_napi_enable(rq->vq, &rq->napi);
	return err;
}

static int virtnet_tx_resize(struct virtnet_info *vi,
			     struct send_queue *sq, u32 ring_num)
{
	bool running = netif_running(vi->dev);
	struct netdev_queue *txq;
	int err, qindex;

	qindex = sq - vi->sq;

	if (running)
		virtnet_napi_tx_disable(&sq->napi);

	txq = netdev_get_tx_queue(vi->dev, qindex);

	/* 1. wait all xmit complete
	 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
	 */
	__netif_tx_lock_bh(txq);

	/* Prevent rx poll from accessing sq. */
	sq->reset = true;

	/* Prevent the upper layer from trying to send packets. */
	netif_stop_subqueue(vi->dev, qindex);

	__netif_tx_unlock_bh(txq);

	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
	if (err)
		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);

	__netif_tx_lock_bh(txq);
	sq->reset = false;
	netif_tx_wake_queue(txq);
	__netif_tx_unlock_bh(txq);

	if (running)
		virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
	return err;
}
/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out)
{
	struct scatterlist *sgs[4], hdr, stat;
	unsigned out_num = 0, tmp;
	int ret;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

	vi->ctrl->status = ~0;
	vi->ctrl->hdr.class = class;
	vi->ctrl->hdr.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;

	/* Add return status. */
	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
	sgs[out_num] = &stat;

	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
	if (ret < 0) {
		dev_warn(&vi->vdev->dev,
			 "Failed to add sgs for command vq: %d\n.", ret);
		return false;
	}

	if (unlikely(!virtqueue_kick(vi->cvq)))
		return vi->ctrl->status == VIRTIO_NET_OK;

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
		cpu_relax();

	return vi->ctrl->status == VIRTIO_NET_OK;
}
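
/* Illustrative sketch, not part of the upstream driver: a typical caller of
 * virtnet_send_command() stages its payload in the persistent vi->ctrl buffer
 * (on-stack memory may not be usable in a scatterlist), wraps it in a single
 * sg entry and checks the boolean result.  The helper name below is
 * hypothetical; the pattern mirrors _virtnet_set_queues() later in this file.
 */
static bool __maybe_unused virtnet_example_set_queue_pairs(struct virtnet_info *vi,
							   u16 queue_pairs)
{
	struct scatterlist sg;

	/* Stage the command payload in the per-device ctrl buffer and
	 * describe it with one scatterlist entry. */
	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));

	/* The class/command pair selects the operation; the return value
	 * reflects the VIRTIO_NET_OK status written back by the device. */
	return virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				    VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg);
}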
static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr;
	struct scatterlist sg;

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
		return -EOPNOTSUPP;

	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret)
		goto out;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			ret = -EINVAL;
			goto out;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		unsigned int i;

		/* Naturally, this has an atomicity problem. */
		for (i = 0; i < dev->addr_len; i++)
			virtio_cwrite8(vdev,
				       offsetof(struct virtio_net_config, mac) +
				       i, addr->sa_data[i]);
	}

	eth_commit_mac_addr_change(dev, p);
	ret = 0;

out:
	kfree(addr);
	return ret;
}
static void virtnet_stats(struct net_device *dev,
			  struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int start;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
		struct receive_queue *rq = &vi->rq[i];
		struct send_queue *sq = &vi->sq[i];

		do {
			start = u64_stats_fetch_begin(&sq->stats.syncp);
			tpackets = sq->stats.packets;
			tbytes   = sq->stats.bytes;
			terrors  = sq->stats.tx_timeouts;
		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));

		do {
			start = u64_stats_fetch_begin(&rq->stats.syncp);
			rpackets = rq->stats.packets;
			rbytes   = rq->stats.bytes;
			rdrops   = rq->stats.drops;
		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
		tot->rx_dropped += rdrops;
		tot->tx_errors  += terrors;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
}
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}
static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when device is going to up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	int err;

	rtnl_lock();
	err = _virtnet_set_queues(vi, queue_pairs);
	rtnl_unlock();
	return err;
}
static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure NAPI doesn't schedule refill work */
	disable_delayed_refill(vi);
	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++)
		virtnet_disable_queue_pair(vi, i);

	return 0;
}
static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 vi->ctrl->promisc ? "en" : "dis");

	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 vi->ctrl->allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}
static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}
static void virtnet_clean_affinity(struct virtnet_info *vi)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, NULL);
			virtqueue_set_affinity(vi->sq[i].vq, NULL);
		}

		vi->affinity_hint_set = false;
	}
}
static void virtnet_set_affinity(struct virtnet_info *vi)
{
	cpumask_var_t mask;
	int stragglers;
	int group_size;
	int i, j, cpu;
	int num_cpu;
	int stride;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		virtnet_clean_affinity(vi);
		return;
	}

	num_cpu = num_online_cpus();
	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
	stragglers = num_cpu >= vi->curr_queue_pairs ?
			num_cpu % vi->curr_queue_pairs :
			0;
	cpu = cpumask_first(cpu_online_mask);

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		group_size = stride + (i < stragglers ? 1 : 0);

		for (j = 0; j < group_size; j++) {
			cpumask_set_cpu(cpu, mask);
			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
						nr_cpu_ids, false);
		}
		virtqueue_set_affinity(vi->rq[i].vq, mask);
		virtqueue_set_affinity(vi->sq[i].vq, mask);
		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
		cpumask_clear(mask);
	}

	vi->affinity_hint_set = true;
	free_cpumask_var(mask);
}
2796 static int virtnet_cpu_online(unsigned int cpu
, struct hlist_node
*node
)
2798 struct virtnet_info
*vi
= hlist_entry_safe(node
, struct virtnet_info
,
2800 virtnet_set_affinity(vi
);
2804 static int virtnet_cpu_dead(unsigned int cpu
, struct hlist_node
*node
)
2806 struct virtnet_info
*vi
= hlist_entry_safe(node
, struct virtnet_info
,
2808 virtnet_set_affinity(vi
);
2812 static int virtnet_cpu_down_prep(unsigned int cpu
, struct hlist_node
*node
)
2814 struct virtnet_info
*vi
= hlist_entry_safe(node
, struct virtnet_info
,
2817 virtnet_clean_affinity(vi
);
2821 static enum cpuhp_state virtionet_online
;
2823 static int virtnet_cpu_notif_add(struct virtnet_info
*vi
)
2827 ret
= cpuhp_state_add_instance_nocalls(virtionet_online
, &vi
->node
);
2830 ret
= cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD
,
2834 cpuhp_state_remove_instance_nocalls(virtionet_online
, &vi
->node
);
2838 static void virtnet_cpu_notif_remove(struct virtnet_info
*vi
)
2840 cpuhp_state_remove_instance_nocalls(virtionet_online
, &vi
->node
);
2841 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD
,
2845 static void virtnet_get_ringparam(struct net_device
*dev
,
2846 struct ethtool_ringparam
*ring
,
2847 struct kernel_ethtool_ringparam
*kernel_ring
,
2848 struct netlink_ext_ack
*extack
)
2850 struct virtnet_info
*vi
= netdev_priv(dev
);
2852 ring
->rx_max_pending
= vi
->rq
[0].vq
->num_max
;
2853 ring
->tx_max_pending
= vi
->sq
[0].vq
->num_max
;
2854 ring
->rx_pending
= virtqueue_get_vring_size(vi
->rq
[0].vq
);
2855 ring
->tx_pending
= virtqueue_get_vring_size(vi
->sq
[0].vq
);
2858 static int virtnet_set_ringparam(struct net_device
*dev
,
2859 struct ethtool_ringparam
*ring
,
2860 struct kernel_ethtool_ringparam
*kernel_ring
,
2861 struct netlink_ext_ack
*extack
)
2863 struct virtnet_info
*vi
= netdev_priv(dev
);
2864 u32 rx_pending
, tx_pending
;
2865 struct receive_queue
*rq
;
2866 struct send_queue
*sq
;
2869 if (ring
->rx_mini_pending
|| ring
->rx_jumbo_pending
)
2872 rx_pending
= virtqueue_get_vring_size(vi
->rq
[0].vq
);
2873 tx_pending
= virtqueue_get_vring_size(vi
->sq
[0].vq
);
2875 if (ring
->rx_pending
== rx_pending
&&
2876 ring
->tx_pending
== tx_pending
)
2879 if (ring
->rx_pending
> vi
->rq
[0].vq
->num_max
)
2882 if (ring
->tx_pending
> vi
->sq
[0].vq
->num_max
)
2885 for (i
= 0; i
< vi
->max_queue_pairs
; i
++) {
2889 if (ring
->tx_pending
!= tx_pending
) {
2890 err
= virtnet_tx_resize(vi
, sq
, ring
->tx_pending
);
2895 if (ring
->rx_pending
!= rx_pending
) {
2896 err
= virtnet_rx_resize(vi
, rq
, ring
->rx_pending
);
2905 static bool virtnet_commit_rss_command(struct virtnet_info
*vi
)
2907 struct net_device
*dev
= vi
->dev
;
2908 struct scatterlist sgs
[4];
2909 unsigned int sg_buf_size
;
2912 sg_init_table(sgs
, 4);
2914 sg_buf_size
= offsetof(struct virtio_net_ctrl_rss
, indirection_table
);
2915 sg_set_buf(&sgs
[0], &vi
->ctrl
->rss
, sg_buf_size
);
2917 sg_buf_size
= sizeof(uint16_t) * (vi
->ctrl
->rss
.indirection_table_mask
+ 1);
2918 sg_set_buf(&sgs
[1], vi
->ctrl
->rss
.indirection_table
, sg_buf_size
);
2920 sg_buf_size
= offsetof(struct virtio_net_ctrl_rss
, key
)
2921 - offsetof(struct virtio_net_ctrl_rss
, max_tx_vq
);
2922 sg_set_buf(&sgs
[2], &vi
->ctrl
->rss
.max_tx_vq
, sg_buf_size
);
2924 sg_buf_size
= vi
->rss_key_size
;
2925 sg_set_buf(&sgs
[3], vi
->ctrl
->rss
.key
, sg_buf_size
);
2927 if (!virtnet_send_command(vi
, VIRTIO_NET_CTRL_MQ
,
2928 vi
->has_rss
? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
2929 : VIRTIO_NET_CTRL_MQ_HASH_CONFIG
, sgs
)) {
2930 dev_warn(&dev
->dev
, "VIRTIONET issue with committing RSS sgs\n");
2936 static void virtnet_init_default_rss(struct virtnet_info
*vi
)
2941 vi
->ctrl
->rss
.hash_types
= vi
->rss_hash_types_supported
;
2942 vi
->rss_hash_types_saved
= vi
->rss_hash_types_supported
;
2943 vi
->ctrl
->rss
.indirection_table_mask
= vi
->rss_indir_table_size
2944 ? vi
->rss_indir_table_size
- 1 : 0;
2945 vi
->ctrl
->rss
.unclassified_queue
= 0;
2947 for (; i
< vi
->rss_indir_table_size
; ++i
) {
2948 indir_val
= ethtool_rxfh_indir_default(i
, vi
->curr_queue_pairs
);
2949 vi
->ctrl
->rss
.indirection_table
[i
] = indir_val
;
2952 vi
->ctrl
->rss
.max_tx_vq
= vi
->has_rss
? vi
->curr_queue_pairs
: 0;
2953 vi
->ctrl
->rss
.hash_key_length
= vi
->rss_key_size
;
2955 netdev_rss_key_fill(vi
->ctrl
->rss
.key
, vi
->rss_key_size
);
2958 static void virtnet_get_hashflow(const struct virtnet_info
*vi
, struct ethtool_rxnfc
*info
)
2961 switch (info
->flow_type
) {
2963 if (vi
->rss_hash_types_saved
& VIRTIO_NET_RSS_HASH_TYPE_TCPv4
) {
2964 info
->data
= RXH_IP_SRC
| RXH_IP_DST
|
2965 RXH_L4_B_0_1
| RXH_L4_B_2_3
;
2966 } else if (vi
->rss_hash_types_saved
& VIRTIO_NET_RSS_HASH_TYPE_IPv4
) {
2967 info
->data
= RXH_IP_SRC
| RXH_IP_DST
;
2971 if (vi
->rss_hash_types_saved
& VIRTIO_NET_RSS_HASH_TYPE_TCPv6
) {
2972 info
->data
= RXH_IP_SRC
| RXH_IP_DST
|
2973 RXH_L4_B_0_1
| RXH_L4_B_2_3
;
2974 } else if (vi
->rss_hash_types_saved
& VIRTIO_NET_RSS_HASH_TYPE_IPv6
) {
2975 info
->data
= RXH_IP_SRC
| RXH_IP_DST
;
2979 if (vi
->rss_hash_types_saved
& VIRTIO_NET_RSS_HASH_TYPE_UDPv4
) {
2980 info
->data
= RXH_IP_SRC
| RXH_IP_DST
|
2981 RXH_L4_B_0_1
| RXH_L4_B_2_3
;
2982 } else if (vi
->rss_hash_types_saved
& VIRTIO_NET_RSS_HASH_TYPE_IPv4
) {
2983 info
->data
= RXH_IP_SRC
| RXH_IP_DST
;
2987 if (vi
->rss_hash_types_saved
& VIRTIO_NET_RSS_HASH_TYPE_UDPv6
) {
2988 info
->data
= RXH_IP_SRC
| RXH_IP_DST
|
2989 RXH_L4_B_0_1
| RXH_L4_B_2_3
;
2990 } else if (vi
->rss_hash_types_saved
& VIRTIO_NET_RSS_HASH_TYPE_IPv6
) {
2991 info
->data
= RXH_IP_SRC
| RXH_IP_DST
;
2995 if (vi
->rss_hash_types_saved
& VIRTIO_NET_RSS_HASH_TYPE_IPv4
)
2996 info
->data
= RXH_IP_SRC
| RXH_IP_DST
;
3000 if (vi
->rss_hash_types_saved
& VIRTIO_NET_RSS_HASH_TYPE_IPv6
)
3001 info
->data
= RXH_IP_SRC
| RXH_IP_DST
;
3010 static bool virtnet_set_hashflow(struct virtnet_info
*vi
, struct ethtool_rxnfc
*info
)
3012 u32 new_hashtypes
= vi
->rss_hash_types_saved
;
3013 bool is_disable
= info
->data
& RXH_DISCARD
;
3014 bool is_l4
= info
->data
== (RXH_IP_SRC
| RXH_IP_DST
| RXH_L4_B_0_1
| RXH_L4_B_2_3
);
3016 /* supports only 'sd', 'sdfn' and 'r' */
3017 if (!((info
->data
== (RXH_IP_SRC
| RXH_IP_DST
)) | is_l4
| is_disable
))
3020 switch (info
->flow_type
) {
3022 new_hashtypes
&= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4
| VIRTIO_NET_RSS_HASH_TYPE_TCPv4
);
3024 new_hashtypes
|= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3025 | (is_l4
? VIRTIO_NET_RSS_HASH_TYPE_TCPv4
: 0);
3028 new_hashtypes
&= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4
| VIRTIO_NET_RSS_HASH_TYPE_UDPv4
);
3030 new_hashtypes
|= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3031 | (is_l4
? VIRTIO_NET_RSS_HASH_TYPE_UDPv4
: 0);
3034 new_hashtypes
&= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4
;
3036 new_hashtypes
= VIRTIO_NET_RSS_HASH_TYPE_IPv4
;
3039 new_hashtypes
&= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6
| VIRTIO_NET_RSS_HASH_TYPE_TCPv6
);
3041 new_hashtypes
|= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3042 | (is_l4
? VIRTIO_NET_RSS_HASH_TYPE_TCPv6
: 0);
3045 new_hashtypes
&= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6
| VIRTIO_NET_RSS_HASH_TYPE_UDPv6
);
3047 new_hashtypes
|= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3048 | (is_l4
? VIRTIO_NET_RSS_HASH_TYPE_UDPv6
: 0);
3051 new_hashtypes
&= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6
;
3053 new_hashtypes
= VIRTIO_NET_RSS_HASH_TYPE_IPv6
;
3056 /* unsupported flow */
3060 /* if unsupported hashtype was set */
3061 if (new_hashtypes
!= (new_hashtypes
& vi
->rss_hash_types_supported
))
3064 if (new_hashtypes
!= vi
->rss_hash_types_saved
) {
3065 vi
->rss_hash_types_saved
= new_hashtypes
;
3066 vi
->ctrl
->rss
.hash_types
= vi
->rss_hash_types_saved
;
3067 if (vi
->dev
->features
& NETIF_F_RXHASH
)
3068 return virtnet_commit_rss_command(vi
);
3074 static void virtnet_get_drvinfo(struct net_device
*dev
,
3075 struct ethtool_drvinfo
*info
)
3077 struct virtnet_info
*vi
= netdev_priv(dev
);
3078 struct virtio_device
*vdev
= vi
->vdev
;
3080 strscpy(info
->driver
, KBUILD_MODNAME
, sizeof(info
->driver
));
3081 strscpy(info
->version
, VIRTNET_DRIVER_VERSION
, sizeof(info
->version
));
3082 strscpy(info
->bus_info
, virtio_bus_name(vdev
), sizeof(info
->bus_info
));
3086 /* TODO: Eliminate OOO packets during switching */
3087 static int virtnet_set_channels(struct net_device
*dev
,
3088 struct ethtool_channels
*channels
)
3090 struct virtnet_info
*vi
= netdev_priv(dev
);
3091 u16 queue_pairs
= channels
->combined_count
;
3094 /* We don't support separate rx/tx channels.
3095 * We don't allow setting 'other' channels.
3097 if (channels
->rx_count
|| channels
->tx_count
|| channels
->other_count
)
3100 if (queue_pairs
> vi
->max_queue_pairs
|| queue_pairs
== 0)
3103 /* For now we don't support modifying channels while XDP is loaded
3104 * also when XDP is loaded all RX queues have XDP programs so we only
3105 * need to check a single RX queue.
3107 if (vi
->rq
[0].xdp_prog
)
3111 err
= _virtnet_set_queues(vi
, queue_pairs
);
3116 virtnet_set_affinity(vi
);
3119 netif_set_real_num_tx_queues(dev
, queue_pairs
);
3120 netif_set_real_num_rx_queues(dev
, queue_pairs
);
3125 static void virtnet_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
3127 struct virtnet_info
*vi
= netdev_priv(dev
);
3131 switch (stringset
) {
3133 for (i
= 0; i
< vi
->curr_queue_pairs
; i
++) {
3134 for (j
= 0; j
< VIRTNET_RQ_STATS_LEN
; j
++)
3135 ethtool_sprintf(&p
, "rx_queue_%u_%s", i
,
3136 virtnet_rq_stats_desc
[j
].desc
);
3139 for (i
= 0; i
< vi
->curr_queue_pairs
; i
++) {
3140 for (j
= 0; j
< VIRTNET_SQ_STATS_LEN
; j
++)
3141 ethtool_sprintf(&p
, "tx_queue_%u_%s", i
,
3142 virtnet_sq_stats_desc
[j
].desc
);
3148 static int virtnet_get_sset_count(struct net_device
*dev
, int sset
)
3150 struct virtnet_info
*vi
= netdev_priv(dev
);
3154 return vi
->curr_queue_pairs
* (VIRTNET_RQ_STATS_LEN
+
3155 VIRTNET_SQ_STATS_LEN
);
3161 static void virtnet_get_ethtool_stats(struct net_device
*dev
,
3162 struct ethtool_stats
*stats
, u64
*data
)
3164 struct virtnet_info
*vi
= netdev_priv(dev
);
3165 unsigned int idx
= 0, start
, i
, j
;
3166 const u8
*stats_base
;
3169 for (i
= 0; i
< vi
->curr_queue_pairs
; i
++) {
3170 struct receive_queue
*rq
= &vi
->rq
[i
];
3172 stats_base
= (u8
*)&rq
->stats
;
3174 start
= u64_stats_fetch_begin(&rq
->stats
.syncp
);
3175 for (j
= 0; j
< VIRTNET_RQ_STATS_LEN
; j
++) {
3176 offset
= virtnet_rq_stats_desc
[j
].offset
;
3177 data
[idx
+ j
] = *(u64
*)(stats_base
+ offset
);
3179 } while (u64_stats_fetch_retry(&rq
->stats
.syncp
, start
));
3180 idx
+= VIRTNET_RQ_STATS_LEN
;
3183 for (i
= 0; i
< vi
->curr_queue_pairs
; i
++) {
3184 struct send_queue
*sq
= &vi
->sq
[i
];
3186 stats_base
= (u8
*)&sq
->stats
;
3188 start
= u64_stats_fetch_begin(&sq
->stats
.syncp
);
3189 for (j
= 0; j
< VIRTNET_SQ_STATS_LEN
; j
++) {
3190 offset
= virtnet_sq_stats_desc
[j
].offset
;
3191 data
[idx
+ j
] = *(u64
*)(stats_base
+ offset
);
3193 } while (u64_stats_fetch_retry(&sq
->stats
.syncp
, start
));
3194 idx
+= VIRTNET_SQ_STATS_LEN
;
3198 static void virtnet_get_channels(struct net_device
*dev
,
3199 struct ethtool_channels
*channels
)
3201 struct virtnet_info
*vi
= netdev_priv(dev
);
3203 channels
->combined_count
= vi
->curr_queue_pairs
;
3204 channels
->max_combined
= vi
->max_queue_pairs
;
3205 channels
->max_other
= 0;
3206 channels
->rx_count
= 0;
3207 channels
->tx_count
= 0;
3208 channels
->other_count
= 0;
3211 static int virtnet_set_link_ksettings(struct net_device
*dev
,
3212 const struct ethtool_link_ksettings
*cmd
)
3214 struct virtnet_info
*vi
= netdev_priv(dev
);
3216 return ethtool_virtdev_set_link_ksettings(dev
, cmd
,
3217 &vi
->speed
, &vi
->duplex
);
3220 static int virtnet_get_link_ksettings(struct net_device
*dev
,
3221 struct ethtool_link_ksettings
*cmd
)
3223 struct virtnet_info
*vi
= netdev_priv(dev
);
3225 cmd
->base
.speed
= vi
->speed
;
3226 cmd
->base
.duplex
= vi
->duplex
;
3227 cmd
->base
.port
= PORT_OTHER
;
3232 static int virtnet_send_notf_coal_cmds(struct virtnet_info
*vi
,
3233 struct ethtool_coalesce
*ec
)
3235 struct scatterlist sgs_tx
, sgs_rx
;
3237 vi
->ctrl
->coal_tx
.tx_usecs
= cpu_to_le32(ec
->tx_coalesce_usecs
);
3238 vi
->ctrl
->coal_tx
.tx_max_packets
= cpu_to_le32(ec
->tx_max_coalesced_frames
);
3239 sg_init_one(&sgs_tx
, &vi
->ctrl
->coal_tx
, sizeof(vi
->ctrl
->coal_tx
));
3241 if (!virtnet_send_command(vi
, VIRTIO_NET_CTRL_NOTF_COAL
,
3242 VIRTIO_NET_CTRL_NOTF_COAL_TX_SET
,
3246 /* Save parameters */
3247 vi
->intr_coal_tx
.max_usecs
= ec
->tx_coalesce_usecs
;
3248 vi
->intr_coal_tx
.max_packets
= ec
->tx_max_coalesced_frames
;
3250 vi
->ctrl
->coal_rx
.rx_usecs
= cpu_to_le32(ec
->rx_coalesce_usecs
);
3251 vi
->ctrl
->coal_rx
.rx_max_packets
= cpu_to_le32(ec
->rx_max_coalesced_frames
);
3252 sg_init_one(&sgs_rx
, &vi
->ctrl
->coal_rx
, sizeof(vi
->ctrl
->coal_rx
));
3254 if (!virtnet_send_command(vi
, VIRTIO_NET_CTRL_NOTF_COAL
,
3255 VIRTIO_NET_CTRL_NOTF_COAL_RX_SET
,
3259 /* Save parameters */
3260 vi
->intr_coal_rx
.max_usecs
= ec
->rx_coalesce_usecs
;
3261 vi
->intr_coal_rx
.max_packets
= ec
->rx_max_coalesced_frames
;
3266 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info
*vi
,
3267 u16 vqn
, u32 max_usecs
, u32 max_packets
)
3269 struct scatterlist sgs
;
3271 vi
->ctrl
->coal_vq
.vqn
= cpu_to_le16(vqn
);
3272 vi
->ctrl
->coal_vq
.coal
.max_usecs
= cpu_to_le32(max_usecs
);
3273 vi
->ctrl
->coal_vq
.coal
.max_packets
= cpu_to_le32(max_packets
);
3274 sg_init_one(&sgs
, &vi
->ctrl
->coal_vq
, sizeof(vi
->ctrl
->coal_vq
));
3276 if (!virtnet_send_command(vi
, VIRTIO_NET_CTRL_NOTF_COAL
,
3277 VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET
,
3284 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info
*vi
,
3285 struct ethtool_coalesce
*ec
,
3290 if (ec
->rx_coalesce_usecs
|| ec
->rx_max_coalesced_frames
) {
3291 err
= virtnet_send_ctrl_coal_vq_cmd(vi
, rxq2vq(queue
),
3292 ec
->rx_coalesce_usecs
,
3293 ec
->rx_max_coalesced_frames
);
3296 /* Save parameters */
3297 vi
->rq
[queue
].intr_coal
.max_usecs
= ec
->rx_coalesce_usecs
;
3298 vi
->rq
[queue
].intr_coal
.max_packets
= ec
->rx_max_coalesced_frames
;
3301 if (ec
->tx_coalesce_usecs
|| ec
->tx_max_coalesced_frames
) {
3302 err
= virtnet_send_ctrl_coal_vq_cmd(vi
, txq2vq(queue
),
3303 ec
->tx_coalesce_usecs
,
3304 ec
->tx_max_coalesced_frames
);
3307 /* Save parameters */
3308 vi
->sq
[queue
].intr_coal
.max_usecs
= ec
->tx_coalesce_usecs
;
3309 vi
->sq
[queue
].intr_coal
.max_packets
= ec
->tx_max_coalesced_frames
;
3315 static int virtnet_coal_params_supported(struct ethtool_coalesce
*ec
)
3317 /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
3318 * feature is negotiated.
3320 if (ec
->rx_coalesce_usecs
|| ec
->tx_coalesce_usecs
)
3323 if (ec
->tx_max_coalesced_frames
> 1 ||
3324 ec
->rx_max_coalesced_frames
!= 1)
3330 static int virtnet_should_update_vq_weight(int dev_flags
, int weight
,
3331 int vq_weight
, bool *should_update
)
3333 if (weight
^ vq_weight
) {
3334 if (dev_flags
& IFF_UP
)
3336 *should_update
= true;
3342 static int virtnet_set_coalesce(struct net_device
*dev
,
3343 struct ethtool_coalesce
*ec
,
3344 struct kernel_ethtool_coalesce
*kernel_coal
,
3345 struct netlink_ext_ack
*extack
)
3347 struct virtnet_info
*vi
= netdev_priv(dev
);
3348 int ret
, queue_number
, napi_weight
;
3349 bool update_napi
= false;
3351 /* Can't change NAPI weight if the link is up */
3352 napi_weight
= ec
->tx_max_coalesced_frames
? NAPI_POLL_WEIGHT
: 0;
3353 for (queue_number
= 0; queue_number
< vi
->max_queue_pairs
; queue_number
++) {
3354 ret
= virtnet_should_update_vq_weight(dev
->flags
, napi_weight
,
3355 vi
->sq
[queue_number
].napi
.weight
,
3361 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be
3362 * updated for the sake of simplicity, which might not be necessary
3368 if (virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_NOTF_COAL
))
3369 ret
= virtnet_send_notf_coal_cmds(vi
, ec
);
3371 ret
= virtnet_coal_params_supported(ec
);
3377 for (; queue_number
< vi
->max_queue_pairs
; queue_number
++)
3378 vi
->sq
[queue_number
].napi
.weight
= napi_weight
;
3384 static int virtnet_get_coalesce(struct net_device
*dev
,
3385 struct ethtool_coalesce
*ec
,
3386 struct kernel_ethtool_coalesce
*kernel_coal
,
3387 struct netlink_ext_ack
*extack
)
3389 struct virtnet_info
*vi
= netdev_priv(dev
);
3391 if (virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_NOTF_COAL
)) {
3392 ec
->rx_coalesce_usecs
= vi
->intr_coal_rx
.max_usecs
;
3393 ec
->tx_coalesce_usecs
= vi
->intr_coal_tx
.max_usecs
;
3394 ec
->tx_max_coalesced_frames
= vi
->intr_coal_tx
.max_packets
;
3395 ec
->rx_max_coalesced_frames
= vi
->intr_coal_rx
.max_packets
;
3397 ec
->rx_max_coalesced_frames
= 1;
3399 if (vi
->sq
[0].napi
.weight
)
3400 ec
->tx_max_coalesced_frames
= 1;
3406 static int virtnet_set_per_queue_coalesce(struct net_device
*dev
,
3408 struct ethtool_coalesce
*ec
)
3410 struct virtnet_info
*vi
= netdev_priv(dev
);
3411 int ret
, napi_weight
;
3412 bool update_napi
= false;
3414 if (queue
>= vi
->max_queue_pairs
)
3417 /* Can't change NAPI weight if the link is up */
3418 napi_weight
= ec
->tx_max_coalesced_frames
? NAPI_POLL_WEIGHT
: 0;
3419 ret
= virtnet_should_update_vq_weight(dev
->flags
, napi_weight
,
3420 vi
->sq
[queue
].napi
.weight
,
3425 if (virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_VQ_NOTF_COAL
))
3426 ret
= virtnet_send_notf_coal_vq_cmds(vi
, ec
, queue
);
3428 ret
= virtnet_coal_params_supported(ec
);
3434 vi
->sq
[queue
].napi
.weight
= napi_weight
;
3439 static int virtnet_get_per_queue_coalesce(struct net_device
*dev
,
3441 struct ethtool_coalesce
*ec
)
3443 struct virtnet_info
*vi
= netdev_priv(dev
);
3445 if (queue
>= vi
->max_queue_pairs
)
3448 if (virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_VQ_NOTF_COAL
)) {
3449 ec
->rx_coalesce_usecs
= vi
->rq
[queue
].intr_coal
.max_usecs
;
3450 ec
->tx_coalesce_usecs
= vi
->sq
[queue
].intr_coal
.max_usecs
;
3451 ec
->tx_max_coalesced_frames
= vi
->sq
[queue
].intr_coal
.max_packets
;
3452 ec
->rx_max_coalesced_frames
= vi
->rq
[queue
].intr_coal
.max_packets
;
3454 ec
->rx_max_coalesced_frames
= 1;
3456 if (vi
->sq
[0].napi
.weight
)
3457 ec
->tx_max_coalesced_frames
= 1;
3463 static void virtnet_init_settings(struct net_device
*dev
)
3465 struct virtnet_info
*vi
= netdev_priv(dev
);
3467 vi
->speed
= SPEED_UNKNOWN
;
3468 vi
->duplex
= DUPLEX_UNKNOWN
;
3471 static void virtnet_update_settings(struct virtnet_info
*vi
)
3476 if (!virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_SPEED_DUPLEX
))
3479 virtio_cread_le(vi
->vdev
, struct virtio_net_config
, speed
, &speed
);
3481 if (ethtool_validate_speed(speed
))
3484 virtio_cread_le(vi
->vdev
, struct virtio_net_config
, duplex
, &duplex
);
3486 if (ethtool_validate_duplex(duplex
))
3487 vi
->duplex
= duplex
;
3490 static u32
virtnet_get_rxfh_key_size(struct net_device
*dev
)
3492 return ((struct virtnet_info
*)netdev_priv(dev
))->rss_key_size
;
3495 static u32
virtnet_get_rxfh_indir_size(struct net_device
*dev
)
3497 return ((struct virtnet_info
*)netdev_priv(dev
))->rss_indir_table_size
;
3500 static int virtnet_get_rxfh(struct net_device
*dev
, u32
*indir
, u8
*key
, u8
*hfunc
)
3502 struct virtnet_info
*vi
= netdev_priv(dev
);
3506 for (i
= 0; i
< vi
->rss_indir_table_size
; ++i
)
3507 indir
[i
] = vi
->ctrl
->rss
.indirection_table
[i
];
3511 memcpy(key
, vi
->ctrl
->rss
.key
, vi
->rss_key_size
);
3514 *hfunc
= ETH_RSS_HASH_TOP
;
3519 static int virtnet_set_rxfh(struct net_device
*dev
, const u32
*indir
, const u8
*key
, const u8 hfunc
)
3521 struct virtnet_info
*vi
= netdev_priv(dev
);
3524 if (hfunc
!= ETH_RSS_HASH_NO_CHANGE
&& hfunc
!= ETH_RSS_HASH_TOP
)
3528 for (i
= 0; i
< vi
->rss_indir_table_size
; ++i
)
3529 vi
->ctrl
->rss
.indirection_table
[i
] = indir
[i
];
3532 memcpy(vi
->ctrl
->rss
.key
, key
, vi
->rss_key_size
);
3534 virtnet_commit_rss_command(vi
);
3539 static int virtnet_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*info
, u32
*rule_locs
)
3541 struct virtnet_info
*vi
= netdev_priv(dev
);
3544 switch (info
->cmd
) {
3545 case ETHTOOL_GRXRINGS
:
3546 info
->data
= vi
->curr_queue_pairs
;
3549 virtnet_get_hashflow(vi
, info
);
3558 static int virtnet_set_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*info
)
3560 struct virtnet_info
*vi
= netdev_priv(dev
);
3563 switch (info
->cmd
) {
3565 if (!virtnet_set_hashflow(vi
, info
))
static const struct ethtool_ops virtnet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
		ETHTOOL_COALESCE_USECS,
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.set_ringparam = virtnet_set_ringparam,
	.get_strings = virtnet_get_strings,
	.get_sset_count = virtnet_get_sset_count,
	.get_ethtool_stats = virtnet_get_ethtool_stats,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = virtnet_get_link_ksettings,
	.set_link_ksettings = virtnet_set_link_ksettings,
	.set_coalesce = virtnet_set_coalesce,
	.get_coalesce = virtnet_get_coalesce,
	.set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
	.get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
	.get_rxfh_key_size = virtnet_get_rxfh_key_size,
	.get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
	.get_rxfh = virtnet_get_rxfh,
	.set_rxfh = virtnet_set_rxfh,
	.get_rxnfc = virtnet_get_rxnfc,
	.set_rxnfc = virtnet_set_rxnfc,
};
3603 static void virtnet_freeze_down(struct virtio_device
*vdev
)
3605 struct virtnet_info
*vi
= vdev
->priv
;
3607 /* Make sure no work handler is accessing the device */
3608 flush_work(&vi
->config_work
);
3610 netif_tx_lock_bh(vi
->dev
);
3611 netif_device_detach(vi
->dev
);
3612 netif_tx_unlock_bh(vi
->dev
);
3613 if (netif_running(vi
->dev
))
3614 virtnet_close(vi
->dev
);
3617 static int init_vqs(struct virtnet_info
*vi
);
3619 static int virtnet_restore_up(struct virtio_device
*vdev
)
3621 struct virtnet_info
*vi
= vdev
->priv
;
3628 virtio_device_ready(vdev
);
3630 enable_delayed_refill(vi
);
3632 if (netif_running(vi
->dev
)) {
3633 err
= virtnet_open(vi
->dev
);
3638 netif_tx_lock_bh(vi
->dev
);
3639 netif_device_attach(vi
->dev
);
3640 netif_tx_unlock_bh(vi
->dev
);
3644 static int virtnet_set_guest_offloads(struct virtnet_info
*vi
, u64 offloads
)
3646 struct scatterlist sg
;
3647 vi
->ctrl
->offloads
= cpu_to_virtio64(vi
->vdev
, offloads
);
3649 sg_init_one(&sg
, &vi
->ctrl
->offloads
, sizeof(vi
->ctrl
->offloads
));
3651 if (!virtnet_send_command(vi
, VIRTIO_NET_CTRL_GUEST_OFFLOADS
,
3652 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET
, &sg
)) {
3653 dev_warn(&vi
->dev
->dev
, "Fail to set guest offload.\n");
3660 static int virtnet_clear_guest_offloads(struct virtnet_info
*vi
)
3664 if (!vi
->guest_offloads
)
3667 return virtnet_set_guest_offloads(vi
, offloads
);
3670 static int virtnet_restore_guest_offloads(struct virtnet_info
*vi
)
3672 u64 offloads
= vi
->guest_offloads
;
3674 if (!vi
->guest_offloads
)
3677 return virtnet_set_guest_offloads(vi
, offloads
);
3680 static int virtnet_xdp_set(struct net_device
*dev
, struct bpf_prog
*prog
,
3681 struct netlink_ext_ack
*extack
)
3683 unsigned int room
= SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM
+
3684 sizeof(struct skb_shared_info
));
3685 unsigned int max_sz
= PAGE_SIZE
- room
- ETH_HLEN
;
3686 struct virtnet_info
*vi
= netdev_priv(dev
);
3687 struct bpf_prog
*old_prog
;
3688 u16 xdp_qp
= 0, curr_qp
;
3691 if (!virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
)
3692 && (virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_GUEST_TSO4
) ||
3693 virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_GUEST_TSO6
) ||
3694 virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_GUEST_ECN
) ||
3695 virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_GUEST_UFO
) ||
3696 virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_GUEST_CSUM
) ||
3697 virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_GUEST_USO4
) ||
3698 virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_GUEST_USO6
))) {
3699 NL_SET_ERR_MSG_MOD(extack
, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
3703 if (vi
->mergeable_rx_bufs
&& !vi
->any_header_sg
) {
3704 NL_SET_ERR_MSG_MOD(extack
, "XDP expects header/data in single page, any_header_sg required");
3708 if (prog
&& !prog
->aux
->xdp_has_frags
&& dev
->mtu
> max_sz
) {
3709 NL_SET_ERR_MSG_MOD(extack
, "MTU too large to enable XDP without frags");
3710 netdev_warn(dev
, "single-buffer XDP requires MTU less than %u\n", max_sz
);
3714 curr_qp
= vi
->curr_queue_pairs
- vi
->xdp_queue_pairs
;
3716 xdp_qp
= nr_cpu_ids
;
3718 /* XDP requires extra queues for XDP_TX */
3719 if (curr_qp
+ xdp_qp
> vi
->max_queue_pairs
) {
3720 netdev_warn_once(dev
, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
3721 curr_qp
+ xdp_qp
, vi
->max_queue_pairs
);
3725 old_prog
= rtnl_dereference(vi
->rq
[0].xdp_prog
);
3726 if (!prog
&& !old_prog
)
3730 bpf_prog_add(prog
, vi
->max_queue_pairs
- 1);
3732 /* Make sure NAPI is not using any XDP TX queues for RX. */
3733 if (netif_running(dev
)) {
3734 for (i
= 0; i
< vi
->max_queue_pairs
; i
++) {
3735 napi_disable(&vi
->rq
[i
].napi
);
3736 virtnet_napi_tx_disable(&vi
->sq
[i
].napi
);
3741 for (i
= 0; i
< vi
->max_queue_pairs
; i
++) {
3742 rcu_assign_pointer(vi
->rq
[i
].xdp_prog
, prog
);
3744 virtnet_restore_guest_offloads(vi
);
3749 err
= _virtnet_set_queues(vi
, curr_qp
+ xdp_qp
);
3752 netif_set_real_num_rx_queues(dev
, curr_qp
+ xdp_qp
);
3753 vi
->xdp_queue_pairs
= xdp_qp
;
3756 vi
->xdp_enabled
= true;
3757 for (i
= 0; i
< vi
->max_queue_pairs
; i
++) {
3758 rcu_assign_pointer(vi
->rq
[i
].xdp_prog
, prog
);
3759 if (i
== 0 && !old_prog
)
3760 virtnet_clear_guest_offloads(vi
);
3763 xdp_features_set_redirect_target(dev
, true);
3765 xdp_features_clear_redirect_target(dev
);
3766 vi
->xdp_enabled
= false;
3769 for (i
= 0; i
< vi
->max_queue_pairs
; i
++) {
3771 bpf_prog_put(old_prog
);
3772 if (netif_running(dev
)) {
3773 virtnet_napi_enable(vi
->rq
[i
].vq
, &vi
->rq
[i
].napi
);
3774 virtnet_napi_tx_enable(vi
, vi
->sq
[i
].vq
,
3783 virtnet_clear_guest_offloads(vi
);
3784 for (i
= 0; i
< vi
->max_queue_pairs
; i
++)
3785 rcu_assign_pointer(vi
->rq
[i
].xdp_prog
, old_prog
);
3788 if (netif_running(dev
)) {
3789 for (i
= 0; i
< vi
->max_queue_pairs
; i
++) {
3790 virtnet_napi_enable(vi
->rq
[i
].vq
, &vi
->rq
[i
].napi
);
3791 virtnet_napi_tx_enable(vi
, vi
->sq
[i
].vq
,
3796 bpf_prog_sub(prog
, vi
->max_queue_pairs
- 1);
3800 static int virtnet_xdp(struct net_device
*dev
, struct netdev_bpf
*xdp
)
3802 switch (xdp
->command
) {
3803 case XDP_SETUP_PROG
:
3804 return virtnet_xdp_set(dev
, xdp
->prog
, xdp
->extack
);
3810 static int virtnet_get_phys_port_name(struct net_device
*dev
, char *buf
,
3813 struct virtnet_info
*vi
= netdev_priv(dev
);
3816 if (!virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_STANDBY
))
3819 ret
= snprintf(buf
, len
, "sby");
3826 static int virtnet_set_features(struct net_device
*dev
,
3827 netdev_features_t features
)
3829 struct virtnet_info
*vi
= netdev_priv(dev
);
3833 if ((dev
->features
^ features
) & NETIF_F_GRO_HW
) {
3834 if (vi
->xdp_enabled
)
3837 if (features
& NETIF_F_GRO_HW
)
3838 offloads
= vi
->guest_offloads_capable
;
3840 offloads
= vi
->guest_offloads_capable
&
3841 ~GUEST_OFFLOAD_GRO_HW_MASK
;
3843 err
= virtnet_set_guest_offloads(vi
, offloads
);
3846 vi
->guest_offloads
= offloads
;
3849 if ((dev
->features
^ features
) & NETIF_F_RXHASH
) {
3850 if (features
& NETIF_F_RXHASH
)
3851 vi
->ctrl
->rss
.hash_types
= vi
->rss_hash_types_saved
;
3853 vi
->ctrl
->rss
.hash_types
= VIRTIO_NET_HASH_REPORT_NONE
;
3855 if (!virtnet_commit_rss_command(vi
))
3862 static void virtnet_tx_timeout(struct net_device
*dev
, unsigned int txqueue
)
3864 struct virtnet_info
*priv
= netdev_priv(dev
);
3865 struct send_queue
*sq
= &priv
->sq
[txqueue
];
3866 struct netdev_queue
*txq
= netdev_get_tx_queue(dev
, txqueue
);
3868 u64_stats_update_begin(&sq
->stats
.syncp
);
3869 sq
->stats
.tx_timeouts
++;
3870 u64_stats_update_end(&sq
->stats
.syncp
);
3872 netdev_err(dev
, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
3873 txqueue
, sq
->name
, sq
->vq
->index
, sq
->vq
->name
,
3874 jiffies_to_usecs(jiffies
- READ_ONCE(txq
->trans_start
)));
static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
	.ndo_bpf             = virtnet_xdp,
	.ndo_xdp_xmit        = virtnet_xdp_xmit,
	.ndo_features_check  = passthru_features_check,
	.ndo_get_phys_port_name = virtnet_get_phys_port_name,
	.ndo_set_features    = virtnet_set_features,
	.ndo_tx_timeout      = virtnet_tx_timeout,
};
3895 static void virtnet_config_changed_work(struct work_struct
*work
)
3897 struct virtnet_info
*vi
=
3898 container_of(work
, struct virtnet_info
, config_work
);
3901 if (virtio_cread_feature(vi
->vdev
, VIRTIO_NET_F_STATUS
,
3902 struct virtio_net_config
, status
, &v
) < 0)
3905 if (v
& VIRTIO_NET_S_ANNOUNCE
) {
3906 netdev_notify_peers(vi
->dev
);
3907 virtnet_ack_link_announce(vi
);
3910 /* Ignore unknown (future) status bits */
3911 v
&= VIRTIO_NET_S_LINK_UP
;
3913 if (vi
->status
== v
)
3918 if (vi
->status
& VIRTIO_NET_S_LINK_UP
) {
3919 virtnet_update_settings(vi
);
3920 netif_carrier_on(vi
->dev
);
3921 netif_tx_wake_all_queues(vi
->dev
);
3923 netif_carrier_off(vi
->dev
);
3924 netif_tx_stop_all_queues(vi
->dev
);
3928 static void virtnet_config_changed(struct virtio_device
*vdev
)
3930 struct virtnet_info
*vi
= vdev
->priv
;
3932 schedule_work(&vi
->config_work
);
3935 static void virtnet_free_queues(struct virtnet_info
*vi
)
3939 for (i
= 0; i
< vi
->max_queue_pairs
; i
++) {
3940 __netif_napi_del(&vi
->rq
[i
].napi
);
3941 __netif_napi_del(&vi
->sq
[i
].napi
);
3944 /* We called __netif_napi_del(),
3945 * we need to respect an RCU grace period before freeing vi->rq
3954 static void _free_receive_bufs(struct virtnet_info
*vi
)
3956 struct bpf_prog
*old_prog
;
3959 for (i
= 0; i
< vi
->max_queue_pairs
; i
++) {
3960 while (vi
->rq
[i
].pages
)
3961 __free_pages(get_a_page(&vi
->rq
[i
], GFP_KERNEL
), 0);
3963 old_prog
= rtnl_dereference(vi
->rq
[i
].xdp_prog
);
3964 RCU_INIT_POINTER(vi
->rq
[i
].xdp_prog
, NULL
);
3966 bpf_prog_put(old_prog
);
3970 static void free_receive_bufs(struct virtnet_info
*vi
)
3973 _free_receive_bufs(vi
);
3977 static void free_receive_page_frags(struct virtnet_info
*vi
)
3980 for (i
= 0; i
< vi
->max_queue_pairs
; i
++)
3981 if (vi
->rq
[i
].alloc_frag
.page
) {
3982 if (vi
->rq
[i
].do_dma
&& vi
->rq
[i
].last_dma
)
3983 virtnet_rq_unmap(&vi
->rq
[i
], vi
->rq
[i
].last_dma
, 0);
3984 put_page(vi
->rq
[i
].alloc_frag
.page
);
3988 static void virtnet_sq_free_unused_buf(struct virtqueue
*vq
, void *buf
)
3990 if (!is_xdp_frame(buf
))
3993 xdp_return_frame(ptr_to_xdp(buf
));
3996 static void virtnet_rq_free_unused_buf(struct virtqueue
*vq
, void *buf
)
3998 struct virtnet_info
*vi
= vq
->vdev
->priv
;
4001 if (vi
->mergeable_rx_bufs
)
4002 put_page(virt_to_head_page(buf
));
4003 else if (vi
->big_packets
)
4004 give_pages(&vi
->rq
[i
], buf
);
4006 put_page(virt_to_head_page(buf
));
4009 static void free_unused_bufs(struct virtnet_info
*vi
)
4014 for (i
= 0; i
< vi
->max_queue_pairs
; i
++) {
4015 struct virtqueue
*vq
= vi
->sq
[i
].vq
;
4016 while ((buf
= virtqueue_detach_unused_buf(vq
)) != NULL
)
4017 virtnet_sq_free_unused_buf(vq
, buf
);
4021 for (i
= 0; i
< vi
->max_queue_pairs
; i
++) {
4022 struct receive_queue
*rq
= &vi
->rq
[i
];
4024 while ((buf
= virtnet_rq_detach_unused_buf(rq
)) != NULL
)
4025 virtnet_rq_free_unused_buf(rq
->vq
, buf
);
4030 static void virtnet_del_vqs(struct virtnet_info
*vi
)
4032 struct virtio_device
*vdev
= vi
->vdev
;
4034 virtnet_clean_affinity(vi
);
4036 vdev
->config
->del_vqs(vdev
);
4038 virtnet_free_queues(vi
);
/* How large should a single buffer be so a queue full of these can fit at
 * least one full packet?
 * Logic below assumes the mergeable buffer header is used.
 */
static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
{
	const unsigned int hdr_len = vi->hdr_len;
	unsigned int rq_size = virtqueue_get_vring_size(vq);
	unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);

	return max(max(min_buf_len, hdr_len) - hdr_len,
		   (unsigned int)GOOD_PACKET_LEN);
}
4057 static int virtnet_find_vqs(struct virtnet_info
*vi
)
4059 vq_callback_t
**callbacks
;
4060 struct virtqueue
**vqs
;
4066 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
4067 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
4068 * possible control vq.
4070 total_vqs
= vi
->max_queue_pairs
* 2 +
4071 virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_CTRL_VQ
);
4073 /* Allocate space for find_vqs parameters */
4074 vqs
= kcalloc(total_vqs
, sizeof(*vqs
), GFP_KERNEL
);
4077 callbacks
= kmalloc_array(total_vqs
, sizeof(*callbacks
), GFP_KERNEL
);
4080 names
= kmalloc_array(total_vqs
, sizeof(*names
), GFP_KERNEL
);
4083 if (!vi
->big_packets
|| vi
->mergeable_rx_bufs
) {
4084 ctx
= kcalloc(total_vqs
, sizeof(*ctx
), GFP_KERNEL
);
4091 /* Parameters for control virtqueue, if any */
4093 callbacks
[total_vqs
- 1] = NULL
;
4094 names
[total_vqs
- 1] = "control";
4097 /* Allocate/initialize parameters for send/receive virtqueues */
4098 for (i
= 0; i
< vi
->max_queue_pairs
; i
++) {
4099 callbacks
[rxq2vq(i
)] = skb_recv_done
;
4100 callbacks
[txq2vq(i
)] = skb_xmit_done
;
4101 sprintf(vi
->rq
[i
].name
, "input.%d", i
);
4102 sprintf(vi
->sq
[i
].name
, "output.%d", i
);
4103 names
[rxq2vq(i
)] = vi
->rq
[i
].name
;
4104 names
[txq2vq(i
)] = vi
->sq
[i
].name
;
4106 ctx
[rxq2vq(i
)] = true;
4109 ret
= virtio_find_vqs_ctx(vi
->vdev
, total_vqs
, vqs
, callbacks
,
4115 vi
->cvq
= vqs
[total_vqs
- 1];
4116 if (virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_CTRL_VLAN
))
4117 vi
->dev
->features
|= NETIF_F_HW_VLAN_CTAG_FILTER
;
4120 for (i
= 0; i
< vi
->max_queue_pairs
; i
++) {
4121 vi
->rq
[i
].vq
= vqs
[rxq2vq(i
)];
4122 vi
->rq
[i
].min_buf_len
= mergeable_min_buf_len(vi
, vi
->rq
[i
].vq
);
4123 vi
->sq
[i
].vq
= vqs
[txq2vq(i
)];
4126 /* run here: ret == 0. */
4141 static int virtnet_alloc_queues(struct virtnet_info
*vi
)
4146 vi
->ctrl
= kzalloc(sizeof(*vi
->ctrl
), GFP_KERNEL
);
4152 vi
->sq
= kcalloc(vi
->max_queue_pairs
, sizeof(*vi
->sq
), GFP_KERNEL
);
4155 vi
->rq
= kcalloc(vi
->max_queue_pairs
, sizeof(*vi
->rq
), GFP_KERNEL
);
4159 INIT_DELAYED_WORK(&vi
->refill
, refill_work
);
4160 for (i
= 0; i
< vi
->max_queue_pairs
; i
++) {
4161 vi
->rq
[i
].pages
= NULL
;
4162 netif_napi_add_weight(vi
->dev
, &vi
->rq
[i
].napi
, virtnet_poll
,
4164 netif_napi_add_tx_weight(vi
->dev
, &vi
->sq
[i
].napi
,
4166 napi_tx
? napi_weight
: 0);
4168 sg_init_table(vi
->rq
[i
].sg
, ARRAY_SIZE(vi
->rq
[i
].sg
));
4169 ewma_pkt_len_init(&vi
->rq
[i
].mrg_avg_pkt_len
);
4170 sg_init_table(vi
->sq
[i
].sg
, ARRAY_SIZE(vi
->sq
[i
].sg
));
4172 u64_stats_init(&vi
->rq
[i
].stats
.syncp
);
4173 u64_stats_init(&vi
->sq
[i
].stats
.syncp
);
4186 static int init_vqs(struct virtnet_info
*vi
)
4190 /* Allocate send & receive queues */
4191 ret
= virtnet_alloc_queues(vi
);
4195 ret
= virtnet_find_vqs(vi
);
4199 virtnet_rq_set_premapped(vi
);
4202 virtnet_set_affinity(vi
);
4208 virtnet_free_queues(vi
);
4214 static ssize_t
mergeable_rx_buffer_size_show(struct netdev_rx_queue
*queue
,
4217 struct virtnet_info
*vi
= netdev_priv(queue
->dev
);
4218 unsigned int queue_index
= get_netdev_rx_queue_index(queue
);
4219 unsigned int headroom
= virtnet_get_headroom(vi
);
4220 unsigned int tailroom
= headroom
? sizeof(struct skb_shared_info
) : 0;
4221 struct ewma_pkt_len
*avg
;
4223 BUG_ON(queue_index
>= vi
->max_queue_pairs
);
4224 avg
= &vi
->rq
[queue_index
].mrg_avg_pkt_len
;
4225 return sprintf(buf
, "%u\n",
4226 get_mergeable_buf_len(&vi
->rq
[queue_index
], avg
,
4227 SKB_DATA_ALIGN(headroom
+ tailroom
)));
4230 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute
=
4231 __ATTR_RO(mergeable_rx_buffer_size
);
4233 static struct attribute
*virtio_net_mrg_rx_attrs
[] = {
4234 &mergeable_rx_buffer_size_attribute
.attr
,
4238 static const struct attribute_group virtio_net_mrg_rx_group
= {
4239 .name
= "virtio_net",
4240 .attrs
= virtio_net_mrg_rx_attrs
4244 static bool virtnet_fail_on_feature(struct virtio_device
*vdev
,
4246 const char *fname
, const char *dname
)
4248 if (!virtio_has_feature(vdev
, fbit
))
4251 dev_err(&vdev
->dev
, "device advertises feature %s but not %s",
4257 #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
4258 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
4260 static bool virtnet_validate_features(struct virtio_device
*vdev
)
4262 if (!virtio_has_feature(vdev
, VIRTIO_NET_F_CTRL_VQ
) &&
4263 (VIRTNET_FAIL_ON(vdev
, VIRTIO_NET_F_CTRL_RX
,
4264 "VIRTIO_NET_F_CTRL_VQ") ||
4265 VIRTNET_FAIL_ON(vdev
, VIRTIO_NET_F_CTRL_VLAN
,
4266 "VIRTIO_NET_F_CTRL_VQ") ||
4267 VIRTNET_FAIL_ON(vdev
, VIRTIO_NET_F_GUEST_ANNOUNCE
,
4268 "VIRTIO_NET_F_CTRL_VQ") ||
4269 VIRTNET_FAIL_ON(vdev
, VIRTIO_NET_F_MQ
, "VIRTIO_NET_F_CTRL_VQ") ||
4270 VIRTNET_FAIL_ON(vdev
, VIRTIO_NET_F_CTRL_MAC_ADDR
,
4271 "VIRTIO_NET_F_CTRL_VQ") ||
4272 VIRTNET_FAIL_ON(vdev
, VIRTIO_NET_F_RSS
,
4273 "VIRTIO_NET_F_CTRL_VQ") ||
4274 VIRTNET_FAIL_ON(vdev
, VIRTIO_NET_F_HASH_REPORT
,
4275 "VIRTIO_NET_F_CTRL_VQ") ||
4276 VIRTNET_FAIL_ON(vdev
, VIRTIO_NET_F_NOTF_COAL
,
4277 "VIRTIO_NET_F_CTRL_VQ") ||
4278 VIRTNET_FAIL_ON(vdev
, VIRTIO_NET_F_VQ_NOTF_COAL
,
4279 "VIRTIO_NET_F_CTRL_VQ"))) {
4286 #define MIN_MTU ETH_MIN_MTU
4287 #define MAX_MTU ETH_MAX_MTU
4289 static int virtnet_validate(struct virtio_device
*vdev
)
4291 if (!vdev
->config
->get
) {
4292 dev_err(&vdev
->dev
, "%s failure: config access disabled\n",
4297 if (!virtnet_validate_features(vdev
))
4300 if (virtio_has_feature(vdev
, VIRTIO_NET_F_MTU
)) {
4301 int mtu
= virtio_cread16(vdev
,
4302 offsetof(struct virtio_net_config
,
4305 __virtio_clear_bit(vdev
, VIRTIO_NET_F_MTU
);
4308 if (virtio_has_feature(vdev
, VIRTIO_NET_F_STANDBY
) &&
4309 !virtio_has_feature(vdev
, VIRTIO_NET_F_MAC
)) {
4310 dev_warn(&vdev
->dev
, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
4311 __virtio_clear_bit(vdev
, VIRTIO_NET_F_STANDBY
);
4317 static bool virtnet_check_guest_gso(const struct virtnet_info
*vi
)
4319 return virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_GUEST_TSO4
) ||
4320 virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_GUEST_TSO6
) ||
4321 virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_GUEST_ECN
) ||
4322 virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_GUEST_UFO
) ||
4323 (virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_GUEST_USO4
) &&
4324 virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_GUEST_USO6
));
4327 static void virtnet_set_big_packets(struct virtnet_info
*vi
, const int mtu
)
4329 bool guest_gso
= virtnet_check_guest_gso(vi
);
4331 /* If device can receive ANY guest GSO packets, regardless of mtu,
4332 * allocate packets of maximum size, otherwise limit it to only
4333 * mtu size worth only.
4335 if (mtu
> ETH_DATA_LEN
|| guest_gso
) {
4336 vi
->big_packets
= true;
4337 vi
->big_packets_num_skbfrags
= guest_gso
? MAX_SKB_FRAGS
: DIV_ROUND_UP(mtu
, PAGE_SIZE
);
4341 static int virtnet_probe(struct virtio_device
*vdev
)
4343 int i
, err
= -ENOMEM
;
4344 struct net_device
*dev
;
4345 struct virtnet_info
*vi
;
4346 u16 max_queue_pairs
;
4349 /* Find if host supports multiqueue/rss virtio_net device */
4350 max_queue_pairs
= 1;
4351 if (virtio_has_feature(vdev
, VIRTIO_NET_F_MQ
) || virtio_has_feature(vdev
, VIRTIO_NET_F_RSS
))
4353 virtio_cread16(vdev
, offsetof(struct virtio_net_config
, max_virtqueue_pairs
));
4355 /* We need at least 2 queue's */
4356 if (max_queue_pairs
< VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN
||
4357 max_queue_pairs
> VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX
||
4358 !virtio_has_feature(vdev
, VIRTIO_NET_F_CTRL_VQ
))
4359 max_queue_pairs
= 1;
4361 /* Allocate ourselves a network device with room for our info */
4362 dev
= alloc_etherdev_mq(sizeof(struct virtnet_info
), max_queue_pairs
);
4366 /* Set up network device as normal. */
4367 dev
->priv_flags
|= IFF_UNICAST_FLT
| IFF_LIVE_ADDR_CHANGE
|
4368 IFF_TX_SKB_NO_LINEAR
;
4369 dev
->netdev_ops
= &virtnet_netdev
;
4370 dev
->features
= NETIF_F_HIGHDMA
;
4372 dev
->ethtool_ops
= &virtnet_ethtool_ops
;
4373 SET_NETDEV_DEV(dev
, &vdev
->dev
);
4375 /* Do we support "hardware" checksums? */
4376 if (virtio_has_feature(vdev
, VIRTIO_NET_F_CSUM
)) {
4377 /* This opens up the world of extra features. */
4378 dev
->hw_features
|= NETIF_F_HW_CSUM
| NETIF_F_SG
;
4380 dev
->features
|= NETIF_F_HW_CSUM
| NETIF_F_SG
;
4382 if (virtio_has_feature(vdev
, VIRTIO_NET_F_GSO
)) {
4383 dev
->hw_features
|= NETIF_F_TSO
4384 | NETIF_F_TSO_ECN
| NETIF_F_TSO6
;
4386 /* Individual feature bits: what can host handle? */
4387 if (virtio_has_feature(vdev
, VIRTIO_NET_F_HOST_TSO4
))
4388 dev
->hw_features
|= NETIF_F_TSO
;
4389 if (virtio_has_feature(vdev
, VIRTIO_NET_F_HOST_TSO6
))
4390 dev
->hw_features
|= NETIF_F_TSO6
;
4391 if (virtio_has_feature(vdev
, VIRTIO_NET_F_HOST_ECN
))
4392 dev
->hw_features
|= NETIF_F_TSO_ECN
;
4393 if (virtio_has_feature(vdev
, VIRTIO_NET_F_HOST_USO
))
4394 dev
->hw_features
|= NETIF_F_GSO_UDP_L4
;
4396 dev
->features
|= NETIF_F_GSO_ROBUST
;
4399 dev
->features
|= dev
->hw_features
& NETIF_F_ALL_TSO
;
4400 /* (!csum && gso) case will be fixed by register_netdev() */
4402 if (virtio_has_feature(vdev
, VIRTIO_NET_F_GUEST_CSUM
))
4403 dev
->features
|= NETIF_F_RXCSUM
;
4404 if (virtio_has_feature(vdev
, VIRTIO_NET_F_GUEST_TSO4
) ||
4405 virtio_has_feature(vdev
, VIRTIO_NET_F_GUEST_TSO6
))
4406 dev
->features
|= NETIF_F_GRO_HW
;
4407 if (virtio_has_feature(vdev
, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
))
4408 dev
->hw_features
|= NETIF_F_GRO_HW
;
4410 dev
->vlan_features
= dev
->features
;
4411 dev
->xdp_features
= NETDEV_XDP_ACT_BASIC
| NETDEV_XDP_ACT_REDIRECT
;
4413 /* MTU range: 68 - 65535 */
4414 dev
->min_mtu
= MIN_MTU
;
4415 dev
->max_mtu
= MAX_MTU
;
4417 /* Configuration may specify what MAC to use. Otherwise random. */
4418 if (virtio_has_feature(vdev
, VIRTIO_NET_F_MAC
)) {
4421 virtio_cread_bytes(vdev
,
4422 offsetof(struct virtio_net_config
, mac
),
4424 eth_hw_addr_set(dev
, addr
);
4426 eth_hw_addr_random(dev
);
4427 dev_info(&vdev
->dev
, "Assigned random MAC address %pM\n",
4431 /* Set up our device-specific information */
4432 vi
= netdev_priv(dev
);
4437 INIT_WORK(&vi
->config_work
, virtnet_config_changed_work
);
4438 spin_lock_init(&vi
->refill_lock
);
4440 if (virtio_has_feature(vdev
, VIRTIO_NET_F_MRG_RXBUF
)) {
4441 vi
->mergeable_rx_bufs
= true;
4442 dev
->xdp_features
|= NETDEV_XDP_ACT_RX_SG
;
4445 if (virtio_has_feature(vi
->vdev
, VIRTIO_NET_F_NOTF_COAL
)) {
4446 vi
->intr_coal_rx
.max_usecs
= 0;
4447 vi
->intr_coal_tx
.max_usecs
= 0;
4448 vi
->intr_coal_tx
.max_packets
= 0;
4449 vi
->intr_coal_rx
.max_packets
= 0;
4452 if (virtio_has_feature(vdev
, VIRTIO_NET_F_HASH_REPORT
))
4453 vi
->has_rss_hash_report
= true;
4455 if (virtio_has_feature(vdev
, VIRTIO_NET_F_RSS
))
4458 if (vi
->has_rss
|| vi
->has_rss_hash_report
) {
4459 vi
->rss_indir_table_size
=
4460 virtio_cread16(vdev
, offsetof(struct virtio_net_config
,
4461 rss_max_indirection_table_length
));
4463 virtio_cread8(vdev
, offsetof(struct virtio_net_config
, rss_max_key_size
));
4465 vi
->rss_hash_types_supported
=
4466 virtio_cread32(vdev
, offsetof(struct virtio_net_config
, supported_hash_types
));
4467 vi
->rss_hash_types_supported
&=
4468 ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX
|
4469 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX
|
4470 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX
);
4472 dev
->hw_features
|= NETIF_F_RXHASH
;
4475 if (vi
->has_rss_hash_report
)
4476 vi
->hdr_len
= sizeof(struct virtio_net_hdr_v1_hash
);
4477 else if (virtio_has_feature(vdev
, VIRTIO_NET_F_MRG_RXBUF
) ||
4478 virtio_has_feature(vdev
, VIRTIO_F_VERSION_1
))
4479 vi
->hdr_len
= sizeof(struct virtio_net_hdr_mrg_rxbuf
);
4481 vi
->hdr_len
= sizeof(struct virtio_net_hdr
);
4483 if (virtio_has_feature(vdev
, VIRTIO_F_ANY_LAYOUT
) ||
4484 virtio_has_feature(vdev
, VIRTIO_F_VERSION_1
))
4485 vi
->any_header_sg
= true;
4487 if (virtio_has_feature(vdev
, VIRTIO_NET_F_CTRL_VQ
))
4490 if (virtio_has_feature(vdev
, VIRTIO_NET_F_MTU
)) {
4491 mtu
= virtio_cread16(vdev
,
4492 offsetof(struct virtio_net_config
,
4494 if (mtu
< dev
->min_mtu
) {
4495 /* Should never trigger: MTU was previously validated
4496 * in virtnet_validate.
4499 "device MTU appears to have changed it is now %d < %d",
4509 virtnet_set_big_packets(vi
, mtu
);
4511 if (vi
->any_header_sg
)
4512 dev
->needed_headroom
= vi
->hdr_len
;
4514 /* Enable multiqueue by default */
4515 if (num_online_cpus() >= max_queue_pairs
)
4516 vi
->curr_queue_pairs
= max_queue_pairs
;
4518 vi
->curr_queue_pairs
= num_online_cpus();
4519 vi
->max_queue_pairs
= max_queue_pairs
;
4521 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
4527 if (vi
->mergeable_rx_bufs
)
4528 dev
->sysfs_rx_queue_group
= &virtio_net_mrg_rx_group
;
4530 netif_set_real_num_tx_queues(dev
, vi
->curr_queue_pairs
);
4531 netif_set_real_num_rx_queues(dev
, vi
->curr_queue_pairs
);
4533 virtnet_init_settings(dev
);
4535 if (virtio_has_feature(vdev
, VIRTIO_NET_F_STANDBY
)) {
4536 vi
->failover
= net_failover_create(vi
->dev
);
4537 if (IS_ERR(vi
->failover
)) {
4538 err
= PTR_ERR(vi
->failover
);
4543 if (vi
->has_rss
|| vi
->has_rss_hash_report
)
4544 virtnet_init_default_rss(vi
);
4546 /* serialize netdev register + virtio_device_ready() with ndo_open() */
4549 err
= register_netdevice(dev
);
4551 pr_debug("virtio_net: registering device failed\n");
4556 virtio_device_ready(vdev
);
4558 _virtnet_set_queues(vi
, vi
->curr_queue_pairs
);
	/* a random MAC address has been assigned, notify the device.
	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
	 * because many devices work fine without getting MAC explicitly
	 */
	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct scatterlist sg;

		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			pr_debug("virtio_net: setting MAC address failed\n");
			rtnl_unlock();
			err = -EINVAL;
			goto free_unregister_netdev;
		}
	}

	rtnl_unlock();

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}
	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	netif_carrier_off(dev);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		virtnet_update_settings(vi);
		netif_carrier_on(dev);
	}
	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
			set_bit(guest_offloads[i], &vi->guest_offloads);
	vi->guest_offloads_capable = vi->guest_offloads;

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;
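
	/* Error unwinding: each label below releases what was set up after
	 * the corresponding failure point, in reverse order of initialization.
	 */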
free_unregister_netdev:
	unregister_netdev(dev);
free_failover:
	net_failover_destroy(vi->failover);
free_vqs:
	virtio_reset_device(vdev);
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free:
	free_netdev(dev);
	return err;
}
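
/* Common virtqueue teardown: reset the device so it stops touching our
 * buffers, free anything still queued, then delete the virtqueues. Shared
 * by the remove and freeze paths (and the restore error path) below.
 */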
static void remove_vq_common(struct virtnet_info *vi)
{
	virtio_reset_device(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}
static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	net_failover_destroy(vi->failover);

	remove_vq_common(vi);

	free_netdev(vi->dev);
}
static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);
	virtnet_freeze_down(vdev);
	remove_vq_common(vi);

	return 0;
}
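
/* PM counterpart of the freeze path above: bring the virtqueues back up,
 * restore the queue-pair count and re-register the CPU hotplug notifier.
 * Both hooks are only wired up under CONFIG_PM_SLEEP in the driver
 * structure below, hence the __maybe_unused annotations.
 */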
static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = virtnet_restore_up(vdev);
	if (err)
		return err;
	virtnet_set_queues(vi, vi->curr_queue_pairs);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		virtnet_freeze_down(vdev);
		remove_vq_common(vi);
		return err;
	}

	return 0;
}
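
/* Devices this driver binds to; exported to userspace via
 * MODULE_DEVICE_TABLE() at the bottom of the file for module autoloading.
 */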
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
	VIRTIO_NET_F_VQ_NOTF_COAL, \
	VIRTIO_NET_F_GUEST_HDRLEN
static unsigned int features[] = {
	VIRTNET_FEATURES,
};

static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};
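
/* The virtio core matches features[] against VIRTIO 1.0+ devices and
 * features_legacy[] against legacy (pre-1.0) transitional devices, via the
 * feature_table and feature_table_legacy fields below.
 */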
static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.validate = virtnet_validate,
	.probe = virtnet_probe,
	.remove = virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};
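
/* Module init: set up two CPU hotplug multi-state callbacks (a dynamic
 * "online" state and a "dead" state) before registering the virtio driver,
 * unwinding them in reverse order if any step fails.
 */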
static __init int virtio_net_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;
	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);
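
/* Module exit mirrors init in reverse: unregister the virtio driver first,
 * then remove the CPU hotplug states.
 */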
static __exit void virtio_net_driver_exit(void)
{
	unregister_virtio_driver(&virtio_net_driver);
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");