/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN		64
#define NETVSC_MIN_TX_SECTIONS	10
#define NETVSC_DEFAULT_TX	192	/* ~1M */
#define NETVSC_MIN_RX_SECTIONS	10	/* ~64K */
#define NETVSC_DEFAULT_RX	10485	/* Max ~16M */

#define LINKCHANGE_INT		(2 * HZ)
#define VF_TAKEOVER_INT		(HZ / 10)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
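
/* A usage sketch (hypothetical value): the ring size can only be set at
 * module load time, e.g. "modprobe hv_netvsc ring_size=256"; values below
 * RING_SIZE_MIN are raised to the minimum in netvsc_drv_init() below.
 */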

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
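
/* A usage sketch (hypothetical value): "modprobe hv_netvsc debug=14" makes
 * netif_msg_init() enable the low 14 NETIF_MSG_* bits; the default of -1
 * keeps the default_msg mask above.
 */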

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	rndis_filter_update(nvdev);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	rdev = nvdev->extension;

	if (!rdev->link_state)
		netif_carrier_on(net);

	if (vf_netdev) {
		/* Setting the synthetic device up transparently sets the
		 * slave as up. If open fails, then the slave will still be
		 * offline (and not used).
		 */
		ret = dev_open(vf_netdev);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret = 0;
	u32 aread, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		goto out;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chan_table[i].channel;
			if (!chn)
				continue;

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

out:
	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   u32 pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
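/* For example (assumed interface name), the UDP hash level handled by
 * netvsc_set_rss_hash_opts() below maps to:
 *   ethtool -N eth0 rx-flow-hash udp4 sdfn   # hash on addresses and ports
 *   ethtool -N eth0 rx-flow-hash udp4 sd     # fall back to L3 (addresses)
 */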
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;

		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;

		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}

static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv,
			       select_queue_fallback_t fallback)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (unlikely(txq >= ndev->real_num_tx_queues))
		txq -= ndev->real_num_tx_queues;

	return txq;
}

static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring the unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].size = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}
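
/* A worked example for fill_pg_buf() above (hypothetical values, 4K pages):
 * offset = 5000 on a compound page gives page += 1 and offset = 904; a len
 * of 4000 then produces two entries: 3192 bytes at offset 904, followed by
 * 808 bytes at offset 0 of the next page.
 */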

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused fragments from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}
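
/* For example (hypothetical skb): 14 bytes of linear data starting at page
 * offset 4090 yield DIV_ROUND_UP(4104, 4096) = 2 slots before any fragment
 * slots are added by count_skb_frag_slots().
 */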

static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	struct rndis_per_packet_info *ppi;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* if VF is present and up then redirect packets;
	 * already called with rcu_read_lock_bh
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb is used for the hv_netvsc_packet structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);

		vlan = (void *)ppi + ppi->ppi_offset;
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
				    TCP_LARGESEND_PKTINFO);

		lso_info = (void *)ppi + ppi->ppi_offset;

		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
					    TCPIP_CHKSUM_PKTINFO);

			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
									 ppi->ppi_offset);

			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_reconfig *event;
	unsigned long flags;

	net = hv_get_drvdata(device_obj);

	if (!net)
		return;

	ndev_ctx = netdev_priv(net);

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct napi_struct *napi,
					     const struct ndis_tcp_ip_checksum_info *csum_info,
					     const struct ndis_pkt_8021q_info *vlan,
					     void *data, u32 buflen)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated.
	 */
	skb_put_data(skb, data, buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct vmbus_channel *channel,
			 void *data, u32 len,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *net_device;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	rcu_read_lock();
	net_device = rcu_dereference(net_device_ctx->nvdev);
	if (unlikely(!net_device))
		goto drop;

	nvchan = &net_device->chan_table[q_idx];

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
				    csum_info, vlan, data, len);
	if (unlikely(!skb)) {
drop:
		++net->stats.rx_dropped;
		rcu_read_unlock();
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += len;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);

	rcu_read_unlock();

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info device_info;
	bool was_opened;
	int ret = 0;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;
	was_opened = rndis_filter_opened(nvdev);
	if (was_opened)
		rndis_filter_close(nvdev);

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = count;
	device_info.ring_size = ring_size;
	device_info.send_sections = nvdev->send_section_cnt;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = nvdev->recv_section_cnt;
	device_info.recv_section_size = nvdev->recv_section_size;

	rndis_filter_device_remove(dev, nvdev);

	nvdev = rndis_filter_device_add(dev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		device_info.num_chn = orig;
		nvdev = rndis_filter_device_add(dev, &device_info);

		if (IS_ERR(nvdev)) {
			netdev_err(net, "restoring channel setting failed: %ld\n",
				   PTR_ERR(nvdev));
			return ret;
		}
	}

	if (was_opened)
		rndis_filter_open(nvdev);

	/* We may have missed link change notifications */
	net_device_ctx->last_reconfig = 0;
	schedule_delayed_work(&net_device_ctx->dwork, 0);

	return ret;
}

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->l4_hash = HV_DEFAULT_L4HASH;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct hv_device *hdev = ndevctx->device_ctx;
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info device_info;
	bool was_opened;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			return ret;
	}

	netif_device_detach(ndev);
	was_opened = rndis_filter_opened(nvdev);
	if (was_opened)
		rndis_filter_close(nvdev);

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = nvdev->num_chn;
	device_info.send_sections = nvdev->send_section_cnt;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = nvdev->recv_section_cnt;
	device_info.recv_section_size = nvdev->recv_section_size;

	rndis_filter_device_remove(hdev, nvdev);

	ndev->mtu = mtu;

	nvdev = rndis_filter_device_add(hdev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);

		/* Attempt rollback to original MTU */
		ndev->mtu = orig_mtu;
		nvdev = rndis_filter_device_add(hdev, &device_info);

		if (vf_netdev)
			dev_set_mtu(vf_netdev, orig_mtu);

		if (IS_ERR(nvdev)) {
			netdev_err(ndev, "restoring mtu failed: %ld\n",
				   PTR_ERR(nvdev));
			return ret;
		}
	}

	if (was_opened)
		rndis_filter_open(nvdev);

	netif_device_attach(ndev);

	/* We may have missed link change notifications */
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}

static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	if (!nvdev)
		return;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes   += vf_tot.rx_bytes;
	t->tx_bytes   += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes	+= bytes;
		t->tx_packets	+= packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes	+= bytes;
		t->rx_packets	+= packets;
		t->multicast	+= multicast;
	}
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr);
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
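
/* The resulting "ethtool -S" layout is: the global netvsc_stats entries,
 * then the vf_stats entries, then per-queue tx/rx packet and byte counters
 * named tx_queue_<n>_packets, tx_queue_<n>_bytes, and so on (see
 * netvsc_get_strings() below).
 */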

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN
			+ NETVSC_VF_STATS_LEN
			+ NETVSC_QUEUE_STATS_LEN(nvdev);
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	struct netvsc_vf_pcpu_stats sum;
	unsigned int start;
	u64 packets, bytes;
	int i, j;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	netvsc_get_vf_stats(dev, &sum);
	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	u8 *p = data;
	int i;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		break;
	}
}

static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
			 struct ethtool_rxnfc *info)
{
	const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;

	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
		if (ndc->l4_hash & HV_TCP4_L4HASH)
			info->data |= l4_flag;

		break;

	case TCP_V6_FLOW:
		if (ndc->l4_hash & HV_TCP6_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V4_FLOW:
		if (ndc->l4_hash & HV_UDP4_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V6_FLOW:
		if (ndc->l4_hash & HV_UDP6_L4HASH)
			info->data |= l4_flag;

		break;

	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(ndc, info);
	}
	return -EOPNOTSUPP;
}

static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
				    struct ethtool_rxnfc *info)
{
	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash |= HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash |= HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash |= HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash |= HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash &= ~HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash &= ~HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash &= ~HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash &= ~HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	return -EOPNOTSUPP;
}

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
	struct net_device_context *ndc = netdev_priv(ndev);

	if (info->cmd == ETHTOOL_SRXFH)
		return netvsc_set_rss_hash_opts(ndc, info);

	return -EOPNOTSUPP;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev;
	int i;

	rcu_read_lock();
	ndev = rcu_dereference(ndc->nvdev);
	if (ndev) {
		for (i = 0; i < ndev->num_chn; i++) {
			struct netvsc_channel *nvchan = &ndev->chan_table[i];

			napi_schedule(&nvchan->napi);
		}
	}
	rcu_read_unlock();
}
#endif

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = rndis_dev->rx_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= ndev->num_chn)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			rndis_dev->rx_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key);
}

/* Hyper-V RNDIS protocol does not have ring in the HW sense.
 * It does have pre-allocated receive area which is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}
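
/* For example, assuming the default sizes from hyperv_net.h (a 16 MB
 * receive buffer and 1728-byte receive sections), rx_max_pending comes
 * out to 16 MB / 1728 = ~9709 sections.
 */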

static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}

static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device_info device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	bool was_opened;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn;
	device_info.ring_size = ring_size;
	device_info.send_sections = new_tx;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = new_rx;
	device_info.recv_section_size = nvdev->recv_section_size;

	netif_device_detach(ndev);
	was_opened = rndis_filter_opened(nvdev);
	if (was_opened)
		rndis_filter_close(nvdev);

	rndis_filter_device_remove(hdev, nvdev);

	nvdev = rndis_filter_device_add(hdev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);

		device_info.send_sections = orig.tx_pending;
		device_info.recv_sections = orig.rx_pending;
		nvdev = rndis_filter_device_add(hdev, &device_info);
		if (IS_ERR(nvdev)) {
			netdev_err(ndev, "restoring ringparam failed: %ld\n",
				   PTR_ERR(nvdev));
			return ret;
		}
	}

	if (was_opened)
		rndis_filter_open(nvdev);
	netif_device_attach(ndev);

	/* We may have missed link change notifications */
	ndevctx->last_reconfig = 0;
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_rxnfc	= netvsc_get_rxnfc,
	.set_rxnfc	= netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam	= netvsc_get_ringparam,
	.set_ringparam	= netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	/* if changes are happening, come back later */
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
		return;
	}

	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static struct net_device *get_netvsc_bymac(const u8 *mac)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		if (ether_addr_equal(mac, dev->perm_addr))
			return dev;
	}

	return NULL;
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		struct net_device_context *net_device_ctx;

		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		net_device_ctx = netdev_priv(dev);
		if (!rtnl_dereference(net_device_ctx->nvdev))
			continue;	/* device is removed */

		if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
			return dev;	/* a match */
	}

	return NULL;
}

/* Called when VF is injecting data into network stack.
 * Change the associated network device from VF to netvsc.
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_vf_pcpu_stats *pcpu_stats
		 = this_cpu_ptr(ndev_ctx->vf_stats);

	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	return RX_HANDLER_ANOTHER;
}

static int netvsc_vf_join(struct net_device *vf_netdev,
			  struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	int ret;

	ret = netdev_rx_handler_register(vf_netdev,
					 netvsc_vf_handle_frame, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not register netvsc VF receive handler (err = %d)\n",
			   ret);
		goto rx_handler_failed;
	}

	ret = netdev_upper_dev_link(vf_netdev, ndev, NULL);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not set master device %s (err = %d)\n",
			   ndev->name, ret);
		goto upper_link_failed;
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	vf_netdev->flags |= IFF_SLAVE;

	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
	return 0;

upper_link_failed:
	netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
	return ret;
}
*ndev
,
1826 struct net_device
*vf_netdev
)
1830 /* Align MTU of VF with master */
1831 ret
= dev_set_mtu(vf_netdev
, ndev
->mtu
);
1833 netdev_warn(vf_netdev
,
1834 "unable to change mtu to %u\n", ndev
->mtu
);
1836 if (netif_running(ndev
)) {
1837 ret
= dev_open(vf_netdev
);
1839 netdev_warn(vf_netdev
,
1840 "unable to open: %d\n", ret
);

/* Setup VF as slave of the synthetic device.
 * Runs in workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
{
	struct net_device_context *ndev_ctx
		= container_of(w, struct net_device_context, vf_takeover.work);
	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
	struct net_device *vf_netdev;

	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
		return;
	}

	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		__netvsc_vf_setup(ndev, vf_netdev);

	rtnl_unlock();
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	if (netvsc_vf_join(vf_netdev, ndev) != 0)
		return NOTIFY_DONE;

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

/* VF up/down change detected, schedule to change data path */
static int netvsc_vf_changed(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	bool vf_is_up = netif_running(vf_netdev);

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev)
		return NOTIFY_DONE;

	netvsc_switch_datapath(ndev, vf_is_up);
	netdev_info(ndev, "Data path switched %s VF: %s\n",
		    vf_is_up ? "to" : "from", vf_netdev->name);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	netdev_rx_handler_unregister(vf_netdev);
	netdev_upper_dev_unlink(vf_netdev, ndev);
	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);

	return NOTIFY_OK;
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret = -ENOMEM;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		goto no_net;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
	if (!net_device_ctx->vf_stats)
		goto no_stats;

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Initialize the number of queues to be 1, we may change it if more
	 * channels are offered later.
	 */
	netif_set_real_num_tx_queues(net, 1);
	netif_set_real_num_rx_queues(net, 1);

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
	device_info.send_sections = NETVSC_DEFAULT_TX;
	device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
	device_info.recv_sections = NETVSC_DEFAULT_RX;
	device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;

	nvdev = rndis_filter_device_add(dev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		goto rndis_failed;
	}

	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	/* hw_features computed in rndis_filter_device_add */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	netdev_lockdep_set_classes(net);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		goto register_failed;
	}

	return ret;

register_failed:
	rndis_filter_device_remove(dev, nvdev);
rndis_failed:
	free_percpu(net_device_ctx->vf_stats);
no_stats:
	hv_set_drvdata(dev, NULL);
	free_netdev(net);
no_net:
	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct net_device *vf_netdev;
	struct net_device *net;

	net = hv_get_drvdata(dev);
	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	netif_device_detach(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed. Also blocks mtu and channel changes.
	 */
	rtnl_lock();
	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		netvsc_unregister_vf(vf_netdev);

	unregister_netdevice(net);

	rndis_filter_device_remove(dev,
				   rtnl_dereference(ndev_ctx->nvdev));
	rtnl_unlock();

	hv_set_drvdata(dev, NULL);

	free_percpu(ndev_ctx->vf_stats);
	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid Vlan dev with same MAC registering as VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
	case NETDEV_DOWN:
		return netvsc_vf_changed(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	ret = vmbus_driver_register(&netvsc_drv);

	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);