/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>

#include <asm/sync_bitops.h>

#include "hyperv_net.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);
}

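/* Allocate and initialize the per-device netvsc state. The packet
 * count and alignment limits start at the RNDIS defaults; they are
 * consumed by the send-buffer batching code further below.
 */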
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	atomic_set(&net_device->open_cnt, 0);
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);

	return net_device;
}

static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		vfree(nvdev->chan_table[i].mrc.slots);

	kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}

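/* Undo the buffer setup done in netvsc_init_buf(): revoke the receive
 * and send buffers from the host, tear down their GPADLs, and free the
 * local allocations. Safe even if only part of the setup completed,
 * since each step is guarded by its handle or section count.
 */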
static void netvsc_destroy_buf(struct hv_device *device)
{
	struct nvsp_message *revoke_packet;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndc = netdev_priv(ndev);
	struct netvsc_device *net_device = rtnl_dereference(ndc->nvdev);
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.revoke_recv_buf.id =
			NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}
	kfree(net_device->send_section_map);
}

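/* Allocate the per-channel ring used to batch receive completions
 * before they are posted back to the host. Allocation is attempted on
 * the channel's NUMA node first, then anywhere as a fallback.
 */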
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	int node = cpu_to_node(nvchan->channel->target_cpu);
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
	nvchan->mrc.slots = vzalloc_node(size, node);
	if (!nvchan->mrc.slots)
		nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}

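/* Negotiate the receive and send buffers with the host: allocate each
 * buffer, establish a GPADL for it, hand the handle to the NetVSP, and
 * parse the section layout from the host's response.
 */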
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	size_t map_words;
	int ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
		net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id =
		NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Setup receive completion ring */
	net_device->recv_completion_cnt
		= round_up(net_device->recv_section_cnt + 1,
			   PAGE_SIZE / sizeof(u64));
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now setup the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_buf(device);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

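/* Walk the NVSP version list from newest to oldest, settle on the
 * first version the host accepts, then report the matching NDIS
 * version and set up the data buffers.
 */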
static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_major_ver =
		(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_minor_ver =
		ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}

static void netvsc_disconnect_vsp(struct hv_device *device)
{
	netvsc_destroy_buf(device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	cancel_work_sync(&net_device->subchan_work);

	netvsc_disconnect_vsp(device);

	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* And disassociate NAPI context from device */
	for (i = 0; i < net_device->num_chn; i++)
		netif_napi_del(&net_device->chan_table[i].napi);

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}

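/* Transmit queues are stopped when ring space falls below the low
 * watermark and woken once it climbs back above the high watermark;
 * the gap between the two provides hysteresis so the queue is not
 * toggled on every packet. For example, on a ring with ring_datasize
 * 16384, avail_write of 1024 bytes is 1024 * 100 / 16384 = 6 percent,
 * below the 10 percent low watermark, so the queue would be stopped.
 */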
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

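/* Completion handler for an RNDIS data packet: release the send-buffer
 * slot, credit the stats, free the skb, and wake the queue if the ring
 * has drained past the high watermark.
 */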
static void netvsc_send_tx_complete(struct netvsc_device *net_device,
				    struct vmbus_channel *incoming_channel,
				    struct hv_device *device,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = device->channel;
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;
		channel = incoming_channel;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (net_device->destroy && queue_sends == 0)
		wake_up(&net_device->wait_drain);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
	    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
	     queue_sends < 1)) {
		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
		ndev_ctx->eth_stats.wake_queue++;
	}
}

static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   struct hv_device *device,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
	struct net_device *ndev = hv_get_drvdata(device);

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
	case NVSP_MSG5_TYPE_SUBCHANNEL:
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(net_device, incoming_channel,
					device, desc, budget);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}

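/* Copy a packet into its send-buffer section at offset pend_size.
 * When xmit_more indicates more packets will follow and this is not a
 * partial copy, the payload is padded out to pkt_align so the next
 * packet lands on an aligned boundary.
 */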
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				   unsigned int section_index,
				   u32 pend_size,
				   struct hv_netvsc_packet *packet,
				   struct rndis_message *rndis_msg,
				   struct hv_page_buffer *pb,
				   struct sk_buff *skb)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 msg_size = 0;
	u32 padding = 0;
	u32 remain = packet->total_data_buflen % net_device->pkt_align;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;

	/* Add padding */
	if (skb->xmit_more && remain && !packet->cp_partial) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
		u32 offset = pb[i].offset;
		u32 len = pb[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}

	if (padding) {
		memset(dest, 0, padding);
		msg_size += padding;
	}

	return msg_size;
}

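/* Post one NVSP_MSG1_TYPE_SEND_RNDIS_PKT to the host, either inline or
 * with page buffers, and apply queue flow control based on the ring
 * space sampled before the send.
 */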
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct nvsp_1_message_send_rndis_packet * const rpkt =
		&nvmsg.msg.v1_msg.send_rndis_pkt;
	struct netvsc_channel * const nvchan =
		&net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb)
		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
	else
		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */

	rpkt->send_buf_section_index = packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		rpkt->send_buf_section_size = 0;
	else
		rpkt->send_buf_section_size = packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	if (packet->page_buf_cnt) {
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  pb, packet->page_buf_cnt,
						  &nvmsg, sizeof(nvmsg),
						  req_id);
	} else {
		ret = vmbus_sendpacket(out_channel,
				       &nvmsg, sizeof(nvmsg),
				       req_id, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(txq);
			ndev_ctx->eth_stats.stop_queue++;
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		ndev_ctx->eth_stats.stop_queue++;
		if (atomic_read(&nvchan->queue_sends) < 1) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

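/* Main transmit path. Small packets are accumulated in the channel's
 * multi-send data (msd) and copied into a shared send-buffer section;
 * anything that cannot be batched, or a pending batch that must be
 * flushed, is sent as its own packet.
 */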
/* RCU already held by caller */
int netvsc_send(struct net_device_context *ndev_ctx,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer *pb,
		struct sk_buff *skb)
{
	struct netvsc_device *net_device
		= rcu_dereference_bh(ndev_ctx->nvdev);
	struct hv_device *device = ndev_ctx->device_ctx;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	/* If device is rescinded, return error and packet will get dropped. */
	if (unlikely(!net_device || net_device->destroy))
		return -ENODEV;

	/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
	 * here before the negotiation with the host is finished and
	 * send_section_map may not be allocated yet.
	 */
	if (unlikely(!net_device->send_section_map))
		return -EAGAIN;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send control message directly without accessing msd (Multi-Send
	 * Data) field which may be changed during data packet processing.
	 */
	if (!skb) {
		cur_send = packet;
		goto send_now;
	}

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
			++ndev_ctx->eth_stats.tx_send_full;
		} else {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, skb);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more && !packet->cp_partial) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

send_now:
	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

/* Send pending recv completions */
static int send_recv_completions(struct net_device *ndev,
				 struct netvsc_device *nvdev,
				 struct netvsc_channel *nvchan)
{
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
		u32 status;
	} __packed;
	struct recv_comp_msg msg = {
		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
	};
	int ret;

	while (mrc->first != mrc->next) {
		const struct recv_comp_data *rcd
			= mrc->slots + mrc->first;

		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (ret) {
			struct net_device_context *ndev_ctx = netdev_priv(ndev);

			++ndev_ctx->eth_stats.rx_comp_busy;
			return ret;
		}

		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
	}

	/* receive completion ring has been emptied */
	if (unlikely(nvdev->destroy))
		wake_up(&nvdev->wait_drain);

	return 0;
}

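/* The mrc ring is indexed by first (oldest pending completion) and
 * next (first free slot); one slot is always left unused so that
 * first == next unambiguously means empty rather than full.
 */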
/* Count how many receive completions are outstanding */
static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
				 const struct multi_recv_comp *mrc,
				 u32 *filled, u32 *avail)
{
	u32 count = nvdev->recv_completion_cnt;

	if (mrc->next >= mrc->first)
		*filled = mrc->next - mrc->first;
	else
		*filled = (count - mrc->first) + mrc->next;

	*avail = count - *filled - 1;
}

/* Add receive complete to ring to send to host. */
static void enq_receive_complete(struct net_device *ndev,
				 struct netvsc_device *nvdev, u16 q_idx,
				 u64 tid, u32 status)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_data *rcd;
	u32 filled, avail;

	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		send_recv_completions(ndev, nvdev, nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

	if (unlikely(!avail)) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, tid);
		return;
	}

	rcd = mrc->slots + mrc->next;
	rcd->tid = tid;
	rcd->status = status;

	if (++mrc->next == nvdev->recv_completion_cnt)
		mrc->next = 0;
}

*ndev
,
1045 struct netvsc_device
*net_device
,
1046 struct net_device_context
*net_device_ctx
,
1047 struct hv_device
*device
,
1048 struct vmbus_channel
*channel
,
1049 const struct vmpacket_descriptor
*desc
,
1050 struct nvsp_message
*nvsp
)
1052 const struct vmtransfer_page_packet_header
*vmxferpage_packet
1053 = container_of(desc
, const struct vmtransfer_page_packet_header
, d
);
1054 u16 q_idx
= channel
->offermsg
.offer
.sub_channel_index
;
1055 char *recv_buf
= net_device
->recv_buf
;
1056 u32 status
= NVSP_STAT_SUCCESS
;
1060 /* Make sure this is a valid nvsp packet */
1061 if (unlikely(nvsp
->hdr
.msg_type
!= NVSP_MSG1_TYPE_SEND_RNDIS_PKT
)) {
1062 netif_err(net_device_ctx
, rx_err
, ndev
,
1063 "Unknown nvsp packet type received %u\n",
1064 nvsp
->hdr
.msg_type
);
1068 if (unlikely(vmxferpage_packet
->xfer_pageset_id
!= NETVSC_RECEIVE_BUFFER_ID
)) {
1069 netif_err(net_device_ctx
, rx_err
, ndev
,
1070 "Invalid xfer page set id - expecting %x got %x\n",
1071 NETVSC_RECEIVE_BUFFER_ID
,
1072 vmxferpage_packet
->xfer_pageset_id
);
1076 count
= vmxferpage_packet
->range_cnt
;
1078 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
1079 for (i
= 0; i
< count
; i
++) {
1080 void *data
= recv_buf
1081 + vmxferpage_packet
->ranges
[i
].byte_offset
;
1082 u32 buflen
= vmxferpage_packet
->ranges
[i
].byte_count
;
1084 /* Pass it to the upper layer */
1085 status
= rndis_filter_receive(ndev
, net_device
, device
,
1086 channel
, data
, buflen
);
1089 enq_receive_complete(ndev
, net_device
, q_idx
,
1090 vmxferpage_packet
->d
.trans_id
, status
);
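/* The host sent a new send indirection table: validate its size and
 * copy it into tx_table for use by transmit queue selection.
 */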
static void netvsc_send_table(struct hv_device *hdev,
			      struct nvsp_message *nvmsg)
{
	struct net_device *ndev = hv_get_drvdata(hdev);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	int i;
	u32 count, *tab;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		net_device_ctx->tx_table[i] = tab[i];
}

static void netvsc_send_vf(struct net_device_context *net_device_ctx,
			   struct nvsp_message *nvmsg)
{
	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}

static inline void netvsc_receive_inband(struct hv_device *hdev,
					 struct net_device_context *net_device_ctx,
					 struct nvsp_message *nvmsg)
{
	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(hdev, nvmsg);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(net_device_ctx, nvmsg);
		break;
	}
}

static int netvsc_process_raw_pkt(struct hv_device *device,
				  struct vmbus_channel *channel,
				  struct netvsc_device *net_device,
				  struct net_device *ndev,
				  const struct vmpacket_descriptor *desc,
				  int budget)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct nvsp_message *nvmsg = hv_pkt_data(desc);

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(net_device, channel, device,
				       desc, budget);
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		return netvsc_receive(ndev, net_device, net_device_ctx,
				      device, channel, desc, nvmsg);

	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(device, net_device_ctx, nvmsg);
		break;

	default:
		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
			   desc->type, desc->trans_id);
		break;
	}

	return 0;
}

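/* Sub-channels carry no device pointer of their own; map any channel
 * back to its hv_device via the primary channel.
 */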
static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
	struct vmbus_channel *primary = channel->primary_channel;

	return primary ? primary->device_obj : channel->device_obj;
}

/* Network processing softirq
 * Process data in incoming ring buffer from host
 * Stops when ring is empty or budget is met or exceeded.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct netvsc_device *net_device = nvchan->net_device;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	struct net_device *ndev = hv_get_drvdata(device);
	int work_done = 0;

	/* If starting a new interval */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, channel, net_device,
						    ndev, nvchan->desc, budget);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	/* If send of pending receive completions succeeded
	 *   and did not exhaust NAPI budget this time
	 *   and not doing busy poll
	 * then re-enable host interrupts
	 *   and reschedule if ring is not empty.
	 */
	if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
	    work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    hv_end_read(&channel->inbound)) {
		hv_begin_read(&channel->inbound);
		napi_reschedule(napi);
	}

	/* Driver may overshoot since multiple packets per descriptor */
	return min(work_done, budget);
}

/* Call back when data is available in host ring buffer.
 * Processing is deferred until network softirq (NAPI)
 */
void netvsc_channel_cb(void *context)
{
	struct netvsc_channel *nvchan = context;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/* preload first vmpacket descriptor */
	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	if (napi_schedule_prep(&nvchan->napi)) {
		/* disable interrupts from host */
		hv_begin_read(rbi);

		__napi_schedule(&nvchan->napi);
	}
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
struct netvsc_device *netvsc_device_add(struct hv_device *device,
				const struct netvsc_device_info *device_info)
{
	int ret = 0;
	int ring_size = device_info->ring_size;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	int i;

	net_device = alloc_net_device();
	if (!net_device)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		net_device_ctx->tx_table[i] = 0;

	net_device->ring_size = ring_size;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling
	 */
	set_channel_read_mode(device->channel, HV_CALL_ISR);

	/* If we're reopening the device we may have multiple queues, fill the
	 * chn_table with the default channel to use it before subchannels are
	 * opened.
	 * Initialize the channel state before we open;
	 * we can be interrupted as soon as we open the channel.
	 */
	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;
		nvchan->net_device = net_device;
		u64_stats_init(&nvchan->tx_stats.syncp);
		u64_stats_init(&nvchan->rx_stats.syncp);
	}

	/* Enable NAPI handler before init callbacks */
	netif_napi_add(ndev, &net_device->chan_table[0].napi,
		       netvsc_poll, NAPI_POLL_WEIGHT);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
			 netvsc_channel_cb,
			 net_device->chan_table);

	if (ret != 0) {
		netif_napi_del(&net_device->chan_table[0].napi);
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

	napi_enable(&net_device->chan_table[0].napi);

	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
	 * populated.
	 */
	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device, net_device, device_info);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	return net_device;

close:
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
	napi_disable(&net_device->chan_table[0].napi);

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	free_netvsc_device(&net_device->rcu);

	return ERR_PTR(ret);
}