/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
static const struct vmbus_device vmbus_devs[] = {
	/* IDE */
	{ .dev_type = HV_IDE, HV_IDE_GUID, .perf_device = true, },

	/* SCSI */
	{ .dev_type = HV_SCSI, HV_SCSI_GUID, .perf_device = true, },

	/* Fibre Channel */
	{ .dev_type = HV_FC, HV_SYNTHFC_GUID, .perf_device = true, },

	/* Synthetic NIC */
	{ .dev_type = HV_NIC, HV_NIC_GUID, .perf_device = true, },

	/* Network Direct */
	{ .dev_type = HV_ND, HV_ND_GUID, .perf_device = true, },

	/* PCIE */
	{ .dev_type = HV_PCIE, HV_PCIE_GUID, .perf_device = false, },

	/* Synthetic Frame Buffer */
	{ .dev_type = HV_FB, HV_SYNTHVID_GUID, .perf_device = false, },

	/* Synthetic Keyboard */
	{ .dev_type = HV_KBD, HV_KBD_GUID, .perf_device = false, },

	/* Synthetic Mouse */
	{ .dev_type = HV_MOUSE, HV_MOUSE_GUID, .perf_device = false, },

	/* KVP */
	{ .dev_type = HV_KVP, HV_KVP_GUID, .perf_device = false, },

	/* Time Synch */
	{ .dev_type = HV_TS, HV_TS_GUID, .perf_device = false, },

	/* Heartbeat */
	{ .dev_type = HV_HB, HV_HEART_BEAT_GUID, .perf_device = false, },

	/* Shutdown */
	{ .dev_type = HV_SHUTDOWN, HV_SHUTDOWN_GUID, .perf_device = false, },

	/* File copy */
	{ .dev_type = HV_FCOPY, HV_FCOPY_GUID, .perf_device = false, },

	/* Backup */
	{ .dev_type = HV_BACKUP, HV_VSS_GUID, .perf_device = false, },

	/* Dynamic Memory */
	{ .dev_type = HV_DM, HV_DM_GUID, .perf_device = false, },

	/* Unknown GUID */
	{ .dev_type = HV_UNKNOWN, .perf_device = false, },
};
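/*
 * Note: only the .perf_device entries above (storage and networking)
 * participate in the per-NUMA-node interrupt spreading implemented by
 * init_vp_index() below; all other device types keep their channel
 * interrupts on CPU 0.
 */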
static const struct {
	uuid_le guid;
} vmbus_unsupported_devs[] = {
	{ HV_AVMA1_GUID },
	{ HV_AVMA2_GUID },
	{ HV_RDV_GUID	},
};
/*
 * The rescinded channel may be blocked waiting for a response from the host;
 * take care of that.
 */
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	channel->rescind = true;
	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		if (msginfo->waiting_channel == channel) {
			complete(&msginfo->waitevent);
			break;
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
static bool is_unsupported_vmbus_devs(const uuid_le *guid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
		if (!uuid_le_cmp(*guid, vmbus_unsupported_devs[i].guid))
			return true;
	return false;
}
static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
	const uuid_le *guid = &channel->offermsg.offer.if_type;
	u16 i;

	if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
		return HV_UNKNOWN;

	for (i = HV_IDE; i < HV_UNKNOWN; i++) {
		if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
			return i;
	}
	pr_info("Unknown GUID: %pUl\n", guid);
	return i;
}
/**
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @buf: Raw buffer channel data
 * @fw_version: The framework versions we can support
 * @fw_vercnt: The number of entries in @fw_version
 * @srv_version: The service versions we can support
 * @srv_vercnt: The number of entries in @srv_version
 * @nego_fw_version: Stores the selected framework version
 * @nego_srv_version: Stores the selected service version
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * Set up and fill in default negotiate response message.
 *
 * The fw_version/fw_vercnt and srv_version/srv_vercnt parameters specify
 * the framework and service versions we can support.
 *
 * Versions are given in decreasing order.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
				u8 *buf, const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i, j;
	bool found_match = false;
	struct icmsg_negotiate *negop;

	icmsghdrp->icmsgsize = 0x10;
	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */

	for (i = 0; i < fw_vercnt; i++) {
		fw_major = (fw_version[i] >> 16);
		fw_minor = (fw_version[i] & 0xFFFF);

		for (j = 0; j < negop->icframe_vercnt; j++) {
			if ((negop->icversion_data[j].major == fw_major) &&
			    (negop->icversion_data[j].minor == fw_minor)) {
				icframe_major = negop->icversion_data[j].major;
				icframe_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = 0; i < srv_vercnt; i++) {
		srv_major = (srv_version[i] >> 16);
		srv_minor = (srv_version[i] & 0xFFFF);

		for (j = negop->icframe_vercnt;
			(j < negop->icframe_vercnt + negop->icmsg_vercnt);
			j++) {
			if ((negop->icversion_data[j].major == srv_major) &&
			    (negop->icversion_data[j].minor == srv_minor)) {
				icmsg_major = negop->icversion_data[j].major;
				icmsg_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */

fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	if (nego_fw_version)
		*nego_fw_version = (icframe_major << 16) | icframe_minor;

	if (nego_srv_version)
		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;

	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
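/*
 * Typical call site (a hedged sketch modeled on the hv_utils IC drivers,
 * not code from this file; the version arrays and the UTIL_FW_VERSION /
 * SRV_VERSION constants below are illustrative):
 *
 *	static const int fw_versions[] = { UTIL_FW_VERSION };
 *	static const int srv_versions[] = { SRV_VERSION };
 *	int srv_version;
 *
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(icmsghdrp, recv_buffer,
 *					  fw_versions, ARRAY_SIZE(fw_versions),
 *					  srv_versions, ARRAY_SIZE(srv_versions),
 *					  NULL, &srv_version);
 *
 * The helper rewrites the negotiate packet in the buffer in place, so the
 * caller simply sends the same buffer back to the host afterwards.
 */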
/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	spin_lock_init(&channel->lock);
	init_completion(&channel->rescind_event);

	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

	tasklet_init(&channel->callback_event,
		     vmbus_on_event, (unsigned long)channel);

	return channel;
}
/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	tasklet_kill(&channel->callback_event);

	kfree_rcu(channel, rcu);
}
static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);

	list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
}
static void percpu_channel_deq(void *arg)
{
	struct vmbus_channel *channel = arg;

	list_del_rcu(&channel->percpu_list);
}
static void vmbus_release_relid(u32 relid)
{
	struct vmbus_channel_relid_released msg;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
		       true);
}
void hv_process_channel_removal(u32 relid)
{
	unsigned long flags;
	struct vmbus_channel *primary_channel, *channel;

	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));

	/*
	 * Make sure channel is valid as we may have raced.
	 */
	channel = relid2channel(relid);
	if (!channel)
		return;

	BUG_ON(!channel->rescind);
	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu,
					 percpu_channel_deq, channel, true);
	} else {
		percpu_channel_deq(channel);
		put_cpu();
	}

	if (channel->primary_channel == NULL) {
		list_del(&channel->listentry);

		primary_channel = channel;
	} else {
		primary_channel = channel->primary_channel;
		spin_lock_irqsave(&primary_channel->lock, flags);
		list_del(&channel->sc_list);
		primary_channel->num_sc--;
		spin_unlock_irqrestore(&primary_channel->lock, flags);
	}

	/*
	 * We need to free the bit for init_vp_index() to work in the case
	 * of sub-channel, when we reload drivers like hv_netvsc.
	 */
	if (channel->affinity_policy == HV_LOCALIZED)
		cpumask_clear_cpu(channel->target_cpu,
				  &primary_channel->alloced_cpus_in_node);

	vmbus_release_relid(relid);

	free_channel(channel);
}
void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
		listentry) {
		/* hv_process_channel_removal() needs this */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
}
/* Note: the function can run concurrently for primary/sub channels. */
static void vmbus_add_channel_work(struct work_struct *work)
{
	struct vmbus_channel *newchannel =
		container_of(work, struct vmbus_channel, add_channel_work);
	struct vmbus_channel *primary_channel = newchannel->primary_channel;
	unsigned long flags;
	u16 dev_type;
	int ret;

	dev_type = hv_get_dev_type(newchannel);

	init_vp_index(newchannel, dev_type);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_enq,
					 newchannel, true);
	} else {
		percpu_channel_enq(newchannel);
		put_cpu();
	}

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly.
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (primary_channel != NULL) {
		/* newchannel is a sub-channel. */

		if (primary_channel->sc_creation_callback != NULL)
			primary_channel->sc_creation_callback(newchannel);

		newchannel->probe_done = true;
		return;
	}

	/*
	 * Start the process of binding the primary channel to the driver
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	newchannel->device_obj->device_id = dev_type;
	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	ret = vmbus_device_register(newchannel->device_obj);

	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
			newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}

	newchannel->probe_done = true;
	return;

err_deq_chan:
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * We need to set the flag, otherwise
	 * vmbus_onoffer_rescind() can be blocked.
	 */
	newchannel->probe_done = true;

	if (primary_channel == NULL) {
		list_del(&newchannel->listentry);
	} else {
		spin_lock_irqsave(&primary_channel->lock, flags);
		list_del(&newchannel->sc_list);
		spin_unlock_irqrestore(&primary_channel->lock, flags);
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_deq,
					 newchannel, true);
	} else {
		percpu_channel_deq(newchannel);
		put_cpu();
	}

	vmbus_release_relid(newchannel->offermsg.child_relid);

	free_channel(newchannel);
}
/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	struct workqueue_struct *wq;
	unsigned long flags;
	bool fnew = true;

	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * Now that we have acquired the channel_mutex,
	 * we can release the potentially racing rescind thread.
	 */
	atomic_dec(&vmbus_connection.offer_in_progress);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
				 newchannel->offermsg.offer.if_type) &&
		    !uuid_le_cmp(channel->offermsg.offer.if_instance,
				 newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew)
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);
	else {
		/*
		 * Check to see if this is a valid sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index == 0) {
			mutex_unlock(&vmbus_connection.channel_mutex);
			/*
			 * Don't call free_channel(), because newchannel->kobj
			 * is not initialized yet.
			 */
			kfree(newchannel);
			WARN_ON_ONCE(1);
			return;
		}
		/*
		 * Process the sub-channel.
		 */
		newchannel->primary_channel = channel;
		spin_lock_irqsave(&channel->lock, flags);
		list_add_tail(&newchannel->sc_list, &channel->sc_list);
		spin_unlock_irqrestore(&channel->lock, flags);
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	/*
	 * vmbus_process_offer() mustn't call channel->sc_creation_callback()
	 * directly for sub-channels, because sc_creation_callback() ->
	 * vmbus_open() may never get the host's response to the
	 * OPEN_CHANNEL message (the host may rescind a channel at any time,
	 * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
	 * may not wake up the vmbus_open() as it's blocked due to a non-zero
	 * vmbus_connection.offer_in_progress, and finally we have a deadlock.
	 *
	 * The above is also true for primary channels, if the related device
	 * drivers use sync probing mode by default.
	 *
	 * And, usually the handling of primary channels and sub-channels can
	 * depend on each other, so we should offload them to different
	 * workqueues to avoid possible deadlock, e.g. in sync-probing mode,
	 * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
	 * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock
	 * and waits for all the sub-channels to appear, but the latter
	 * can't get the rtnl_lock and this blocks the handling of
	 * sub-channels.
	 */
	INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
	wq = fnew ? vmbus_connection.handle_primary_chan_wq :
		    vmbus_connection.handle_sub_chan_wq;
	queue_work(wq, &newchannel->add_channel_work);
}
/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;
/*
 * init_vp_index() accesses global variables like next_numa_node_id, and
 * it can run concurrently for primary channels and sub-channels: see
 * vmbus_process_offer(), so we need the lock to protect the global
 * variables.
 */
static DEFINE_SPINLOCK(bind_channel_to_cpu_lock);
/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to VCPU.
 * We do this in a hierarchical fashion:
 * First distribute the primary channels across available NUMA nodes
 * and then distribute the subchannels amongst the CPUs in the NUMA
 * node assigned to the primary channel.
 *
 * For pre-win8 hosts or non-performance critical channels we assign the
 * first CPU in the first NUMA node.
 */
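/*
 * Worked example (illustrative, not from the upstream file): on a guest
 * with two NUMA nodes of four VCPUs each, the first three performance-
 * critical primary channels land on nodes 0, 1, 0 (round-robin via
 * next_numa_node_id). Sub-channels of the primary on node 1 are then
 * spread over CPUs 4-7 only, tracked in hv_context.hv_numa_map so that
 * no CPU in the node is reused before all of them have been handed out.
 */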
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
{
	u32 cur_cpu;
	bool perf_chn = vmbus_devs[dev_type].perf_device;
	struct vmbus_channel *primary = channel->primary_channel;
	int next_node;
	cpumask_var_t available_mask;
	struct cpumask *alloced_mask;

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
	    !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on cpu 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 * In case alloc_cpumask_var() fails, bind it to cpu 0.
		 */
		channel->numa_node = 0;
		channel->target_cpu = 0;
		channel->target_vp = hv_cpu_number_to_vp_number(0);
		return;
	}

	spin_lock(&bind_channel_to_cpu_lock);

	/*
	 * Based on the channel affinity policy, we will assign the NUMA
	 * nodes.
	 */

	if ((channel->affinity_policy == HV_BALANCED) || (!primary)) {
		while (true) {
			next_node = next_numa_node_id++;
			if (next_node == nr_node_ids) {
				next_node = next_numa_node_id = 0;
				continue;
			}
			if (cpumask_empty(cpumask_of_node(next_node)))
				continue;
			break;
		}
		channel->numa_node = next_node;
		primary = channel;
	}
	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(available_mask, alloced_mask,
		    cpumask_of_node(primary->numa_node));

	cur_cpu = -1;

	if (primary->affinity_policy == HV_LOCALIZED) {
		/*
		 * Normally Hyper-V host doesn't create more subchannels
		 * than there are VCPUs on the node but it is possible when not
		 * all present VCPUs on the node are initialized by guest.
		 * Clear the alloced_cpus_in_node to start over.
		 */
		if (cpumask_equal(&primary->alloced_cpus_in_node,
				  cpumask_of_node(primary->numa_node)))
			cpumask_clear(&primary->alloced_cpus_in_node);
	}

	while (true) {
		cur_cpu = cpumask_next(cur_cpu, available_mask);
		if (cur_cpu >= nr_cpu_ids) {
			cur_cpu = -1;
			cpumask_copy(available_mask,
				     cpumask_of_node(primary->numa_node));
			continue;
		}

		if (primary->affinity_policy == HV_LOCALIZED) {
			/*
			 * NOTE: in the case of sub-channel, we clear the
			 * sub-channel related bit(s) in
			 * primary->alloced_cpus_in_node in
			 * hv_process_channel_removal(), so when we
			 * reload drivers like hv_netvsc in SMP guest, here
			 * we're able to re-allocate
			 * bit from primary->alloced_cpus_in_node.
			 */
			if (!cpumask_test_cpu(cur_cpu,
					      &primary->alloced_cpus_in_node)) {
				cpumask_set_cpu(cur_cpu,
						&primary->alloced_cpus_in_node);
				cpumask_set_cpu(cur_cpu, alloced_mask);
				break;
			}
		} else {
			cpumask_set_cpu(cur_cpu, alloced_mask);
			break;
		}
	}

	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);

	spin_unlock(&bind_channel_to_cpu_lock);

	free_cpumask_var(available_mask);
}
static void vmbus_wait_for_unload(void)
{
	int i, cpu;
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;
	u32 message_type;

	/*
	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
	 * used for initial contact or to CPU0 depending on host version. When
	 * we're crashing on a different CPU let's hope that IRQ handler on
	 * the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
	 * functional and vmbus_unload_response() will complete
	 * vmbus_connection.unload_event. If not, the last thing we can do is
	 * read message pages for all CPUs directly.
	 *
	 * Wait no more than 10 seconds so that the panic path can't get
	 * hung forever in case the response message isn't seen.
	 */
	for (i = 0; i < 1000; i++) {
		if (completion_done(&vmbus_connection.unload_event))
			break;

		for_each_online_cpu(cpu) {
			struct hv_per_cpu_context *hv_cpu
				= per_cpu_ptr(hv_context.cpu_context, cpu);

			page_addr = hv_cpu->synic_message_page;
			msg = (struct hv_message *)page_addr
				+ VMBUS_MESSAGE_SINT;

			message_type = READ_ONCE(msg->header.message_type);
			if (message_type == HVMSG_NONE)
				continue;

			hdr = (struct vmbus_channel_message_header *)
				msg->u.payload;

			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
				complete(&vmbus_connection.unload_event);

			vmbus_signal_eom(msg, message_type);
		}

		mdelay(10);
	}

	/*
	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
	 * maybe-pending messages on all CPUs to be able to receive new
	 * messages after we reconnect.
	 */
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		page_addr = hv_cpu->synic_message_page;
		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
		msg->header.message_type = HVMSG_NONE;
	}
}
/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wakeup the waiting thread.
	 * Once we successfully unload, we can cleanup the monitor state.
	 */
	complete(&vmbus_connection.unload_event);
}
void vmbus_initiate_unload(bool crash)
{
	struct vmbus_channel_message_header hdr;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
		       !crash);

	/*
	 * vmbus_initiate_unload() is also called on crash and the crash can be
	 * happening in an interrupt context, where scheduling is impossible.
	 */
	if (!crash)
		wait_for_completion(&vmbus_connection.unload_event);
	else
		vmbus_wait_for_unload();
}
/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		vmbus_release_relid(offer->child_relid);
		atomic_dec(&vmbus_connection.offer_in_progress);
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * Setup state for signalling the host.
	 */
	newchannel->sig_event = VMBUS_EVENT_CONNECTION_ID;

	if (vmbus_proto_version != VERSION_WS2008) {
		newchannel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event = offer->connection_id;
	}

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	vmbus_process_offer(newchannel);
}
/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	struct device *dev;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;

	/*
	 * The offer msg and the corresponding rescind msg
	 * from the host are guaranteed to be ordered -
	 * offer comes in first and then the rescind.
	 * Since we process these events in work elements,
	 * and with preemption, we may end up processing
	 * the events out of order. Given that we handle these
	 * work elements on the same CPU, this is possible only
	 * in the case of preemption. In any case wait here
	 * until the offer processing has moved beyond the
	 * point where the channel is discoverable.
	 */

	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here while any channel offer is currently
		 * being processed.
		 */
		msleep(1);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	channel = relid2channel(rescind->child_relid);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel == NULL) {
		/*
		 * We failed in processing the offer message;
		 * we would have cleaned up the relid in that
		 * failure path.
		 */
		return;
	}

	/*
	 * Before setting channel->rescind in vmbus_rescind_cleanup(), we
	 * should make sure the channel callback is not running any more.
	 */
	vmbus_reset_channel_cb(channel);

	/*
	 * Now wait for offer handling to complete.
	 */
	vmbus_rescind_cleanup(channel);
	while (READ_ONCE(channel->probe_done) == false) {
		/*
		 * We wait here while the offer for this channel is still
		 * being processed.
		 */
		msleep(1);
	}

	/*
	 * At this point, the rescind handling can proceed safely.
	 */

	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			channel->chn_rescind_callback(channel);
			return;
		}
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	}
	if (channel->primary_channel != NULL) {
		/*
		 * Sub-channel is being rescinded. Following is the channel
		 * close sequence when initiated from the driver (refer to
		 * vmbus_close() for details):
		 * 1. Close all sub-channels first
		 * 2. Then close the primary channel.
		 */
		mutex_lock(&vmbus_connection.channel_mutex);
		if (channel->state == CHANNEL_OPEN_STATE) {
			/*
			 * The channel is currently not open;
			 * it is safe for us to cleanup the channel.
			 */
			hv_process_channel_removal(rescind->child_relid);
		} else {
			complete(&channel->rescind_event);
		}
		mutex_unlock(&vmbus_connection.channel_mutex);
	}
}
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
	BUG_ON(!is_hvsock_channel(channel));

	/* We always get a rescind msg when a connection is closed. */
	while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
		msleep(1);

	vmbus_device_unregister(channel->device_obj);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
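/*
 * Usage note (hedged): the expected caller is the AF_VSOCK transport for
 * Hyper-V (net/vmw_vsock/hyperv_transport.c), which invokes this from its
 * socket destruct path after the host has rescinded the per-connection
 * channel.
 */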
/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}
/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we received a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we received a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we received a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_onversion_response - Version response handler
 *
 * This is invoked when we received a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			       version_response,
			       sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/* Channel message dispatch table */
const struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT] = {
	{ CHANNELMSG_INVALID,			0, NULL },
	{ CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer },
	{ CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind },
	{ CHANNELMSG_REQUESTOFFERS,		0, NULL },
	{ CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered },
	{ CHANNELMSG_OPENCHANNEL,		0, NULL },
	{ CHANNELMSG_OPENCHANNEL_RESULT,	1, vmbus_onopen_result },
	{ CHANNELMSG_CLOSECHANNEL,		0, NULL },
	{ CHANNELMSG_GPADL_HEADER,		0, NULL },
	{ CHANNELMSG_GPADL_BODY,		0, NULL },
	{ CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created },
	{ CHANNELMSG_GPADL_TEARDOWN,		0, NULL },
	{ CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown },
	{ CHANNELMSG_RELID_RELEASED,		0, NULL },
	{ CHANNELMSG_INITIATE_CONTACT,		0, NULL },
	{ CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response },
	{ CHANNELMSG_UNLOAD,			0, NULL },
	{ CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response },
	{ CHANNELMSG_18,			0, NULL },
	{ CHANNELMSG_19,			0, NULL },
	{ CHANNELMSG_20,			0, NULL },
	{ CHANNELMSG_TL_CONNECT_REQUEST,	0, NULL },
	{ CHANNELMSG_22,			0, NULL },
	{ CHANNELMSG_TL_CONNECT_RESULT,		0, NULL },
};
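/*
 * The middle column is the handler_type: entries marked 1
 * (VMHT_NON_BLOCKING) merely copy a response and complete() a waiter, so
 * vmbus_on_msg_dpc() in vmbus_drv.c can invoke them directly from the
 * message DPC; entries marked 0 (VMHT_BLOCKING) with a non-NULL handler
 * may sleep and are instead deferred to the connection work queue.
 */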
/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
	struct hv_message *msg = context;
	struct vmbus_channel_message_header *hdr;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;

	/*
	 * vmbus_on_msg_dpc() makes sure the hdr->msgtype here can not go
	 * out of bound and the message_handler pointer can not be NULL.
	 */
	channel_message_table[hdr->msgtype].message_handler(hdr);
}
/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kmalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;

	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
			     true);
	if (ret != 0) {
		pr_err("Unable to request offers - %d\n", ret);

		goto cleanup;
	}

cleanup:
	kfree(msginfo);

	return ret;
}
/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we try to
 * distribute the load equally amongst all available channels.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
	struct list_head *cur, *tmp;
	int cur_cpu;
	struct vmbus_channel *cur_channel;
	struct vmbus_channel *outgoing_channel = primary;
	int next_channel;
	int i = 1;

	if (list_empty(&primary->sc_list))
		return outgoing_channel;

	next_channel = primary->next_oc++;

	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

	cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id());
	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

		if (i == next_channel)
			return cur_channel;

		i++;
	}

	return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
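/*
 * Usage note (hedged): request-based drivers such as storvsc have called
 * vmbus_get_outgoing_channel() per I/O, so that a request issued on a
 * given CPU prefers the sub-channel whose target_vp matches that CPU and
 * otherwise falls back to the simple round-robin via primary->next_oc.
 */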
static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *))
{
	channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);