// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/random.h>
#include <linux/clockchips.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context;

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routine in this file.
 */
int hv_init(void)
{
        hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
        if (!hv_context.cpu_context)
                return -ENOMEM;
        return 0;
}
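
/*
 * Call-order sketch (illustrative; the real call sites are in
 * vmbus_drv.c): hv_init() runs once before hv_synic_alloc(), and the
 * per-CPU hv_synic_init()/hv_synic_cleanup() callbacks run afterwards.
 * The error label below is hypothetical:
 *
 *        ret = hv_init();
 *        if (ret)
 *                return ret;
 *        ret = hv_synic_alloc();
 *        if (ret)
 *                goto err_alloc;
 */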

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
                    enum hv_message_type message_type,
                    void *payload, size_t payload_size)
{
        struct hv_input_post_message *aligned_msg;
        struct hv_per_cpu_context *hv_cpu;
        u64 status;

        if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
                return -EMSGSIZE;

        hv_cpu = get_cpu_ptr(hv_context.cpu_context);
        aligned_msg = hv_cpu->post_msg_page;
        aligned_msg->connectionid = connection_id;
        aligned_msg->reserved = 0;
        aligned_msg->message_type = message_type;
        aligned_msg->payload_size = payload_size;
        memcpy((void *)aligned_msg->payload, payload, payload_size);

        status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

        /*
         * Preemption must remain disabled until after the hypercall
         * so some other thread can't get scheduled onto this cpu and
         * corrupt the per-cpu post_msg_page
         */
        put_cpu_ptr(hv_cpu);

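        /*
         * The low 16 bits of the hypercall result carry the Hyper-V status
         * code (HV_STATUS_*, with 0 meaning success); callers map it to an
         * errno as needed.
         */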
        return status & 0xFFFF;
}
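
/*
 * A minimal caller sketch (illustrative, not part of this file). The
 * VMBus connection code posts fixed-size messages roughly like this;
 * everything below other than hv_post_message() itself is an assumption
 * drawn from the surrounding driver:
 *
 *        union hv_connection_id conn_id = {
 *                .u.id = VMBUS_MESSAGE_CONNECTION_ID,
 *        };
 *        struct vmbus_channel_message_header hdr = {
 *                .msgtype = CHANNELMSG_UNLOAD,
 *        };
 *        int ret = hv_post_message(conn_id, 1, &hdr, sizeof(hdr));
 */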

int hv_synic_alloc(void)
{
        int cpu;
        struct hv_per_cpu_context *hv_cpu;

        /*
         * First, zero all per-cpu memory areas so hv_synic_free() can
         * detect what memory has been allocated and clean up properly
         * after any failures.
         */
        for_each_present_cpu(cpu) {
                hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
                memset(hv_cpu, 0, sizeof(*hv_cpu));
        }

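        /*
         * hv_numa_map holds one cpumask per NUMA node; the channel
         * management code uses it to spread channel interrupts across the
         * CPUs of a node (see init_vp_index() in channel_mgmt.c).
         */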
        hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
                                         GFP_KERNEL);
        if (hv_context.hv_numa_map == NULL) {
                pr_err("Unable to allocate NUMA map\n");
                goto err;
        }

        for_each_present_cpu(cpu) {
                hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);

                tasklet_init(&hv_cpu->msg_dpc,
                             vmbus_on_msg_dpc, (unsigned long) hv_cpu);

                hv_cpu->synic_message_page =
                        (void *)get_zeroed_page(GFP_ATOMIC);
                if (hv_cpu->synic_message_page == NULL) {
                        pr_err("Unable to allocate SYNIC message page\n");
                        goto err;
                }

                hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
                if (hv_cpu->synic_event_page == NULL) {
                        pr_err("Unable to allocate SYNIC event page\n");
                        goto err;
                }

                hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
                if (hv_cpu->post_msg_page == NULL) {
                        pr_err("Unable to allocate post msg page\n");
                        goto err;
                }

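                /*
                 * Per-CPU list of channels bound to this CPU; channels are
                 * added here when their target CPU is assigned, and the
                 * list is walked from the interrupt path.
                 */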
                INIT_LIST_HEAD(&hv_cpu->chan_list);
        }

        return 0;
err:
        /*
         * Any memory allocations that succeeded will be freed when
         * the caller cleans up by calling hv_synic_free()
         */
        return -ENOMEM;
}

void hv_synic_free(void)
{
        int cpu;

        for_each_present_cpu(cpu) {
                struct hv_per_cpu_context *hv_cpu
                        = per_cpu_ptr(hv_context.cpu_context, cpu);

                free_page((unsigned long)hv_cpu->synic_event_page);
                free_page((unsigned long)hv_cpu->synic_message_page);
                free_page((unsigned long)hv_cpu->post_msg_page);
        }

        kfree(hv_context.hv_numa_map);
}

/*
 * hv_synic_enable_regs - Program a CPU's SynIC message and event pages
 * and unmask the VMBus SINT.
 *
 * If the SynIC has already been initialized by another entity (i.e., an
 * x2v shim), the already-initialized message and event pages would need
 * to be retrieved; otherwise the pages allocated by hv_synic_alloc() are
 * installed here.
 */
void hv_synic_enable_regs(unsigned int cpu)
{
        struct hv_per_cpu_context *hv_cpu
                = per_cpu_ptr(hv_context.cpu_context, cpu);
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_sint shared_sint;
        union hv_synic_scontrol sctrl;

        /* Setup the Synic's message page */
        hv_get_simp(simp.as_uint64);
        simp.simp_enabled = 1;
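        /* The base field holds a guest page frame number, hence the shift */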
        simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
                >> PAGE_SHIFT;

        hv_set_simp(simp.as_uint64);

        /* Setup the Synic's event page */
        hv_get_siefp(siefp.as_uint64);
        siefp.siefp_enabled = 1;
        siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
                >> PAGE_SHIFT;

        hv_set_siefp(siefp.as_uint64);

        /* Setup the shared SINT. */
        hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
        shared_sint.masked = false;
        shared_sint.auto_eoi = hv_recommend_using_aeoi();
        hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        /* Enable the global synic bit */
        hv_get_synic_state(sctrl.as_uint64);
        sctrl.enable = 1;

        hv_set_synic_state(sctrl.as_uint64);
}
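
/*
 * Note: the hv_get_*/hv_set_* register accessors above implicitly operate
 * on the CPU this function runs on; the cpu argument only selects the
 * matching per-CPU pages. Callers therefore invoke this on the target CPU
 * itself (e.g., from the CPU-hotplug online callback).
 */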

int hv_synic_init(unsigned int cpu)
{
        hv_synic_enable_regs(cpu);

        hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);

        return 0;
}
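
/*
 * A minimal sketch of how these callbacks are wired up; the actual
 * registration lives in vmbus_drv.c, and the details below are an
 * assumption rather than a quote from this file:
 *
 *        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 *                                "hyperv/vmbus:online",
 *                                hv_synic_init, hv_synic_cleanup);
 */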

/*
 * hv_synic_disable_regs - Undo hv_synic_enable_regs(); used by
 * hv_synic_cleanup(), the cleanup routine for hv_synic_init().
 */
void hv_synic_disable_regs(unsigned int cpu)
{
        union hv_synic_sint shared_sint;
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_scontrol sctrl;

        hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        shared_sint.masked = 1;

        /* FIXME: clean up correctly in the SMP case */
        /* Disable the interrupt */
        hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        hv_get_simp(simp.as_uint64);
        simp.simp_enabled = 0;
        simp.base_simp_gpa = 0;

        hv_set_simp(simp.as_uint64);

        hv_get_siefp(siefp.as_uint64);
        siefp.siefp_enabled = 0;
        siefp.base_siefp_gpa = 0;

        hv_set_siefp(siefp.as_uint64);

        /* Disable the global synic bit */
        hv_get_synic_state(sctrl.as_uint64);
        sctrl.enable = 0;
        hv_set_synic_state(sctrl.as_uint64);
}
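
/*
 * Teardown mirrors hv_synic_enable_regs() in reverse: the SINT is masked
 * first so no further SynIC interrupts are delivered, then the message
 * and event pages are disabled, and the global enable bit is cleared
 * last.
 */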

int hv_synic_cleanup(unsigned int cpu)
{
        struct vmbus_channel *channel, *sc;
        bool channel_found = false;
        unsigned long flags;

        /*
         * Search for channels which are bound to the CPU we're about to
         * clean up. If we find one and vmbus is still connected, we need
         * to fail; this effectively prevents CPU offlining. There is no
         * way we can re-bind channels to different CPUs for now.
         */
        mutex_lock(&vmbus_connection.channel_mutex);
        list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
                if (channel->target_cpu == cpu) {
                        channel_found = true;
                        break;
                }
                spin_lock_irqsave(&channel->lock, flags);
                list_for_each_entry(sc, &channel->sc_list, sc_list) {
                        if (sc->target_cpu == cpu) {
                                channel_found = true;
                                break;
                        }
                }
                spin_unlock_irqrestore(&channel->lock, flags);
                if (channel_found)
                        break;
        }
        mutex_unlock(&vmbus_connection.channel_mutex);

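        /*
         * hv_synic_cleanup() runs as a CPU-hotplug teardown callback;
         * returning -EBUSY here makes the hotplug core abort the offline
         * operation for this CPU.
         */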
        if (channel_found && vmbus_connection.conn_state == CONNECTED)
                return -EBUSY;

        hv_stimer_legacy_cleanup(cpu);

        hv_synic_disable_regs(cpu);

        return 0;
}