/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */
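
/*
 * Worked example (illustrative numbers): suppose read_index ==
 * write_index == 0x1000, i.e. the ring is empty.  A writer copies a
 * 0x20-byte packet, so old_write == 0x1000 and the new write_index is
 * 0x1020.  In hv_signal_on_write() below, old_write still equals the
 * host's read_index, which shows the ring went from empty to non-empty
 * during our write, so the host is signaled.  Had data already been
 * pending, read_index would differ from old_write and no event is sent.
 */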

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices as a u64 trailer value: the write index
 * is packed into the high 32 bits and the low 32 bits are left zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	const void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}
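
/*
 * Wrap-around note (illustrative): because the data pages are mapped
 * twice, back to back (see hv_ringbuffer_init() below), the memcpy()
 * above may safely run past ring_datasize; only the returned offset is
 * wrapped.  For example, with ring_datasize == 0x1000, a 0x30-byte copy
 * starting at offset 0xff0 lands partly in the second mapping and the
 * function returns offset 0x20.
 */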

/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (!ring_info->ring_buffer)
		return -EINVAL;

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
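
/*
 * Usage sketch (illustrative, not part of this file): a sysfs "show"
 * method can rely on the -EINVAL return to handle a channel whose ring
 * has not been allocated yet:
 *
 *	struct hv_ring_buffer_debug_info dbg;
 *	int ret;
 *
 *	ret = hv_ringbuffer_get_debuginfo(&channel->outbound, &dbg);
 *	if (ret < 0)
 *		return ret;
 *	return sprintf(buf, "%u\n", dbg.current_read_index);
 */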

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
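	/*
	 * Illustrative layout: with page_cnt == 3, the vmap() below spans
	 * 2 * 3 - 1 == 5 pages: [hdr, data0, data1, data0, data1].  An
	 * access that runs off the end of the first data mapping continues
	 * seamlessly into the second copy, which is what lets the copy
	 * helpers above use a single memcpy().
	 */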
	pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
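
/*
 * Caller sketch (illustrative, not part of this file): vmbus_open()
 * allocates one contiguous block of pages for both rings and calls this
 * function once per direction, roughly:
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
 *			   get_order(send_size + recv_size));
 *	ret = hv_ringbuffer_init(&newchannel->outbound, page,
 *				 send_size >> PAGE_SHIFT);
 *	if (!ret)
 *		ret = hv_ringbuffer_init(&newchannel->inbound,
 *					 &page[send_size >> PAGE_SHIFT],
 *					 recv_size >> PAGE_SHIFT);
 */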

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
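
/*
 * Sender sketch (illustrative; modeled on vmbus_sendpacket(), not part
 * of this file): the caller supplies the descriptor, the payload and
 * u64 alignment padding as a kvec array, and may retry on -EAGAIN:
 *
 *	struct vmpacket_descriptor desc;
 *	u32 len = sizeof(desc) + payloadlen;
 *	u32 len_aligned = ALIGN(len, sizeof(u64));
 *	u64 pad = 0;
 *	struct kvec kv[3];
 *
 *	desc.type = VM_PKT_DATA_INBAND;
 *	desc.flags = 0;
 *	desc.offset8 = sizeof(desc) >> 3;
 *	desc.len8 = (u16)(len_aligned >> 3);
 *	desc.trans_id = requestid;
 *
 *	kv[0].iov_base = &desc;
 *	kv[0].iov_len = sizeof(desc);
 *	kv[1].iov_base = payload;
 *	kv[1].iov_len = payloadlen;
 *	kv[2].iov_base = &pad;
 *	kv[2].iov_len = len_aligned - len;
 *
 *	ret = hv_ringbuffer_write(channel, kv, 3);
 */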

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is returned even when there is no header;
		 * drivers are supposed to check buffer_actual_len instead.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}
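
/*
 * Reader sketch (illustrative, not part of this file; modeled on
 * vmbus_recvpacket(), which passes raw == false so the descriptor
 * header is stripped).  A zero return with *buffer_actual_len == 0
 * means the ring was empty; process() is a hypothetical consumer:
 *
 *	u32 actual;
 *	u64 req_id;
 *	int ret;
 *
 *	ret = hv_ringbuffer_read(channel, buf, buflen, &actual,
 *				 &req_id, false);
 *	if (ret == 0 && actual != 0)
 *		process(buf, actual, req_id);
 */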

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but uses the private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}
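
/*
 * Worked example (illustrative numbers): with ring_datasize == 0x4000,
 * priv_read_index == 0x3f00 and write_index == 0x0200, the data wraps,
 * so the bytes available are (0x4000 - 0x3f00) + 0x0200 == 0x0300.
 */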

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
	if (desc)
		prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If the end of the ring buffer is reached, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
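
/*
 * Consumer sketch (illustrative, not part of this file): a channel
 * callback typically walks the ring with the foreach_vmbus_pkt() helper
 * from <linux/hyperv.h>, which wraps hv_pkt_iter_first() and
 * hv_pkt_iter_next(); the latter calls hv_pkt_iter_close() when the
 * iterator is exhausted.  example_onchannelcallback() and consume()
 * are hypothetical names:
 *
 *	static void example_onchannelcallback(void *context)
 *	{
 *		struct vmbus_channel *channel = context;
 *		const struct vmpacket_descriptor *desc;
 *
 *		foreach_vmbus_pkt(desc, channel) {
 *			u32 len = (desc->len8 - desc->offset8) << 3;
 *			const void *data =
 *				(const char *)desc + (desc->offset8 << 3);
 *
 *			consume(data, len);
 *		}
 *	}
 */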

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}

/*
 * Update host ring buffer after iterating over packets.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * Here is the reason for having this barrier:
	 * If the reading of pending_send_sz (below) were to be reordered
	 * and happen before we commit the new read index (above), we could
	 * have a problem: if the host sets pending_send_sz after we have
	 * sampled it, and goes to sleep before it observes our new read
	 * index, we could miss sending the interrupt. Issue a full memory
	 * barrier to address this.
	 */
	virt_mb();

	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * If there was space before we began iteration,
	 * then host was not blocked.
	 */
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/* If pending write will not fit, don't give false hope. */
	if (curr_write_sz <= pending_sz)
		return;

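	/*
	 * Worked example (illustrative numbers): pending_send_sz == 0x200,
	 * curr_write_sz == 0x400 and bytes_read == 0x300.  Space before
	 * this read cycle was 0x400 - 0x300 == 0x100 <= 0x200, so the host
	 * may have blocked; and since 0x400 > 0x200 the pending write now
	 * fits, so signal the host.
	 */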
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);