]> git.ipfire.org Git - people/pmueller/ipfire-2.x.git/blob - src/patches/suse-2.6.27.25/patches.xen/xen3-fixup-xen
Updated xen patches taken from suse.
[people/pmueller/ipfire-2.x.git] / src / patches / suse-2.6.27.25 / patches.xen / xen3-fixup-xen
1 Subject: Fix Xen build wrt. Xen files coming from mainline.
2 From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 728:832aac894efd)
3 Patch-mainline: obsolete
4
5 Acked-by: jbeulich@novell.com
6
7 Index: head-2008-11-25/drivers/xen/Makefile
8 ===================================================================
9 --- head-2008-11-25.orig/drivers/xen/Makefile 2008-11-25 12:33:06.000000000 +0100
10 +++ head-2008-11-25/drivers/xen/Makefile 2008-11-25 12:35:56.000000000 +0100
11 @@ -1,4 +1,25 @@
12 -obj-y += grant-table.o features.o events.o manage.o
13 +obj-y += core/
14 +obj-y += console/
15 +obj-y += evtchn/
16 obj-y += xenbus/
17 -obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
18 -obj-$(CONFIG_XEN_BALLOON) += balloon.o
19 +obj-y += char/
20 +
21 +obj-y += util.o
22 +obj-$(CONFIG_XEN_BALLOON) += balloon/
23 +obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
24 +obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/
25 +obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
26 +obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmback/
27 +obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/
28 +obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/
29 +obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback/
30 +obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront/
31 +obj-$(CONFIG_XEN_FRAMEBUFFER) += fbfront/
32 +obj-$(CONFIG_XEN_KEYBOARD) += fbfront/
33 +obj-$(CONFIG_XEN_SCSI_BACKEND) += scsiback/
34 +obj-$(CONFIG_XEN_SCSI_FRONTEND) += scsifront/
35 +obj-$(CONFIG_XEN_PRIVCMD) += privcmd/
36 +obj-$(CONFIG_XEN_GRANT_DEV) += gntdev/
37 +obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL) += sfc_netutil/
38 +obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND) += sfc_netfront/
39 +obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND) += sfc_netback/
40 Index: head-2008-11-25/drivers/xen/xenbus/Makefile
41 ===================================================================
42 --- head-2008-11-25.orig/drivers/xen/xenbus/Makefile 2008-11-25 12:33:06.000000000 +0100
43 +++ head-2008-11-25/drivers/xen/xenbus/Makefile 2008-11-25 12:35:56.000000000 +0100
44 @@ -1,7 +1,9 @@
45 -obj-y += xenbus.o
46 +obj-y += xenbus_client.o xenbus_comms.o xenbus_xs.o xenbus_probe.o
47 +obj-$(CONFIG_XEN_BACKEND) += xenbus_be.o
48
49 -xenbus-objs =
50 -xenbus-objs += xenbus_client.o
51 -xenbus-objs += xenbus_comms.o
52 -xenbus-objs += xenbus_xs.o
53 -xenbus-objs += xenbus_probe.o
54 +xenbus_be-objs =
55 +xenbus_be-objs += xenbus_backend_client.o
56 +
57 +xenbus-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
58 +obj-y += $(xenbus-y) $(xenbus-m)
59 +obj-$(CONFIG_XEN_XENBUS_DEV) += xenbus_dev.o
60 Index: head-2008-11-25/drivers/xen/xenbus/xenbus_client.c
61 ===================================================================
62 --- head-2008-11-25.orig/drivers/xen/xenbus/xenbus_client.c 2008-11-25 12:33:06.000000000 +0100
63 +++ head-2008-11-25/drivers/xen/xenbus/xenbus_client.c 2008-11-25 12:35:56.000000000 +0100
64 @@ -30,14 +30,18 @@
65 * IN THE SOFTWARE.
66 */
67
68 -#include <linux/types.h>
69 -#include <linux/vmalloc.h>
70 -#include <asm/xen/hypervisor.h>
71 -#include <xen/interface/xen.h>
72 -#include <xen/interface/event_channel.h>
73 -#include <xen/events.h>
74 -#include <xen/grant_table.h>
75 +#include <linux/slab.h>
76 +#include <xen/evtchn.h>
77 +#include <xen/gnttab.h>
78 #include <xen/xenbus.h>
79 +#include <xen/driver_util.h>
80 +
81 +#ifdef HAVE_XEN_PLATFORM_COMPAT_H
82 +#include <xen/platform-compat.h>
83 +#endif
84 +
85 +#define DPRINTK(fmt, args...) \
86 + pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
87
88 const char *xenbus_strstate(enum xenbus_state state)
89 {
90 @@ -54,20 +58,6 @@ const char *xenbus_strstate(enum xenbus_
91 }
92 EXPORT_SYMBOL_GPL(xenbus_strstate);
93
94 -/**
95 - * xenbus_watch_path - register a watch
96 - * @dev: xenbus device
97 - * @path: path to watch
98 - * @watch: watch to register
99 - * @callback: callback to register
100 - *
101 - * Register a @watch on the given path, using the given xenbus_watch structure
102 - * for storage, and the given @callback function as the callback. Return 0 on
103 - * success, or -errno on error. On success, the given @path will be saved as
104 - * @watch->node, and remains the caller's to free. On error, @watch->node will
105 - * be NULL, the device will switch to %XenbusStateClosing, and the error will
106 - * be saved in the store.
107 - */
108 int xenbus_watch_path(struct xenbus_device *dev, const char *path,
109 struct xenbus_watch *watch,
110 void (*callback)(struct xenbus_watch *,
111 @@ -91,58 +81,26 @@ int xenbus_watch_path(struct xenbus_devi
112 EXPORT_SYMBOL_GPL(xenbus_watch_path);
113
114
115 -/**
116 - * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
117 - * @dev: xenbus device
118 - * @watch: watch to register
119 - * @callback: callback to register
120 - * @pathfmt: format of path to watch
121 - *
122 - * Register a watch on the given @path, using the given xenbus_watch
123 - * structure for storage, and the given @callback function as the callback.
124 - * Return 0 on success, or -errno on error. On success, the watched path
125 - * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
126 - * kfree(). On error, watch->node will be NULL, so the caller has nothing to
127 - * free, the device will switch to %XenbusStateClosing, and the error will be
128 - * saved in the store.
129 - */
130 -int xenbus_watch_pathfmt(struct xenbus_device *dev,
131 - struct xenbus_watch *watch,
132 - void (*callback)(struct xenbus_watch *,
133 - const char **, unsigned int),
134 - const char *pathfmt, ...)
135 +int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
136 + const char *path2, struct xenbus_watch *watch,
137 + void (*callback)(struct xenbus_watch *,
138 + const char **, unsigned int))
139 {
140 int err;
141 - va_list ap;
142 - char *path;
143 -
144 - va_start(ap, pathfmt);
145 - path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
146 - va_end(ap);
147 -
148 - if (!path) {
149 + char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2);
150 + if (!state) {
151 xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
152 return -ENOMEM;
153 }
154 - err = xenbus_watch_path(dev, path, watch, callback);
155 + err = xenbus_watch_path(dev, state, watch, callback);
156
157 if (err)
158 - kfree(path);
159 + kfree(state);
160 return err;
161 }
162 -EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
163 +EXPORT_SYMBOL_GPL(xenbus_watch_path2);
164
165
166 -/**
167 - * xenbus_switch_state
168 - * @dev: xenbus device
169 - * @xbt: transaction handle
170 - * @state: new state
171 - *
172 - * Advertise in the store a change of the given driver to the given new_state.
173 - * Return 0 on success, or -errno on error. On error, the device will switch
174 - * to XenbusStateClosing, and the error will be saved in the store.
175 - */
176 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
177 {
178 /* We check whether the state is currently set to the given value, and
179 @@ -201,13 +159,12 @@ static char *error_path(struct xenbus_de
180 }
181
182
183 -static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
184 - const char *fmt, va_list ap)
185 +void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
186 + va_list ap)
187 {
188 int ret;
189 unsigned int len;
190 - char *printf_buffer = NULL;
191 - char *path_buffer = NULL;
192 + char *printf_buffer = NULL, *path_buffer = NULL;
193
194 #define PRINTF_BUFFER_SIZE 4096
195 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
196 @@ -224,74 +181,51 @@ static void xenbus_va_dev_error(struct x
197 path_buffer = error_path(dev);
198
199 if (path_buffer == NULL) {
200 - dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
201 + printk("xenbus: failed to write error node for %s (%s)\n",
202 dev->nodename, printf_buffer);
203 goto fail;
204 }
205
206 if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
207 - dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
208 + printk("xenbus: failed to write error node for %s (%s)\n",
209 dev->nodename, printf_buffer);
210 goto fail;
211 }
212
213 fail:
214 - kfree(printf_buffer);
215 - kfree(path_buffer);
216 + if (printf_buffer)
217 + kfree(printf_buffer);
218 + if (path_buffer)
219 + kfree(path_buffer);
220 }
221
222
223 -/**
224 - * xenbus_dev_error
225 - * @dev: xenbus device
226 - * @err: error to report
227 - * @fmt: error message format
228 - *
229 - * Report the given negative errno into the store, along with the given
230 - * formatted message.
231 - */
232 -void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
233 +void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
234 + ...)
235 {
236 va_list ap;
237
238 va_start(ap, fmt);
239 - xenbus_va_dev_error(dev, err, fmt, ap);
240 + _dev_error(dev, err, fmt, ap);
241 va_end(ap);
242 }
243 EXPORT_SYMBOL_GPL(xenbus_dev_error);
244
245 -/**
246 - * xenbus_dev_fatal
247 - * @dev: xenbus device
248 - * @err: error to report
249 - * @fmt: error message format
250 - *
251 - * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
252 - * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly
253 - * closedown of this driver and its peer.
254 - */
255
256 -void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
257 +void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
258 + ...)
259 {
260 va_list ap;
261
262 va_start(ap, fmt);
263 - xenbus_va_dev_error(dev, err, fmt, ap);
264 + _dev_error(dev, err, fmt, ap);
265 va_end(ap);
266
267 xenbus_switch_state(dev, XenbusStateClosing);
268 }
269 EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
270
271 -/**
272 - * xenbus_grant_ring
273 - * @dev: xenbus device
274 - * @ring_mfn: mfn of ring to grant
275 -
276 - * Grant access to the given @ring_mfn to the peer of the given device. Return
277 - * 0 on success, or -errno on error. On error, the device will switch to
278 - * XenbusStateClosing, and the error will be saved in the store.
279 - */
280 +
281 int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
282 {
283 int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
284 @@ -302,18 +236,12 @@ int xenbus_grant_ring(struct xenbus_devi
285 EXPORT_SYMBOL_GPL(xenbus_grant_ring);
286
287
288 -/**
289 - * Allocate an event channel for the given xenbus_device, assigning the newly
290 - * created local port to *port. Return 0 on success, or -errno on error. On
291 - * error, the device will switch to XenbusStateClosing, and the error will be
292 - * saved in the store.
293 - */
294 int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
295 {
296 struct evtchn_alloc_unbound alloc_unbound;
297 int err;
298
299 - alloc_unbound.dom = DOMID_SELF;
300 + alloc_unbound.dom = DOMID_SELF;
301 alloc_unbound.remote_dom = dev->otherend_id;
302
303 err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
304 @@ -328,36 +256,6 @@ int xenbus_alloc_evtchn(struct xenbus_de
305 EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
306
307
308 -/**
309 - * Bind to an existing interdomain event channel in another domain. Returns 0
310 - * on success and stores the local port in *port. On error, returns -errno,
311 - * switches the device to XenbusStateClosing, and saves the error in XenStore.
312 - */
313 -int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
314 -{
315 - struct evtchn_bind_interdomain bind_interdomain;
316 - int err;
317 -
318 - bind_interdomain.remote_dom = dev->otherend_id;
319 - bind_interdomain.remote_port = remote_port;
320 -
321 - err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
322 - &bind_interdomain);
323 - if (err)
324 - xenbus_dev_fatal(dev, err,
325 - "binding to event channel %d from domain %d",
326 - remote_port, dev->otherend_id);
327 - else
328 - *port = bind_interdomain.local_port;
329 -
330 - return err;
331 -}
332 -EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
333 -
334 -
335 -/**
336 - * Free an existing event channel. Returns 0 on success or -errno on error.
337 - */
338 int xenbus_free_evtchn(struct xenbus_device *dev, int port)
339 {
340 struct evtchn_close close;
341 @@ -374,189 +272,6 @@ int xenbus_free_evtchn(struct xenbus_dev
342 EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
343
344
345 -/**
346 - * xenbus_map_ring_valloc
347 - * @dev: xenbus device
348 - * @gnt_ref: grant reference
349 - * @vaddr: pointer to address to be filled out by mapping
350 - *
351 - * Based on Rusty Russell's skeleton driver's map_page.
352 - * Map a page of memory into this domain from another domain's grant table.
353 - * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
354 - * page to that address, and sets *vaddr to that address.
355 - * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
356 - * or -ENOMEM on error. If an error is returned, device will switch to
357 - * XenbusStateClosing and the error message will be saved in XenStore.
358 - */
359 -int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
360 -{
361 - struct gnttab_map_grant_ref op = {
362 - .flags = GNTMAP_host_map,
363 - .ref = gnt_ref,
364 - .dom = dev->otherend_id,
365 - };
366 - struct vm_struct *area;
367 -
368 - *vaddr = NULL;
369 -
370 - area = xen_alloc_vm_area(PAGE_SIZE);
371 - if (!area)
372 - return -ENOMEM;
373 -
374 - op.host_addr = (unsigned long)area->addr;
375 -
376 - if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
377 - BUG();
378 -
379 - if (op.status != GNTST_okay) {
380 - xen_free_vm_area(area);
381 - xenbus_dev_fatal(dev, op.status,
382 - "mapping in shared page %d from domain %d",
383 - gnt_ref, dev->otherend_id);
384 - return op.status;
385 - }
386 -
387 - /* Stuff the handle in an unused field */
388 - area->phys_addr = (unsigned long)op.handle;
389 -
390 - *vaddr = area->addr;
391 - return 0;
392 -}
393 -EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
394 -
395 -
396 -/**
397 - * xenbus_map_ring
398 - * @dev: xenbus device
399 - * @gnt_ref: grant reference
400 - * @handle: pointer to grant handle to be filled
401 - * @vaddr: address to be mapped to
402 - *
403 - * Map a page of memory into this domain from another domain's grant table.
404 - * xenbus_map_ring does not allocate the virtual address space (you must do
405 - * this yourself!). It only maps in the page to the specified address.
406 - * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
407 - * or -ENOMEM on error. If an error is returned, device will switch to
408 - * XenbusStateClosing and the error message will be saved in XenStore.
409 - */
410 -int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
411 - grant_handle_t *handle, void *vaddr)
412 -{
413 - struct gnttab_map_grant_ref op = {
414 - .host_addr = (unsigned long)vaddr,
415 - .flags = GNTMAP_host_map,
416 - .ref = gnt_ref,
417 - .dom = dev->otherend_id,
418 - };
419 -
420 - if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
421 - BUG();
422 -
423 - if (op.status != GNTST_okay) {
424 - xenbus_dev_fatal(dev, op.status,
425 - "mapping in shared page %d from domain %d",
426 - gnt_ref, dev->otherend_id);
427 - } else
428 - *handle = op.handle;
429 -
430 - return op.status;
431 -}
432 -EXPORT_SYMBOL_GPL(xenbus_map_ring);
433 -
434 -
435 -/**
436 - * xenbus_unmap_ring_vfree
437 - * @dev: xenbus device
438 - * @vaddr: addr to unmap
439 - *
440 - * Based on Rusty Russell's skeleton driver's unmap_page.
441 - * Unmap a page of memory in this domain that was imported from another domain.
442 - * Use xenbus_unmap_ring_vfree if you mapped in your memory with
443 - * xenbus_map_ring_valloc (it will free the virtual address space).
444 - * Returns 0 on success and returns GNTST_* on error
445 - * (see xen/include/interface/grant_table.h).
446 - */
447 -int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
448 -{
449 - struct vm_struct *area;
450 - struct gnttab_unmap_grant_ref op = {
451 - .host_addr = (unsigned long)vaddr,
452 - };
453 -
454 - /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
455 - * method so that we don't have to muck with vmalloc internals here.
456 - * We could force the user to hang on to their struct vm_struct from
457 - * xenbus_map_ring_valloc, but these 6 lines considerably simplify
458 - * this API.
459 - */
460 - read_lock(&vmlist_lock);
461 - for (area = vmlist; area != NULL; area = area->next) {
462 - if (area->addr == vaddr)
463 - break;
464 - }
465 - read_unlock(&vmlist_lock);
466 -
467 - if (!area) {
468 - xenbus_dev_error(dev, -ENOENT,
469 - "can't find mapped virtual address %p", vaddr);
470 - return GNTST_bad_virt_addr;
471 - }
472 -
473 - op.handle = (grant_handle_t)area->phys_addr;
474 -
475 - if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
476 - BUG();
477 -
478 - if (op.status == GNTST_okay)
479 - xen_free_vm_area(area);
480 - else
481 - xenbus_dev_error(dev, op.status,
482 - "unmapping page at handle %d error %d",
483 - (int16_t)area->phys_addr, op.status);
484 -
485 - return op.status;
486 -}
487 -EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
488 -
489 -
490 -/**
491 - * xenbus_unmap_ring
492 - * @dev: xenbus device
493 - * @handle: grant handle
494 - * @vaddr: addr to unmap
495 - *
496 - * Unmap a page of memory in this domain that was imported from another domain.
497 - * Returns 0 on success and returns GNTST_* on error
498 - * (see xen/include/interface/grant_table.h).
499 - */
500 -int xenbus_unmap_ring(struct xenbus_device *dev,
501 - grant_handle_t handle, void *vaddr)
502 -{
503 - struct gnttab_unmap_grant_ref op = {
504 - .host_addr = (unsigned long)vaddr,
505 - .handle = handle,
506 - };
507 -
508 - if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
509 - BUG();
510 -
511 - if (op.status != GNTST_okay)
512 - xenbus_dev_error(dev, op.status,
513 - "unmapping page at handle %d error %d",
514 - handle, op.status);
515 -
516 - return op.status;
517 -}
518 -EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
519 -
520 -
521 -/**
522 - * xenbus_read_driver_state
523 - * @path: path for driver
524 - *
525 - * Return the state of the driver rooted at the given store path, or
526 - * XenbusStateUnknown if no state can be read.
527 - */
528 enum xenbus_state xenbus_read_driver_state(const char *path)
529 {
530 enum xenbus_state result;
531 Index: head-2008-11-25/drivers/xen/xenbus/xenbus_comms.c
532 ===================================================================
533 --- head-2008-11-25.orig/drivers/xen/xenbus/xenbus_comms.c 2008-11-25 12:33:06.000000000 +0100
534 +++ head-2008-11-25/drivers/xen/xenbus/xenbus_comms.c 2008-11-25 12:35:56.000000000 +0100
535 @@ -34,19 +34,28 @@
536 #include <linux/interrupt.h>
537 #include <linux/sched.h>
538 #include <linux/err.h>
539 +#include <linux/ptrace.h>
540 +#include <linux/workqueue.h>
541 +#include <xen/evtchn.h>
542 #include <xen/xenbus.h>
543 -#include <asm/xen/hypervisor.h>
544 -#include <xen/events.h>
545 -#include <xen/page.h>
546 +
547 +#include <asm/hypervisor.h>
548 +
549 #include "xenbus_comms.h"
550
551 +#ifdef HAVE_XEN_PLATFORM_COMPAT_H
552 +#include <xen/platform-compat.h>
553 +#endif
554 +
555 static int xenbus_irq;
556
557 -static DECLARE_WORK(probe_work, xenbus_probe);
558 +extern void xenbus_probe(void *);
559 +extern int xenstored_ready;
560 +static DECLARE_WORK(probe_work, xenbus_probe, NULL);
561
562 static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
563
564 -static irqreturn_t wake_waiting(int irq, void *unused)
565 +static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
566 {
567 if (unlikely(xenstored_ready == 0)) {
568 xenstored_ready = 1;
569 @@ -82,13 +91,6 @@ static const void *get_input_chunk(XENST
570 return buf + MASK_XENSTORE_IDX(cons);
571 }
572
573 -/**
574 - * xb_write - low level write
575 - * @data: buffer to send
576 - * @len: length of buffer
577 - *
578 - * Returns 0 on success, error otherwise.
579 - */
580 int xb_write(const void *data, unsigned len)
581 {
582 struct xenstore_domain_interface *intf = xen_store_interface;
583 @@ -197,12 +199,11 @@ int xb_read(void *data, unsigned len)
584 return 0;
585 }
586
587 -/**
588 - * xb_init_comms - Set up interrupt handler off store event channel.
589 - */
590 +/* Set up interrupt handler off store event channel. */
591 int xb_init_comms(void)
592 {
593 struct xenstore_domain_interface *intf = xen_store_interface;
594 + int err;
595
596 if (intf->req_prod != intf->req_cons)
597 printk(KERN_ERR "XENBUS request ring is not quiescent "
598 @@ -215,20 +216,18 @@ int xb_init_comms(void)
599 intf->rsp_cons = intf->rsp_prod;
600 }
601
602 - if (xenbus_irq) {
603 - /* Already have an irq; assume we're resuming */
604 - rebind_evtchn_irq(xen_store_evtchn, xenbus_irq);
605 - } else {
606 - int err;
607 - err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
608 - 0, "xenbus", &xb_waitq);
609 - if (err <= 0) {
610 - printk(KERN_ERR "XENBUS request irq failed %i\n", err);
611 - return err;
612 - }
613 + if (xenbus_irq)
614 + unbind_from_irqhandler(xenbus_irq, &xb_waitq);
615
616 - xenbus_irq = err;
617 + err = bind_caller_port_to_irqhandler(
618 + xen_store_evtchn, wake_waiting,
619 + 0, "xenbus", &xb_waitq);
620 + if (err <= 0) {
621 + printk(KERN_ERR "XENBUS request irq failed %i\n", err);
622 + return err;
623 }
624
625 + xenbus_irq = err;
626 +
627 return 0;
628 }
629 Index: head-2008-11-25/drivers/xen/xenbus/xenbus_probe.c
630 ===================================================================
631 --- head-2008-11-25.orig/drivers/xen/xenbus/xenbus_probe.c 2008-11-25 12:33:06.000000000 +0100
632 +++ head-2008-11-25/drivers/xen/xenbus/xenbus_probe.c 2008-11-25 12:35:56.000000000 +0100
633 @@ -4,6 +4,7 @@
634 * Copyright (C) 2005 Rusty Russell, IBM Corporation
635 * Copyright (C) 2005 Mike Wray, Hewlett-Packard
636 * Copyright (C) 2005, 2006 XenSource Ltd
637 + * Copyright (C) 2007 Solarflare Communications, Inc.
638 *
639 * This program is free software; you can redistribute it and/or
640 * modify it under the terms of the GNU General Public License version 2
641 @@ -32,7 +33,7 @@
642
643 #define DPRINTK(fmt, args...) \
644 pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
645 - __func__, __LINE__, ##args)
646 + __FUNCTION__, __LINE__, ##args)
647
648 #include <linux/kernel.h>
649 #include <linux/err.h>
650 @@ -41,24 +42,35 @@
651 #include <linux/fcntl.h>
652 #include <linux/mm.h>
653 #include <linux/notifier.h>
654 -#include <linux/kthread.h>
655 #include <linux/mutex.h>
656 -#include <linux/io.h>
657 +#include <linux/module.h>
658
659 +#include <asm/io.h>
660 #include <asm/page.h>
661 +#include <asm/maddr.h>
662 #include <asm/pgtable.h>
663 -#include <asm/xen/hypervisor.h>
664 +#include <asm/hypervisor.h>
665 #include <xen/xenbus.h>
666 -#include <xen/events.h>
667 -#include <xen/page.h>
668 +#include <xen/xen_proc.h>
669 +#include <xen/evtchn.h>
670 +#include <xen/features.h>
671 +#ifdef MODULE
672 +#include <xen/hvm.h>
673 +#endif
674
675 #include "xenbus_comms.h"
676 #include "xenbus_probe.h"
677
678 +#ifdef HAVE_XEN_PLATFORM_COMPAT_H
679 +#include <xen/platform-compat.h>
680 +#endif
681 +
682 int xen_store_evtchn;
683 struct xenstore_domain_interface *xen_store_interface;
684 static unsigned long xen_store_mfn;
685
686 +extern struct mutex xenwatch_mutex;
687 +
688 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
689
690 static void wait_for_devices(struct xenbus_driver *xendrv);
691 @@ -88,16 +100,6 @@ int xenbus_match(struct device *_dev, st
692 return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
693 }
694
695 -static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env)
696 -{
697 - struct xenbus_device *dev = to_xenbus_device(_dev);
698 -
699 - if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
700 - return -ENOMEM;
701 -
702 - return 0;
703 -}
704 -
705 /* device/<type>/<id> => <type>-<id> */
706 static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
707 {
708 @@ -166,6 +168,30 @@ static int read_backend_details(struct x
709 return read_otherend_details(xendev, "backend-id", "backend");
710 }
711
712 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
713 +static int xenbus_uevent_frontend(struct device *dev, char **envp,
714 + int num_envp, char *buffer, int buffer_size)
715 +{
716 + struct xenbus_device *xdev;
717 + int length = 0, i = 0;
718 +
719 + if (dev == NULL)
720 + return -ENODEV;
721 + xdev = to_xenbus_device(dev);
722 + if (xdev == NULL)
723 + return -ENODEV;
724 +
725 + /* stuff we want to pass to /sbin/hotplug */
726 + add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
727 + "XENBUS_TYPE=%s", xdev->devicetype);
728 + add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
729 + "XENBUS_PATH=%s", xdev->nodename);
730 + add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
731 + "MODALIAS=xen:%s", xdev->devicetype);
732 +
733 + return 0;
734 +}
735 +#endif
736
737 /* Bus type for frontend drivers. */
738 static struct xen_bus_type xenbus_frontend = {
739 @@ -173,13 +199,19 @@ static struct xen_bus_type xenbus_fronte
740 .levels = 2, /* device/type/<id> */
741 .get_bus_id = frontend_bus_id,
742 .probe = xenbus_probe_frontend,
743 + .error = -ENODEV,
744 .bus = {
745 .name = "xen",
746 .match = xenbus_match,
747 - .uevent = xenbus_uevent,
748 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
749 .probe = xenbus_dev_probe,
750 .remove = xenbus_dev_remove,
751 .shutdown = xenbus_dev_shutdown,
752 + .uevent = xenbus_uevent_frontend,
753 +#endif
754 + },
755 + .dev = {
756 + .bus_id = "xen",
757 },
758 };
759
760 @@ -196,17 +228,16 @@ static void otherend_changed(struct xenb
761 if (!dev->otherend ||
762 strncmp(dev->otherend, vec[XS_WATCH_PATH],
763 strlen(dev->otherend))) {
764 - dev_dbg(&dev->dev, "Ignoring watch at %s\n",
765 - vec[XS_WATCH_PATH]);
766 + DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]);
767 return;
768 }
769
770 state = xenbus_read_driver_state(dev->otherend);
771
772 - dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n",
773 - state, xenbus_strstate(state), dev->otherend_watch.node,
774 - vec[XS_WATCH_PATH]);
775 + DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state),
776 + dev->otherend_watch.node, vec[XS_WATCH_PATH]);
777
778 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
779 /*
780 * Ignore xenbus transitions during shutdown. This prevents us doing
781 * work that can fail e.g., when the rootfs is gone.
782 @@ -220,6 +251,7 @@ static void otherend_changed(struct xenb
783 xenbus_frontend_closed(dev);
784 return;
785 }
786 +#endif
787
788 if (drv->otherend_changed)
789 drv->otherend_changed(dev, state);
790 @@ -239,8 +271,8 @@ static int talk_to_otherend(struct xenbu
791
792 static int watch_otherend(struct xenbus_device *dev)
793 {
794 - return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
795 - "%s/%s", dev->otherend, "state");
796 + return xenbus_watch_path2(dev, dev->otherend, "state",
797 + &dev->otherend_watch, otherend_changed);
798 }
799
800
801 @@ -266,8 +298,9 @@ int xenbus_dev_probe(struct device *_dev
802
803 err = talk_to_otherend(dev);
804 if (err) {
805 - dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
806 - dev->nodename);
807 + printk(KERN_WARNING
808 + "xenbus_probe: talk_to_otherend on %s failed.\n",
809 + dev->nodename);
810 return err;
811 }
812
813 @@ -277,7 +310,8 @@ int xenbus_dev_probe(struct device *_dev
814
815 err = watch_otherend(dev);
816 if (err) {
817 - dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
818 + printk(KERN_WARNING
819 + "xenbus_probe: watch_otherend on %s failed.\n",
820 dev->nodename);
821 return err;
822 }
823 @@ -313,43 +347,55 @@ static void xenbus_dev_shutdown(struct d
824
825 DPRINTK("%s", dev->nodename);
826
827 + if (is_initial_xendomain())
828 + return;
829 +
830 get_device(&dev->dev);
831 if (dev->state != XenbusStateConnected) {
832 - printk(KERN_INFO "%s: %s: %s != Connected, skipping\n", __func__,
833 + printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__,
834 dev->nodename, xenbus_strstate(dev->state));
835 goto out;
836 }
837 xenbus_switch_state(dev, XenbusStateClosing);
838 timeout = wait_for_completion_timeout(&dev->down, timeout);
839 if (!timeout)
840 - printk(KERN_INFO "%s: %s timeout closing device\n",
841 - __func__, dev->nodename);
842 + printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename);
843 out:
844 put_device(&dev->dev);
845 }
846
847 int xenbus_register_driver_common(struct xenbus_driver *drv,
848 - struct xen_bus_type *bus,
849 - struct module *owner,
850 - const char *mod_name)
851 + struct xen_bus_type *bus)
852 {
853 + int ret;
854 +
855 + if (bus->error)
856 + return bus->error;
857 +
858 drv->driver.name = drv->name;
859 drv->driver.bus = &bus->bus;
860 - drv->driver.owner = owner;
861 - drv->driver.mod_name = mod_name;
862 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
863 + drv->driver.owner = drv->owner;
864 +#endif
865 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
866 + drv->driver.probe = xenbus_dev_probe;
867 + drv->driver.remove = xenbus_dev_remove;
868 + drv->driver.shutdown = xenbus_dev_shutdown;
869 +#endif
870
871 - return driver_register(&drv->driver);
872 + mutex_lock(&xenwatch_mutex);
873 + ret = driver_register(&drv->driver);
874 + mutex_unlock(&xenwatch_mutex);
875 + return ret;
876 }
877
878 -int __xenbus_register_frontend(struct xenbus_driver *drv,
879 - struct module *owner, const char *mod_name)
880 +int xenbus_register_frontend(struct xenbus_driver *drv)
881 {
882 int ret;
883
884 drv->read_otherend_details = read_backend_details;
885
886 - ret = xenbus_register_driver_common(drv, &xenbus_frontend,
887 - owner, mod_name);
888 + ret = xenbus_register_driver_common(drv, &xenbus_frontend);
889 if (ret)
890 return ret;
891
892 @@ -358,7 +404,7 @@ int __xenbus_register_frontend(struct xe
893
894 return 0;
895 }
896 -EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
897 +EXPORT_SYMBOL_GPL(xenbus_register_frontend);
898
899 void xenbus_unregister_driver(struct xenbus_driver *drv)
900 {
901 @@ -436,25 +482,25 @@ static void xenbus_dev_release(struct de
902 }
903
904 static ssize_t xendev_show_nodename(struct device *dev,
905 - struct device_attribute *attr, char *buf)
906 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
907 + struct device_attribute *attr,
908 +#endif
909 + char *buf)
910 {
911 return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
912 }
913 DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
914
915 static ssize_t xendev_show_devtype(struct device *dev,
916 - struct device_attribute *attr, char *buf)
917 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
918 + struct device_attribute *attr,
919 +#endif
920 + char *buf)
921 {
922 return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
923 }
924 DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
925
926 -static ssize_t xendev_show_modalias(struct device *dev,
927 - struct device_attribute *attr, char *buf)
928 -{
929 - return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype);
930 -}
931 -DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL);
932
933 int xenbus_probe_node(struct xen_bus_type *bus,
934 const char *type,
935 @@ -467,6 +513,9 @@ int xenbus_probe_node(struct xen_bus_typ
936
937 enum xenbus_state state = xenbus_read_driver_state(nodename);
938
939 + if (bus->error)
940 + return bus->error;
941 +
942 if (state != XenbusStateInitialising) {
943 /* Device is not new, so ignore it. This can happen if a
944 device is going away after switching to Closed. */
945 @@ -491,6 +540,7 @@ int xenbus_probe_node(struct xen_bus_typ
946 xendev->devicetype = tmpstring;
947 init_completion(&xendev->down);
948
949 + xendev->dev.parent = &bus->dev;
950 xendev->dev.bus = &bus->bus;
951 xendev->dev.release = xenbus_dev_release;
952
953 @@ -505,22 +555,15 @@ int xenbus_probe_node(struct xen_bus_typ
954
955 err = device_create_file(&xendev->dev, &dev_attr_nodename);
956 if (err)
957 - goto fail_unregister;
958 -
959 + goto unregister;
960 err = device_create_file(&xendev->dev, &dev_attr_devtype);
961 if (err)
962 - goto fail_remove_nodename;
963 -
964 - err = device_create_file(&xendev->dev, &dev_attr_modalias);
965 - if (err)
966 - goto fail_remove_devtype;
967 + goto unregister;
968
969 return 0;
970 -fail_remove_devtype:
971 - device_remove_file(&xendev->dev, &dev_attr_devtype);
972 -fail_remove_nodename:
973 +unregister:
974 device_remove_file(&xendev->dev, &dev_attr_nodename);
975 -fail_unregister:
976 + device_remove_file(&xendev->dev, &dev_attr_devtype);
977 device_unregister(&xendev->dev);
978 fail:
979 kfree(xendev);
980 @@ -533,8 +576,7 @@ static int xenbus_probe_frontend(const c
981 char *nodename;
982 int err;
983
984 - nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
985 - xenbus_frontend.root, type, name);
986 + nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name);
987 if (!nodename)
988 return -ENOMEM;
989
990 @@ -571,6 +613,9 @@ int xenbus_probe_devices(struct xen_bus_
991 char **dir;
992 unsigned int i, dir_n;
993
994 + if (bus->error)
995 + return bus->error;
996 +
997 dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
998 if (IS_ERR(dir))
999 return PTR_ERR(dir);
1000 @@ -607,15 +652,15 @@ static int strsep_len(const char *str, c
1001 return (len == 0) ? i : -ERANGE;
1002 }
1003
1004 -void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
1005 +void dev_changed(const char *node, struct xen_bus_type *bus)
1006 {
1007 int exists, rootlen;
1008 struct xenbus_device *dev;
1009 char type[BUS_ID_SIZE];
1010 const char *p, *root;
1011
1012 - if (char_count(node, '/') < 2)
1013 - return;
1014 + if (bus->error || char_count(node, '/') < 2)
1015 + return;
1016
1017 exists = xenbus_exists(XBT_NIL, node, "");
1018 if (!exists) {
1019 @@ -649,7 +694,7 @@ static void frontend_changed(struct xenb
1020 {
1021 DPRINTK("");
1022
1023 - xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
1024 + dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
1025 }
1026
1027 /* We watch for devices appearing and vanishing. */
1028 @@ -748,7 +793,8 @@ void xenbus_suspend(void)
1029 {
1030 DPRINTK("");
1031
1032 - bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
1033 + if (!xenbus_frontend.error)
1034 + bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
1035 xenbus_backend_suspend(suspend_dev);
1036 xs_suspend();
1037 }
1038 @@ -758,7 +804,8 @@ void xenbus_resume(void)
1039 {
1040 xb_init_comms();
1041 xs_resume();
1042 - bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
1043 + if (!xenbus_frontend.error)
1044 + bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
1045 xenbus_backend_resume(resume_dev);
1046 }
1047 EXPORT_SYMBOL_GPL(xenbus_resume);
1048 @@ -766,7 +813,8 @@ EXPORT_SYMBOL_GPL(xenbus_resume);
1049 void xenbus_suspend_cancel(void)
1050 {
1051 xs_suspend_cancel();
1052 - bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
1053 + if (!xenbus_frontend.error)
1054 + bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
1055 xenbus_backend_resume(suspend_cancel_dev);
1056 }
1057 EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
1058 @@ -794,7 +842,8 @@ void unregister_xenstore_notifier(struct
1059 }
1060 EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
1061
1062 -void xenbus_probe(struct work_struct *unused)
1063 +
1064 +void xenbus_probe(void *unused)
1065 {
1066 BUG_ON((xenstored_ready <= 0));
1067
1068 @@ -807,63 +856,171 @@ void xenbus_probe(struct work_struct *un
1069 blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
1070 }
1071
1072 -static int __init xenbus_probe_init(void)
1073 +
1074 +#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
1075 +static struct file_operations xsd_kva_fops;
1076 +static struct proc_dir_entry *xsd_kva_intf;
1077 +static struct proc_dir_entry *xsd_port_intf;
1078 +
1079 +static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
1080 +{
1081 + size_t size = vma->vm_end - vma->vm_start;
1082 +
1083 + if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
1084 + return -EINVAL;
1085 +
1086 + if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn),
1087 + size, vma->vm_page_prot))
1088 + return -EAGAIN;
1089 +
1090 + return 0;
1091 +}
1092 +
1093 +static int xsd_kva_read(char *page, char **start, off_t off,
1094 + int count, int *eof, void *data)
1095 +{
1096 + int len;
1097 +
1098 + len = sprintf(page, "0x%p", xen_store_interface);
1099 + *eof = 1;
1100 + return len;
1101 +}
1102 +
1103 +static int xsd_port_read(char *page, char **start, off_t off,
1104 + int count, int *eof, void *data)
1105 +{
1106 + int len;
1107 +
1108 + len = sprintf(page, "%d", xen_store_evtchn);
1109 + *eof = 1;
1110 + return len;
1111 +}
1112 +#endif
1113 +
1114 +static int xenbus_probe_init(void)
1115 {
1116 int err = 0;
1117 + unsigned long page = 0;
1118
1119 DPRINTK("");
1120
1121 - err = -ENODEV;
1122 if (!is_running_on_xen())
1123 - goto out_error;
1124 + return -ENODEV;
1125
1126 /* Register ourselves with the kernel bus subsystem */
1127 - err = bus_register(&xenbus_frontend.bus);
1128 - if (err)
1129 - goto out_error;
1130 -
1131 - err = xenbus_backend_bus_register();
1132 - if (err)
1133 - goto out_unreg_front;
1134 + xenbus_frontend.error = bus_register(&xenbus_frontend.bus);
1135 + if (xenbus_frontend.error)
1136 + printk(KERN_WARNING
1137 + "XENBUS: Error registering frontend bus: %i\n",
1138 + xenbus_frontend.error);
1139 + xenbus_backend_bus_register();
1140
1141 /*
1142 * Domain0 doesn't have a store_evtchn or store_mfn yet.
1143 */
1144 if (is_initial_xendomain()) {
1145 - /* dom0 not yet supported */
1146 + struct evtchn_alloc_unbound alloc_unbound;
1147 +
1148 + /* Allocate page. */
1149 + page = get_zeroed_page(GFP_KERNEL);
1150 + if (!page)
1151 + return -ENOMEM;
1152 +
1153 + xen_store_mfn = xen_start_info->store_mfn =
1154 + pfn_to_mfn(virt_to_phys((void *)page) >>
1155 + PAGE_SHIFT);
1156 +
1157 + /* Next allocate a local port which xenstored can bind to */
1158 + alloc_unbound.dom = DOMID_SELF;
1159 + alloc_unbound.remote_dom = 0;
1160 +
1161 + err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
1162 + &alloc_unbound);
1163 + if (err == -ENOSYS)
1164 + goto err;
1165 + BUG_ON(err);
1166 + xen_store_evtchn = xen_start_info->store_evtchn =
1167 + alloc_unbound.port;
1168 +
1169 +#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
1170 + /* And finally publish the above info in /proc/xen */
1171 + xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600);
1172 + if (xsd_kva_intf) {
1173 + memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops,
1174 + sizeof(xsd_kva_fops));
1175 + xsd_kva_fops.mmap = xsd_kva_mmap;
1176 + xsd_kva_intf->proc_fops = &xsd_kva_fops;
1177 + xsd_kva_intf->read_proc = xsd_kva_read;
1178 + }
1179 + xsd_port_intf = create_xen_proc_entry("xsd_port", 0400);
1180 + if (xsd_port_intf)
1181 + xsd_port_intf->read_proc = xsd_port_read;
1182 +#endif
1183 + xen_store_interface = mfn_to_virt(xen_store_mfn);
1184 } else {
1185 xenstored_ready = 1;
1186 +#ifdef CONFIG_XEN
1187 xen_store_evtchn = xen_start_info->store_evtchn;
1188 xen_store_mfn = xen_start_info->store_mfn;
1189 + xen_store_interface = mfn_to_virt(xen_store_mfn);
1190 +#else
1191 + xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
1192 + xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN);
1193 + xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT,
1194 + PAGE_SIZE);
1195 +#endif
1196 }
1197 - xen_store_interface = mfn_to_virt(xen_store_mfn);
1198 +
1199 +
1200 + xenbus_dev_init();
1201
1202 /* Initialize the interface to xenstore. */
1203 err = xs_init();
1204 if (err) {
1205 printk(KERN_WARNING
1206 "XENBUS: Error initializing xenstore comms: %i\n", err);
1207 - goto out_unreg_back;
1208 + goto err;
1209 }
1210
1211 + /* Register ourselves with the kernel device subsystem */
1212 + if (!xenbus_frontend.error) {
1213 + xenbus_frontend.error = device_register(&xenbus_frontend.dev);
1214 + if (xenbus_frontend.error) {
1215 + bus_unregister(&xenbus_frontend.bus);
1216 + printk(KERN_WARNING
1217 + "XENBUS: Error registering frontend device: %i\n",
1218 + xenbus_frontend.error);
1219 + }
1220 + }
1221 + xenbus_backend_device_register();
1222 +
1223 if (!is_initial_xendomain())
1224 xenbus_probe(NULL);
1225
1226 return 0;
1227
1228 - out_unreg_back:
1229 - xenbus_backend_bus_unregister();
1230 + err:
1231 + if (page)
1232 + free_page(page);
1233
1234 - out_unreg_front:
1235 - bus_unregister(&xenbus_frontend.bus);
1236 + /*
1237 + * Do not unregister the xenbus front/backend buses here. The buses
1238 + * must exist because front/backend drivers will use them when they are
1239 + * registered.
1240 + */
1241
1242 - out_error:
1243 return err;
1244 }
1245
1246 +#ifdef CONFIG_XEN
1247 postcore_initcall(xenbus_probe_init);
1248 -
1249 -MODULE_LICENSE("GPL");
1250 +MODULE_LICENSE("Dual BSD/GPL");
1251 +#else
1252 +int xenbus_init(void)
1253 +{
1254 + return xenbus_probe_init();
1255 +}
1256 +#endif
1257
1258 static int is_disconnected_device(struct device *dev, void *data)
1259 {
1260 @@ -883,12 +1040,14 @@ static int is_disconnected_device(struct
1261 return 0;
1262
1263 xendrv = to_xenbus_driver(dev->driver);
1264 - return (xendev->state != XenbusStateConnected ||
1265 + return (xendev->state < XenbusStateConnected ||
1266 (xendrv->is_ready && !xendrv->is_ready(xendev)));
1267 }
1268
1269 static int exists_disconnected_device(struct device_driver *drv)
1270 {
1271 + if (xenbus_frontend.error)
1272 + return xenbus_frontend.error;
1273 return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
1274 is_disconnected_device);
1275 }
1276 @@ -897,6 +1056,7 @@ static int print_device_status(struct de
1277 {
1278 struct xenbus_device *xendev = to_xenbus_device(dev);
1279 struct device_driver *drv = data;
1280 + struct xenbus_driver *xendrv;
1281
1282 /* Is this operation limited to a particular driver? */
1283 if (drv && (dev->driver != drv))
1284 @@ -906,12 +1066,23 @@ static int print_device_status(struct de
1285 /* Information only: is this too noisy? */
1286 printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
1287 xendev->nodename);
1288 - } else if (xendev->state != XenbusStateConnected) {
1289 + return 0;
1290 + }
1291 +
1292 + if (xendev->state < XenbusStateConnected) {
1293 + enum xenbus_state rstate = XenbusStateUnknown;
1294 + if (xendev->otherend)
1295 + rstate = xenbus_read_driver_state(xendev->otherend);
1296 printk(KERN_WARNING "XENBUS: Timeout connecting "
1297 - "to device: %s (state %d)\n",
1298 - xendev->nodename, xendev->state);
1299 + "to device: %s (local state %d, remote state %d)\n",
1300 + xendev->nodename, xendev->state, rstate);
1301 }
1302
1303 + xendrv = to_xenbus_driver(dev->driver);
1304 + if (xendrv->is_ready && !xendrv->is_ready(xendev))
1305 + printk(KERN_WARNING "XENBUS: Device not ready: %s\n",
1306 + xendev->nodename);
1307 +
1308 return 0;
1309 }
1310
1311 @@ -919,7 +1090,7 @@ static int print_device_status(struct de
1312 static int ready_to_wait_for_devices;
1313
1314 /*
1315 - * On a 10 second timeout, wait for all devices currently configured. We need
1316 + * On a 5-minute timeout, wait for all devices currently configured. We need
1317 * to do this to guarantee that the filesystems and / or network devices
1318 * needed for boot are available, before we can allow the boot to proceed.
1319 *
1320 @@ -934,18 +1105,30 @@ static int ready_to_wait_for_devices;
1321 */
1322 static void wait_for_devices(struct xenbus_driver *xendrv)
1323 {
1324 - unsigned long timeout = jiffies + 10*HZ;
1325 + unsigned long start = jiffies;
1326 struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
1327 + unsigned int seconds_waited = 0;
1328
1329 if (!ready_to_wait_for_devices || !is_running_on_xen())
1330 return;
1331
1332 while (exists_disconnected_device(drv)) {
1333 - if (time_after(jiffies, timeout))
1334 - break;
1335 + if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
1336 + if (!seconds_waited)
1337 + printk(KERN_WARNING "XENBUS: Waiting for "
1338 + "devices to initialise: ");
1339 + seconds_waited += 5;
1340 + printk("%us...", 300 - seconds_waited);
1341 + if (seconds_waited == 300)
1342 + break;
1343 + }
1344 +
1345 schedule_timeout_interruptible(HZ/10);
1346 }
1347
1348 + if (seconds_waited)
1349 + printk("\n");
1350 +
1351 bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
1352 print_device_status);
1353 }
1354 @@ -953,10 +1136,18 @@ static void wait_for_devices(struct xenb
1355 #ifndef MODULE
1356 static int __init boot_wait_for_devices(void)
1357 {
1358 - ready_to_wait_for_devices = 1;
1359 - wait_for_devices(NULL);
1360 + if (!xenbus_frontend.error) {
1361 + ready_to_wait_for_devices = 1;
1362 + wait_for_devices(NULL);
1363 + }
1364 return 0;
1365 }
1366
1367 late_initcall(boot_wait_for_devices);
1368 #endif
1369 +
1370 +int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *))
1371 +{
1372 + return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn);
1373 +}
1374 +EXPORT_SYMBOL_GPL(xenbus_for_each_frontend);
1375 Index: head-2008-11-25/drivers/xen/xenbus/xenbus_probe.h
1376 ===================================================================
1377 --- head-2008-11-25.orig/drivers/xen/xenbus/xenbus_probe.h 2008-11-25 12:33:06.000000000 +0100
1378 +++ head-2008-11-25/drivers/xen/xenbus/xenbus_probe.h 2008-11-25 12:35:56.000000000 +0100
1379 @@ -34,41 +34,42 @@
1380 #ifndef _XENBUS_PROBE_H
1381 #define _XENBUS_PROBE_H
1382
1383 -#ifdef CONFIG_XEN_BACKEND
1384 +#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
1385 extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
1386 extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
1387 extern void xenbus_backend_probe_and_watch(void);
1388 -extern int xenbus_backend_bus_register(void);
1389 -extern void xenbus_backend_bus_unregister(void);
1390 +extern void xenbus_backend_bus_register(void);
1391 +extern void xenbus_backend_device_register(void);
1392 #else
1393 static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
1394 static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
1395 static inline void xenbus_backend_probe_and_watch(void) {}
1396 -static inline int xenbus_backend_bus_register(void) { return 0; }
1397 -static inline void xenbus_backend_bus_unregister(void) {}
1398 +static inline void xenbus_backend_bus_register(void) {}
1399 +static inline void xenbus_backend_device_register(void) {}
1400 #endif
1401
1402 struct xen_bus_type
1403 {
1404 char *root;
1405 + int error;
1406 unsigned int levels;
1407 int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
1408 int (*probe)(const char *type, const char *dir);
1409 struct bus_type bus;
1410 + struct device dev;
1411 };
1412
1413 extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
1414 extern int xenbus_dev_probe(struct device *_dev);
1415 extern int xenbus_dev_remove(struct device *_dev);
1416 extern int xenbus_register_driver_common(struct xenbus_driver *drv,
1417 - struct xen_bus_type *bus,
1418 - struct module *owner,
1419 - const char *mod_name);
1420 + struct xen_bus_type *bus);
1421 extern int xenbus_probe_node(struct xen_bus_type *bus,
1422 const char *type,
1423 const char *nodename);
1424 extern int xenbus_probe_devices(struct xen_bus_type *bus);
1425
1426 -extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
1427 +extern void dev_changed(const char *node, struct xen_bus_type *bus);
1428
1429 #endif
1430 +
1431 Index: head-2008-11-25/drivers/xen/xenbus/xenbus_xs.c
1432 ===================================================================
1433 --- head-2008-11-25.orig/drivers/xen/xenbus/xenbus_xs.c 2008-11-25 12:33:06.000000000 +0100
1434 +++ head-2008-11-25/drivers/xen/xenbus/xenbus_xs.c 2008-11-25 12:35:56.000000000 +0100
1435 @@ -47,6 +47,14 @@
1436 #include <xen/xenbus.h>
1437 #include "xenbus_comms.h"
1438
1439 +#ifdef HAVE_XEN_PLATFORM_COMPAT_H
1440 +#include <xen/platform-compat.h>
1441 +#endif
1442 +
1443 +#ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */
1444 +#define PF_NOFREEZE 0
1445 +#endif
1446 +
1447 struct xs_stored_msg {
1448 struct list_head list;
1449
1450 @@ -108,7 +116,7 @@ static DEFINE_SPINLOCK(watch_events_lock
1451 * carrying out work.
1452 */
1453 static pid_t xenwatch_pid;
1454 -static DEFINE_MUTEX(xenwatch_mutex);
1455 +/* static */ DEFINE_MUTEX(xenwatch_mutex);
1456 static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
1457
1458 static int get_error(const char *errorstring)
1459 @@ -177,7 +185,7 @@ void *xenbus_dev_request_and_reply(struc
1460
1461 mutex_unlock(&xs_state.request_mutex);
1462
1463 - if ((msg->type == XS_TRANSACTION_END) ||
1464 + if ((req_msg.type == XS_TRANSACTION_END) ||
1465 ((req_msg.type == XS_TRANSACTION_START) &&
1466 (msg->type == XS_ERROR)))
1467 up_read(&xs_state.transaction_mutex);
1468 @@ -213,7 +221,7 @@ static void *xs_talkv(struct xenbus_tran
1469 }
1470
1471 for (i = 0; i < num_vecs; i++) {
1472 - err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
1473 + err = xb_write(iovec[i].iov_base, iovec[i].iov_len);;
1474 if (err) {
1475 mutex_unlock(&xs_state.request_mutex);
1476 return ERR_PTR(err);
1477 @@ -294,7 +302,7 @@ static char **split(char *strings, unsig
1478 char *p, **ret;
1479
1480 /* Count the strings. */
1481 - *num = count_strings(strings, len);
1482 + *num = count_strings(strings, len) + 1;
1483
1484 /* Transfer to one big alloc for easy freeing. */
1485 ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH);
1486 @@ -308,6 +316,7 @@ static char **split(char *strings, unsig
1487 strings = (char *)&ret[*num];
1488 for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
1489 ret[(*num)++] = p;
1490 + ret[*num] = strings + len;
1491
1492 return ret;
1493 }
1494 @@ -498,7 +507,7 @@ int xenbus_printf(struct xenbus_transact
1495 #define PRINTF_BUFFER_SIZE 4096
1496 char *printf_buffer;
1497
1498 - printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
1499 + printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
1500 if (printf_buffer == NULL)
1501 return -ENOMEM;
1502
1503 @@ -621,6 +630,8 @@ void unregister_xenbus_watch(struct xenb
1504 char token[sizeof(watch) * 2 + 1];
1505 int err;
1506
1507 + BUG_ON(watch->flags & XBWF_new_thread);
1508 +
1509 sprintf(token, "%lX", (long)watch);
1510
1511 down_read(&xs_state.watch_mutex);
1512 @@ -638,11 +649,6 @@ void unregister_xenbus_watch(struct xenb
1513
1514 up_read(&xs_state.watch_mutex);
1515
1516 - /* Make sure there are no callbacks running currently (unless
1517 - its us) */
1518 - if (current->pid != xenwatch_pid)
1519 - mutex_lock(&xenwatch_mutex);
1520 -
1521 /* Cancel pending watch events. */
1522 spin_lock(&watch_events_lock);
1523 list_for_each_entry_safe(msg, tmp, &watch_events, list) {
1524 @@ -654,8 +660,11 @@ void unregister_xenbus_watch(struct xenb
1525 }
1526 spin_unlock(&watch_events_lock);
1527
1528 - if (current->pid != xenwatch_pid)
1529 + /* Flush any currently-executing callback, unless we are it. :-) */
1530 + if (current->pid != xenwatch_pid) {
1531 + mutex_lock(&xenwatch_mutex);
1532 mutex_unlock(&xenwatch_mutex);
1533 + }
1534 }
1535 EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
1536
1537 @@ -693,11 +702,30 @@ void xs_suspend_cancel(void)
1538 up_write(&xs_state.transaction_mutex);
1539 }
1540
1541 +static int xenwatch_handle_callback(void *data)
1542 +{
1543 + struct xs_stored_msg *msg = data;
1544 +
1545 + msg->u.watch.handle->callback(msg->u.watch.handle,
1546 + (const char **)msg->u.watch.vec,
1547 + msg->u.watch.vec_size);
1548 +
1549 + kfree(msg->u.watch.vec);
1550 + kfree(msg);
1551 +
1552 + /* Kill this kthread if we were spawned just for this callback. */
1553 + if (current->pid != xenwatch_pid)
1554 + do_exit(0);
1555 +
1556 + return 0;
1557 +}
1558 +
1559 static int xenwatch_thread(void *unused)
1560 {
1561 struct list_head *ent;
1562 struct xs_stored_msg *msg;
1563
1564 + current->flags |= PF_NOFREEZE;
1565 for (;;) {
1566 wait_event_interruptible(watch_events_waitq,
1567 !list_empty(&watch_events));
1568 @@ -713,17 +741,29 @@ static int xenwatch_thread(void *unused)
1569 list_del(ent);
1570 spin_unlock(&watch_events_lock);
1571
1572 - if (ent != &watch_events) {
1573 - msg = list_entry(ent, struct xs_stored_msg, list);
1574 - msg->u.watch.handle->callback(
1575 - msg->u.watch.handle,
1576 - (const char **)msg->u.watch.vec,
1577 - msg->u.watch.vec_size);
1578 - kfree(msg->u.watch.vec);
1579 - kfree(msg);
1580 + if (ent == &watch_events) {
1581 + mutex_unlock(&xenwatch_mutex);
1582 + continue;
1583 }
1584
1585 - mutex_unlock(&xenwatch_mutex);
1586 + msg = list_entry(ent, struct xs_stored_msg, list);
1587 +
1588 + /*
1589 + * Unlock the mutex before running an XBWF_new_thread
1590 + * handler. kthread_run can block which can deadlock
1591 + * against unregister_xenbus_watch() if we need to
1592 + * unregister other watches in order to make
1593 + * progress. This can occur on resume before the swap
1594 + * device is attached.
1595 + */
1596 + if (msg->u.watch.handle->flags & XBWF_new_thread) {
1597 + mutex_unlock(&xenwatch_mutex);
1598 + kthread_run(xenwatch_handle_callback,
1599 + msg, "xenwatch_cb");
1600 + } else {
1601 + xenwatch_handle_callback(msg);
1602 + mutex_unlock(&xenwatch_mutex);
1603 + }
1604 }
1605
1606 return 0;
1607 @@ -817,6 +857,7 @@ static int xenbus_thread(void *unused)
1608 {
1609 int err;
1610
1611 + current->flags |= PF_NOFREEZE;
1612 for (;;) {
1613 err = process_msg();
1614 if (err)
1615 Index: head-2008-11-25/include/xen/balloon.h
1616 ===================================================================
1617 --- head-2008-11-25.orig/include/xen/balloon.h 2008-11-25 12:33:06.000000000 +0100
1618 +++ head-2008-11-25/include/xen/balloon.h 2008-11-25 12:35:56.000000000 +0100
1619 @@ -31,12 +31,9 @@
1620 * IN THE SOFTWARE.
1621 */
1622
1623 -#ifndef __XEN_BALLOON_H__
1624 -#define __XEN_BALLOON_H__
1625 +#ifndef __ASM_BALLOON_H__
1626 +#define __ASM_BALLOON_H__
1627
1628 -#include <linux/spinlock.h>
1629 -
1630 -#if 0
1631 /*
1632 * Inform the balloon driver that it should allow some slop for device-driver
1633 * memory activities.
1634 @@ -56,6 +53,5 @@ void balloon_release_driver_page(struct
1635 extern spinlock_t balloon_lock;
1636 #define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags)
1637 #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
1638 -#endif
1639
1640 -#endif /* __XEN_BALLOON_H__ */
1641 +#endif /* __ASM_BALLOON_H__ */
1642 Index: head-2008-11-25/include/xen/interface/callback.h
1643 ===================================================================
1644 --- head-2008-11-25.orig/include/xen/interface/callback.h 2008-11-25 12:33:06.000000000 +0100
1645 +++ head-2008-11-25/include/xen/interface/callback.h 2008-11-25 12:35:56.000000000 +0100
1646 @@ -86,6 +86,8 @@ struct callback_register {
1647 uint16_t flags;
1648 xen_callback_t address;
1649 };
1650 +typedef struct callback_register callback_register_t;
1651 +DEFINE_XEN_GUEST_HANDLE(callback_register_t);
1652
1653 /*
1654 * Unregister a callback.
1655 @@ -98,5 +100,22 @@ struct callback_unregister {
1656 uint16_t type;
1657 uint16_t _unused;
1658 };
1659 +typedef struct callback_unregister callback_unregister_t;
1660 +DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
1661 +
1662 +#if __XEN_INTERFACE_VERSION__ < 0x00030207
1663 +#undef CALLBACKTYPE_sysenter
1664 +#define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated
1665 +#endif
1666
1667 #endif /* __XEN_PUBLIC_CALLBACK_H__ */
1668 +
1669 +/*
1670 + * Local variables:
1671 + * mode: C
1672 + * c-set-style: "BSD"
1673 + * c-basic-offset: 4
1674 + * tab-width: 4
1675 + * indent-tabs-mode: nil
1676 + * End:
1677 + */
1678 Index: head-2008-11-25/include/xen/interface/elfnote.h
1679 ===================================================================
1680 --- head-2008-11-25.orig/include/xen/interface/elfnote.h 2008-11-25 12:33:06.000000000 +0100
1681 +++ head-2008-11-25/include/xen/interface/elfnote.h 2008-11-25 12:35:56.000000000 +0100
1682 @@ -3,6 +3,24 @@
1683 *
1684 * Definitions used for the Xen ELF notes.
1685 *
1686 + * Permission is hereby granted, free of charge, to any person obtaining a copy
1687 + * of this software and associated documentation files (the "Software"), to
1688 + * deal in the Software without restriction, including without limitation the
1689 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
1690 + * sell copies of the Software, and to permit persons to whom the Software is
1691 + * furnished to do so, subject to the following conditions:
1692 + *
1693 + * The above copyright notice and this permission notice shall be included in
1694 + * all copies or substantial portions of the Software.
1695 + *
1696 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1697 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1698 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1699 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1700 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1701 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
1702 + * DEALINGS IN THE SOFTWARE.
1703 + *
1704 * Copyright (c) 2006, Ian Campbell, XenSource Ltd.
1705 */
1706
1707 @@ -10,7 +28,7 @@
1708 #define __XEN_PUBLIC_ELFNOTE_H__
1709
1710 /*
1711 - * The notes should live in a SHT_NOTE segment and have "Xen" in the
1712 + * The notes should live in a PT_NOTE segment and have "Xen" in the
1713 * name field.
1714 *
1715 * Numeric types are either 4 or 8 bytes depending on the content of
1716 @@ -22,8 +40,6 @@
1717
1718 /*
1719 * NAME=VALUE pair (string).
1720 - *
1721 - * LEGACY: FEATURES and PAE
1722 */
1723 #define XEN_ELFNOTE_INFO 0
1724
1725 @@ -90,7 +106,12 @@
1726 #define XEN_ELFNOTE_LOADER 8
1727
1728 /*
1729 - * The kernel supports PAE (x86/32 only, string = "yes" or "no").
1730 + * The kernel supports PAE (x86/32 only, string = "yes", "no" or
1731 + * "bimodal").
1732 + *
1733 + * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting
1734 + * may be given as "yes,bimodal" which will cause older Xen to treat
1735 + * this kernel as PAE.
1736 *
1737 * LEGACY: PAE (n.b. The legacy interface included a provision to
1738 * indicate 'extended-cr3' support allowing L3 page tables to be
1739 @@ -140,6 +161,65 @@
1740 */
1741 #define XEN_ELFNOTE_SUSPEND_CANCEL 14
1742
1743 +/*
1744 + * The number of the highest elfnote defined.
1745 + */
1746 +#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUSPEND_CANCEL
1747 +
1748 +/*
1749 + * System information exported through crash notes.
1750 + *
1751 + * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
1752 + * note in case of a system crash. This note will contain various
1753 + * information about the system, see xen/include/xen/elfcore.h.
1754 + */
1755 +#define XEN_ELFNOTE_CRASH_INFO 0x1000001
1756 +
1757 +/*
1758 + * System registers exported through crash notes.
1759 + *
1760 + * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
1761 + * note per cpu in case of a system crash. This note is architecture
1762 + * specific and will contain registers not saved in the "CORE" note.
1763 + * See xen/include/xen/elfcore.h for more information.
1764 + */
1765 +#define XEN_ELFNOTE_CRASH_REGS 0x1000002
1766 +
1767 +
1768 +/*
1769 + * xen dump-core none note.
1770 + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE
1771 + * in its dump file to indicate that the file is xen dump-core
1772 + * file. This note doesn't have any other information.
1773 + * See tools/libxc/xc_core.h for more information.
1774 + */
1775 +#define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000
1776 +
1777 +/*
1778 + * xen dump-core header note.
1779 + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER
1780 + * in its dump file.
1781 + * See tools/libxc/xc_core.h for more information.
1782 + */
1783 +#define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001
1784 +
1785 +/*
1786 + * xen dump-core xen version note.
1787 + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION
1788 + * in its dump file. It contains the xen version obtained via the
1789 + * XENVER hypercall.
1790 + * See tools/libxc/xc_core.h for more information.
1791 + */
1792 +#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002
1793 +
1794 +/*
1795 + * xen dump-core format version note.
1796 + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION
1797 + * in its dump file. It contains a format version identifier.
1798 + * See tools/libxc/xc_core.h for more information.
1799 + */
1800 +#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003
1801 +
1802 #endif /* __XEN_PUBLIC_ELFNOTE_H__ */
1803
1804 /*
1805 Index: head-2008-11-25/include/xen/interface/event_channel.h
1806 ===================================================================
1807 --- head-2008-11-25.orig/include/xen/interface/event_channel.h 2008-11-25 12:33:06.000000000 +0100
1808 +++ head-2008-11-25/include/xen/interface/event_channel.h 2008-11-25 12:35:56.000000000 +0100
1809 @@ -3,14 +3,39 @@
1810 *
1811 * Event channels between domains.
1812 *
1813 + * Permission is hereby granted, free of charge, to any person obtaining a copy
1814 + * of this software and associated documentation files (the "Software"), to
1815 + * deal in the Software without restriction, including without limitation the
1816 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
1817 + * sell copies of the Software, and to permit persons to whom the Software is
1818 + * furnished to do so, subject to the following conditions:
1819 + *
1820 + * The above copyright notice and this permission notice shall be included in
1821 + * all copies or substantial portions of the Software.
1822 + *
1823 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1824 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1825 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1826 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1827 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1828 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
1829 + * DEALINGS IN THE SOFTWARE.
1830 + *
1831 * Copyright (c) 2003-2004, K A Fraser.
1832 */
1833
1834 #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
1835 #define __XEN_PUBLIC_EVENT_CHANNEL_H__
1836
1837 +/*
1838 + * Prototype for this hypercall is:
1839 + * int event_channel_op(int cmd, void *args)
1840 + * @cmd == EVTCHNOP_??? (event-channel operation).
1841 + * @args == Operation-specific extra arguments (NULL if none).
1842 + */
1843 +
1844 typedef uint32_t evtchn_port_t;
1845 -DEFINE_GUEST_HANDLE(evtchn_port_t);
1846 +DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
1847
1848 /*
1849 * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
1850 @@ -20,13 +45,14 @@ DEFINE_GUEST_HANDLE(evtchn_port_t);
1851 * 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
1852 * 2. <rdom> may be DOMID_SELF, allowing loopback connections.
1853 */
1854 -#define EVTCHNOP_alloc_unbound 6
1855 +#define EVTCHNOP_alloc_unbound 6
1856 struct evtchn_alloc_unbound {
1857 - /* IN parameters */
1858 - domid_t dom, remote_dom;
1859 - /* OUT parameters */
1860 - evtchn_port_t port;
1861 + /* IN parameters */
1862 + domid_t dom, remote_dom;
1863 + /* OUT parameters */
1864 + evtchn_port_t port;
1865 };
1866 +typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
1867
1868 /*
1869 * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
1870 @@ -39,29 +65,35 @@ struct evtchn_alloc_unbound {
1871 */
1872 #define EVTCHNOP_bind_interdomain 0
1873 struct evtchn_bind_interdomain {
1874 - /* IN parameters. */
1875 - domid_t remote_dom;
1876 - evtchn_port_t remote_port;
1877 - /* OUT parameters. */
1878 - evtchn_port_t local_port;
1879 + /* IN parameters. */
1880 + domid_t remote_dom;
1881 + evtchn_port_t remote_port;
1882 + /* OUT parameters. */
1883 + evtchn_port_t local_port;
1884 };
1885 +typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
1886
1887 /*
1888 * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
1889 * vcpu.
1890 * NOTES:
1891 - * 1. A virtual IRQ may be bound to at most one event channel per vcpu.
1892 - * 2. The allocated event channel is bound to the specified vcpu. The binding
1893 - * may not be changed.
1894 + * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
1895 + * in xen.h for the classification of each VIRQ.
1896 + * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be
1897 + * re-bound via EVTCHNOP_bind_vcpu.
1898 + * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
1899 + * The allocated event channel is bound to the specified vcpu and the
1900 + * binding cannot be changed.
1901 */
1902 -#define EVTCHNOP_bind_virq 1
1903 +#define EVTCHNOP_bind_virq 1
1904 struct evtchn_bind_virq {
1905 - /* IN parameters. */
1906 - uint32_t virq;
1907 - uint32_t vcpu;
1908 - /* OUT parameters. */
1909 - evtchn_port_t port;
1910 + /* IN parameters. */
1911 + uint32_t virq;
1912 + uint32_t vcpu;
1913 + /* OUT parameters. */
1914 + evtchn_port_t port;
1915 };
1916 +typedef struct evtchn_bind_virq evtchn_bind_virq_t;
1917
1918 /*
1919 * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
1920 @@ -69,15 +101,16 @@ struct evtchn_bind_virq {
1921 * 1. A physical IRQ may be bound to at most one event channel per domain.
1922 * 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
1923 */
1924 -#define EVTCHNOP_bind_pirq 2
1925 +#define EVTCHNOP_bind_pirq 2
1926 struct evtchn_bind_pirq {
1927 - /* IN parameters. */
1928 - uint32_t pirq;
1929 + /* IN parameters. */
1930 + uint32_t pirq;
1931 #define BIND_PIRQ__WILL_SHARE 1
1932 - uint32_t flags; /* BIND_PIRQ__* */
1933 - /* OUT parameters. */
1934 - evtchn_port_t port;
1935 + uint32_t flags; /* BIND_PIRQ__* */
1936 + /* OUT parameters. */
1937 + evtchn_port_t port;
1938 };
1939 +typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
1940
1941 /*
1942 * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
1943 @@ -85,33 +118,36 @@ struct evtchn_bind_pirq {
1944 * 1. The allocated event channel is bound to the specified vcpu. The binding
1945 * may not be changed.
1946 */
1947 -#define EVTCHNOP_bind_ipi 7
1948 +#define EVTCHNOP_bind_ipi 7
1949 struct evtchn_bind_ipi {
1950 - uint32_t vcpu;
1951 - /* OUT parameters. */
1952 - evtchn_port_t port;
1953 + uint32_t vcpu;
1954 + /* OUT parameters. */
1955 + evtchn_port_t port;
1956 };
1957 +typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
1958
1959 /*
1960 * EVTCHNOP_close: Close a local event channel <port>. If the channel is
1961 * interdomain then the remote end is placed in the unbound state
1962 * (EVTCHNSTAT_unbound), awaiting a new connection.
1963 */
1964 -#define EVTCHNOP_close 3
1965 +#define EVTCHNOP_close 3
1966 struct evtchn_close {
1967 - /* IN parameters. */
1968 - evtchn_port_t port;
1969 + /* IN parameters. */
1970 + evtchn_port_t port;
1971 };
1972 +typedef struct evtchn_close evtchn_close_t;
1973
1974 /*
1975 * EVTCHNOP_send: Send an event to the remote end of the channel whose local
1976 * endpoint is <port>.
1977 */
1978 -#define EVTCHNOP_send 4
1979 +#define EVTCHNOP_send 4
1980 struct evtchn_send {
1981 - /* IN parameters. */
1982 - evtchn_port_t port;
1983 + /* IN parameters. */
1984 + evtchn_port_t port;
1985 };
1986 +typedef struct evtchn_send evtchn_send_t;
1987
1988 /*
1989 * EVTCHNOP_status: Get the current status of the communication channel which
1990 @@ -121,75 +157,108 @@ struct evtchn_send {
1991 * 2. Only a sufficiently-privileged domain may obtain the status of an event
1992 * channel for which <dom> is not DOMID_SELF.
1993 */
1994 -#define EVTCHNOP_status 5
1995 +#define EVTCHNOP_status 5
1996 struct evtchn_status {
1997 - /* IN parameters */
1998 - domid_t dom;
1999 - evtchn_port_t port;
2000 - /* OUT parameters */
2001 -#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
2002 -#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
2003 -#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
2004 -#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
2005 -#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
2006 -#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
2007 - uint32_t status;
2008 - uint32_t vcpu; /* VCPU to which this channel is bound. */
2009 - union {
2010 - struct {
2011 - domid_t dom;
2012 - } unbound; /* EVTCHNSTAT_unbound */
2013 - struct {
2014 - domid_t dom;
2015 - evtchn_port_t port;
2016 - } interdomain; /* EVTCHNSTAT_interdomain */
2017 - uint32_t pirq; /* EVTCHNSTAT_pirq */
2018 - uint32_t virq; /* EVTCHNSTAT_virq */
2019 - } u;
2020 + /* IN parameters */
2021 + domid_t dom;
2022 + evtchn_port_t port;
2023 + /* OUT parameters */
2024 +#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
2025 +#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
2026 +#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
2027 +#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
2028 +#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
2029 +#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
2030 + uint32_t status;
2031 + uint32_t vcpu; /* VCPU to which this channel is bound. */
2032 + union {
2033 + struct {
2034 + domid_t dom;
2035 + } unbound; /* EVTCHNSTAT_unbound */
2036 + struct {
2037 + domid_t dom;
2038 + evtchn_port_t port;
2039 + } interdomain; /* EVTCHNSTAT_interdomain */
2040 + uint32_t pirq; /* EVTCHNSTAT_pirq */
2041 + uint32_t virq; /* EVTCHNSTAT_virq */
2042 + } u;
2043 };
2044 +typedef struct evtchn_status evtchn_status_t;
2045
2046 /*
2047 * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
2048 * event is pending.
2049 * NOTES:
2050 - * 1. IPI- and VIRQ-bound channels always notify the vcpu that initialised
2051 - * the binding. This binding cannot be changed.
2052 - * 2. All other channels notify vcpu0 by default. This default is set when
2053 + * 1. IPI-bound channels always notify the vcpu specified at bind time.
2054 + * This binding cannot be changed.
2055 + * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
2056 + * This binding cannot be changed.
2057 + * 3. All other channels notify vcpu0 by default. This default is set when
2058 * the channel is allocated (a port that is freed and subsequently reused
2059 * has its binding reset to vcpu0).
2060 */
2061 -#define EVTCHNOP_bind_vcpu 8
2062 +#define EVTCHNOP_bind_vcpu 8
2063 struct evtchn_bind_vcpu {
2064 - /* IN parameters. */
2065 - evtchn_port_t port;
2066 - uint32_t vcpu;
2067 + /* IN parameters. */
2068 + evtchn_port_t port;
2069 + uint32_t vcpu;
2070 };
2071 +typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
2072
2073 /*
2074 * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
2075 * a notification to the appropriate VCPU if an event is pending.
2076 */
2077 -#define EVTCHNOP_unmask 9
2078 +#define EVTCHNOP_unmask 9
2079 struct evtchn_unmask {
2080 - /* IN parameters. */
2081 - evtchn_port_t port;
2082 + /* IN parameters. */
2083 + evtchn_port_t port;
2084 +};
2085 +typedef struct evtchn_unmask evtchn_unmask_t;
2086 +
2087 +/*
2088 + * EVTCHNOP_reset: Close all event channels associated with specified domain.
2089 + * NOTES:
2090 + * 1. <dom> may be specified as DOMID_SELF.
2091 + * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
2092 + */
2093 +#define EVTCHNOP_reset 10
2094 +struct evtchn_reset {
2095 + /* IN parameters. */
2096 + domid_t dom;
2097 };
2098 +typedef struct evtchn_reset evtchn_reset_t;
2099
2100 +/*
2101 + * Argument to event_channel_op_compat() hypercall. Superceded by new
2102 + * event_channel_op() hypercall since 0x00030202.
2103 + */
2104 struct evtchn_op {
2105 - uint32_t cmd; /* EVTCHNOP_* */
2106 - union {
2107 - struct evtchn_alloc_unbound alloc_unbound;
2108 - struct evtchn_bind_interdomain bind_interdomain;
2109 - struct evtchn_bind_virq bind_virq;
2110 - struct evtchn_bind_pirq bind_pirq;
2111 - struct evtchn_bind_ipi bind_ipi;
2112 - struct evtchn_close close;
2113 - struct evtchn_send send;
2114 - struct evtchn_status status;
2115 - struct evtchn_bind_vcpu bind_vcpu;
2116 - struct evtchn_unmask unmask;
2117 - } u;
2118 + uint32_t cmd; /* EVTCHNOP_* */
2119 + union {
2120 + struct evtchn_alloc_unbound alloc_unbound;
2121 + struct evtchn_bind_interdomain bind_interdomain;
2122 + struct evtchn_bind_virq bind_virq;
2123 + struct evtchn_bind_pirq bind_pirq;
2124 + struct evtchn_bind_ipi bind_ipi;
2125 + struct evtchn_close close;
2126 + struct evtchn_send send;
2127 + struct evtchn_status status;
2128 + struct evtchn_bind_vcpu bind_vcpu;
2129 + struct evtchn_unmask unmask;
2130 + } u;
2131 };
2132 -DEFINE_GUEST_HANDLE_STRUCT(evtchn_op);
2133 +typedef struct evtchn_op evtchn_op_t;
2134 +DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
2135
2136 #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
2137 +
2138 +/*
2139 + * Local variables:
2140 + * mode: C
2141 + * c-set-style: "BSD"
2142 + * c-basic-offset: 4
2143 + * tab-width: 4
2144 + * indent-tabs-mode: nil
2145 + * End:
2146 + */
2147 Index: head-2008-11-25/include/xen/interface/features.h
2148 ===================================================================
2149 --- head-2008-11-25.orig/include/xen/interface/features.h 2008-11-25 12:33:06.000000000 +0100
2150 +++ head-2008-11-25/include/xen/interface/features.h 2008-11-25 12:22:34.000000000 +0100
2151 @@ -3,6 +3,24 @@
2152 *
2153 * Feature flags, reported by XENVER_get_features.
2154 *
2155 + * Permission is hereby granted, free of charge, to any person obtaining a copy
2156 + * of this software and associated documentation files (the "Software"), to
2157 + * deal in the Software without restriction, including without limitation the
2158 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
2159 + * sell copies of the Software, and to permit persons to whom the Software is
2160 + * furnished to do so, subject to the following conditions:
2161 + *
2162 + * The above copyright notice and this permission notice shall be included in
2163 + * all copies or substantial portions of the Software.
2164 + *
2165 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2166 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2167 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
2168 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2169 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2170 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2171 + * DEALINGS IN THE SOFTWARE.
2172 + *
2173 * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
2174 */
2175
2176 @@ -41,6 +59,25 @@
2177 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
2178 #define XENFEAT_mmu_pt_update_preserve_ad 5
2179
2180 +/* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */
2181 +#define XENFEAT_highmem_assist 6
2182 +
2183 +/*
2184 + * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
2185 + * available pte bits.
2186 + */
2187 +#define XENFEAT_gnttab_map_avail_bits 7
2188 +
2189 #define XENFEAT_NR_SUBMAPS 1
2190
2191 #endif /* __XEN_PUBLIC_FEATURES_H__ */
2192 +
2193 +/*
2194 + * Local variables:
2195 + * mode: C
2196 + * c-set-style: "BSD"
2197 + * c-basic-offset: 4
2198 + * tab-width: 4
2199 + * indent-tabs-mode: nil
2200 + * End:
2201 + */
2202 Index: head-2008-11-25/include/xen/interface/grant_table.h
2203 ===================================================================
2204 --- head-2008-11-25.orig/include/xen/interface/grant_table.h 2008-11-25 12:33:06.000000000 +0100
2205 +++ head-2008-11-25/include/xen/interface/grant_table.h 2008-11-25 12:22:34.000000000 +0100
2206 @@ -100,6 +100,7 @@ struct grant_entry {
2207 */
2208 uint32_t frame;
2209 };
2210 +typedef struct grant_entry grant_entry_t;
2211
2212 /*
2213 * Type of grant entry.
2214 @@ -118,6 +119,7 @@ struct grant_entry {
2215 * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
2216 * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
2217 * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
2218 + * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST]
2219 */
2220 #define _GTF_readonly (2)
2221 #define GTF_readonly (1U<<_GTF_readonly)
2222 @@ -125,6 +127,12 @@ struct grant_entry {
2223 #define GTF_reading (1U<<_GTF_reading)
2224 #define _GTF_writing (4)
2225 #define GTF_writing (1U<<_GTF_writing)
2226 +#define _GTF_PWT (5)
2227 +#define GTF_PWT (1U<<_GTF_PWT)
2228 +#define _GTF_PCD (6)
2229 +#define GTF_PCD (1U<<_GTF_PCD)
2230 +#define _GTF_PAT (7)
2231 +#define GTF_PAT (1U<<_GTF_PAT)
2232
2233 /*
2234 * Subflags for GTF_accept_transfer:
2235 @@ -185,7 +193,8 @@ struct gnttab_map_grant_ref {
2236 grant_handle_t handle;
2237 uint64_t dev_bus_addr;
2238 };
2239 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref);
2240 +typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
2241 +DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
2242
2243 /*
2244 * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
2245 @@ -207,7 +216,8 @@ struct gnttab_unmap_grant_ref {
2246 /* OUT parameters. */
2247 int16_t status; /* GNTST_* */
2248 };
2249 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref);
2250 +typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
2251 +DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
2252
2253 /*
2254 * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
2255 @@ -225,9 +235,10 @@ struct gnttab_setup_table {
2256 uint32_t nr_frames;
2257 /* OUT parameters. */
2258 int16_t status; /* GNTST_* */
2259 - GUEST_HANDLE(ulong) frame_list;
2260 + XEN_GUEST_HANDLE(ulong) frame_list;
2261 };
2262 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table);
2263 +typedef struct gnttab_setup_table gnttab_setup_table_t;
2264 +DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
2265
2266 /*
2267 * GNTTABOP_dump_table: Dump the contents of the grant table to the
2268 @@ -240,7 +251,8 @@ struct gnttab_dump_table {
2269 /* OUT parameters. */
2270 int16_t status; /* GNTST_* */
2271 };
2272 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table);
2273 +typedef struct gnttab_dump_table gnttab_dump_table_t;
2274 +DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
2275
2276 /*
2277 * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
2278 @@ -253,13 +265,15 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_t
2279 #define GNTTABOP_transfer 4
2280 struct gnttab_transfer {
2281 /* IN parameters. */
2282 - unsigned long mfn;
2283 + xen_pfn_t mfn;
2284 domid_t domid;
2285 grant_ref_t ref;
2286 /* OUT parameters. */
2287 int16_t status;
2288 };
2289 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer);
2290 +typedef struct gnttab_transfer gnttab_transfer_t;
2291 +DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
2292 +
2293
2294 /*
2295 * GNTTABOP_copy: Hypervisor based copy
2296 @@ -285,22 +299,22 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_transf
2297 #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
2298
2299 #define GNTTABOP_copy 5
2300 -struct gnttab_copy {
2301 - /* IN parameters. */
2302 - struct {
2303 - union {
2304 - grant_ref_t ref;
2305 - unsigned long gmfn;
2306 - } u;
2307 - domid_t domid;
2308 - uint16_t offset;
2309 - } source, dest;
2310 - uint16_t len;
2311 - uint16_t flags; /* GNTCOPY_* */
2312 - /* OUT parameters. */
2313 - int16_t status;
2314 -};
2315 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy);
2316 +typedef struct gnttab_copy {
2317 + /* IN parameters. */
2318 + struct {
2319 + union {
2320 + grant_ref_t ref;
2321 + xen_pfn_t gmfn;
2322 + } u;
2323 + domid_t domid;
2324 + uint16_t offset;
2325 + } source, dest;
2326 + uint16_t len;
2327 + uint16_t flags; /* GNTCOPY_* */
2328 + /* OUT parameters. */
2329 + int16_t status;
2330 +} gnttab_copy_t;
2331 +DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
2332
2333 /*
2334 * GNTTABOP_query_size: Query the current and maximum sizes of the shared
2335 @@ -318,10 +332,35 @@ struct gnttab_query_size {
2336 uint32_t max_nr_frames;
2337 int16_t status; /* GNTST_* */
2338 };
2339 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size);
2340 +typedef struct gnttab_query_size gnttab_query_size_t;
2341 +DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
2342
2343 /*
2344 - * Bitfield values for update_pin_status.flags.
2345 + * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
2346 + * tracked by <handle> but atomically replace the page table entry with one
2347 + * pointing to the machine address under <new_addr>. <new_addr> will be
2348 + * redirected to the null entry.
2349 + * NOTES:
2350 + * 1. The call may fail in an undefined manner if either mapping is not
2351 + * tracked by <handle>.
2352 + * 2. After executing a batch of unmaps, it is guaranteed that no stale
2353 + * mappings will remain in the device or host TLBs.
2354 + */
2355 +#define GNTTABOP_unmap_and_replace 7
2356 +struct gnttab_unmap_and_replace {
2357 + /* IN parameters. */
2358 + uint64_t host_addr;
2359 + uint64_t new_addr;
2360 + grant_handle_t handle;
2361 + /* OUT parameters. */
2362 + int16_t status; /* GNTST_* */
2363 +};
2364 +typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
2365 +DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
2366 +
2367 +
2368 +/*
2369 + * Bitfield values for gnttab_map_grant_ref.flags.
2370 */
2371 /* Map the grant entry for access by I/O devices. */
2372 #define _GNTMAP_device_map (0)
2373 @@ -349,6 +388,13 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_
2374 #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
2375
2376 /*
2377 + * Bits to be placed in guest kernel available PTE bits (architecture
2378 + * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
2379 + */
2380 +#define _GNTMAP_guest_avail0 (16)
2381 +#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
2382 +
2383 +/*
2384 * Values for error status returns. All errors are -ve.
2385 */
2386 #define GNTST_okay (0) /* Normal return. */
2387 @@ -361,7 +407,8 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_
2388 #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
2389 #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
2390 #define GNTST_bad_page (-9) /* Specified page was invalid for op. */
2391 -#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary */
2392 +#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
2393 +#define GNTST_address_too_big (-11) /* transfer page address too large. */
2394
2395 #define GNTTABOP_error_msgs { \
2396 "okay", \
2397 @@ -374,7 +421,18 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_
2398 "no spare translation slot in the I/O MMU", \
2399 "permission denied", \
2400 "bad page", \
2401 - "copy arguments cross page boundary" \
2402 + "copy arguments cross page boundary", \
2403 + "page address size too large" \
2404 }
2405
2406 #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
2407 +
2408 +/*
2409 + * Local variables:
2410 + * mode: C
2411 + * c-set-style: "BSD"
2412 + * c-basic-offset: 4
2413 + * tab-width: 4
2414 + * indent-tabs-mode: nil
2415 + * End:
2416 + */
2417 Index: head-2008-11-25/include/xen/interface/io/blkif.h
2418 ===================================================================
2419 --- head-2008-11-25.orig/include/xen/interface/io/blkif.h 2008-11-25 12:33:06.000000000 +0100
2420 +++ head-2008-11-25/include/xen/interface/io/blkif.h 2008-11-25 12:35:56.000000000 +0100
2421 @@ -3,6 +3,24 @@
2422 *
2423 * Unified block-device I/O interface for Xen guest OSes.
2424 *
2425 + * Permission is hereby granted, free of charge, to any person obtaining a copy
2426 + * of this software and associated documentation files (the "Software"), to
2427 + * deal in the Software without restriction, including without limitation the
2428 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
2429 + * sell copies of the Software, and to permit persons to whom the Software is
2430 + * furnished to do so, subject to the following conditions:
2431 + *
2432 + * The above copyright notice and this permission notice shall be included in
2433 + * all copies or substantial portions of the Software.
2434 + *
2435 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2436 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2437 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
2438 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2439 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2440 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2441 + * DEALINGS IN THE SOFTWARE.
2442 + *
2443 * Copyright (c) 2003-2004, Keir Fraser
2444 */
2445
2446 @@ -24,8 +42,10 @@
2447 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
2448 */
2449
2450 -typedef uint16_t blkif_vdev_t;
2451 -typedef uint64_t blkif_sector_t;
2452 +#ifndef blkif_vdev_t
2453 +#define blkif_vdev_t uint16_t
2454 +#endif
2455 +#define blkif_sector_t uint64_t
2456
2457 /*
2458 * REQUEST CODES.
2459 @@ -34,7 +54,7 @@ typedef uint64_t blkif_sector_t;
2460 #define BLKIF_OP_WRITE 1
2461 /*
2462 * Recognised only if "feature-barrier" is present in backend xenbus info.
2463 - * The "feature_barrier" node contains a boolean indicating whether barrier
2464 + * The "feature-barrier" node contains a boolean indicating whether barrier
2465 * requests are likely to succeed or fail. Either way, a barrier request
2466 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
2467 * the underlying block-device hardware. The boolean simply indicates whether
2468 @@ -43,33 +63,50 @@ typedef uint64_t blkif_sector_t;
2469 * create the "feature-barrier" node!
2470 */
2471 #define BLKIF_OP_WRITE_BARRIER 2
2472 +/*
2473 + * Recognised if "feature-flush-cache" is present in backend xenbus
2474 + * info. A flush will ask the underlying storage hardware to flush its
2475 + * non-volatile caches as appropriate. The "feature-flush-cache" node
2476 + * contains a boolean indicating whether flush requests are likely to
2477 + * succeed or fail. Either way, a flush request may fail at any time
2478 + * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
2479 + * block-device hardware. The boolean simply indicates whether or not it
2480 + * is worthwhile for the frontend to attempt flushes. If a backend does
2481 + * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the
2482 + * "feature-flush-cache" node!
2483 + */
2484 +#define BLKIF_OP_FLUSH_DISKCACHE 3
2485
2486 /*
2487 * Maximum scatter/gather segments per request.
2488 - * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
2489 + * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
2490 * NB. This could be 12 if the ring indexes weren't stored in the same page.
2491 */
2492 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
2493
2494 +struct blkif_request_segment {
2495 + grant_ref_t gref; /* reference to I/O buffer frame */
2496 + /* @first_sect: first sector in frame to transfer (inclusive). */
2497 + /* @last_sect: last sector in frame to transfer (inclusive). */
2498 + uint8_t first_sect, last_sect;
2499 +};
2500 +
2501 struct blkif_request {
2502 - uint8_t operation; /* BLKIF_OP_??? */
2503 - uint8_t nr_segments; /* number of segments */
2504 - blkif_vdev_t handle; /* only for read/write requests */
2505 - uint64_t id; /* private guest value, echoed in resp */
2506 - blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
2507 - struct blkif_request_segment {
2508 - grant_ref_t gref; /* reference to I/O buffer frame */
2509 - /* @first_sect: first sector in frame to transfer (inclusive). */
2510 - /* @last_sect: last sector in frame to transfer (inclusive). */
2511 - uint8_t first_sect, last_sect;
2512 - } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
2513 + uint8_t operation; /* BLKIF_OP_??? */
2514 + uint8_t nr_segments; /* number of segments */
2515 + blkif_vdev_t handle; /* only for read/write requests */
2516 + uint64_t id; /* private guest value, echoed in resp */
2517 + blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
2518 + struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
2519 };
2520 +typedef struct blkif_request blkif_request_t;
2521
2522 struct blkif_response {
2523 - uint64_t id; /* copied from request */
2524 - uint8_t operation; /* copied from request */
2525 - int16_t status; /* BLKIF_RSP_??? */
2526 + uint64_t id; /* copied from request */
2527 + uint8_t operation; /* copied from request */
2528 + int16_t status; /* BLKIF_RSP_??? */
2529 };
2530 +typedef struct blkif_response blkif_response_t;
2531
2532 /*
2533 * STATUS RETURN CODES.
2534 @@ -92,3 +129,13 @@ DEFINE_RING_TYPES(blkif, struct blkif_re
2535 #define VDISK_READONLY 0x4
2536
2537 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
2538 +
2539 +/*
2540 + * Local variables:
2541 + * mode: C
2542 + * c-set-style: "BSD"
2543 + * c-basic-offset: 4
2544 + * tab-width: 4
2545 + * indent-tabs-mode: nil
2546 + * End:
2547 + */
2548 Index: head-2008-11-25/include/xen/interface/io/console.h
2549 ===================================================================
2550 --- head-2008-11-25.orig/include/xen/interface/io/console.h 2008-11-25 12:33:06.000000000 +0100
2551 +++ head-2008-11-25/include/xen/interface/io/console.h 2008-11-25 12:35:56.000000000 +0100
2552 @@ -3,6 +3,24 @@
2553 *
2554 * Console I/O interface for Xen guest OSes.
2555 *
2556 + * Permission is hereby granted, free of charge, to any person obtaining a copy
2557 + * of this software and associated documentation files (the "Software"), to
2558 + * deal in the Software without restriction, including without limitation the
2559 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
2560 + * sell copies of the Software, and to permit persons to whom the Software is
2561 + * furnished to do so, subject to the following conditions:
2562 + *
2563 + * The above copyright notice and this permission notice shall be included in
2564 + * all copies or substantial portions of the Software.
2565 + *
2566 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2567 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2568 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
2569 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2570 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2571 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2572 + * DEALINGS IN THE SOFTWARE.
2573 + *
2574 * Copyright (c) 2005, Keir Fraser
2575 */
2576
2577 @@ -21,3 +39,13 @@ struct xencons_interface {
2578 };
2579
2580 #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
2581 +
2582 +/*
2583 + * Local variables:
2584 + * mode: C
2585 + * c-set-style: "BSD"
2586 + * c-basic-offset: 4
2587 + * tab-width: 4
2588 + * indent-tabs-mode: nil
2589 + * End:
2590 + */
2591 Index: head-2008-11-25/include/xen/interface/io/fbif.h
2592 ===================================================================
2593 --- head-2008-11-25.orig/include/xen/interface/io/fbif.h 2008-11-25 12:33:06.000000000 +0100
2594 +++ head-2008-11-25/include/xen/interface/io/fbif.h 2008-11-25 12:35:56.000000000 +0100
2595 @@ -41,12 +41,13 @@
2596 */
2597 #define XENFB_TYPE_UPDATE 2
2598
2599 -struct xenfb_update {
2600 - uint8_t type; /* XENFB_TYPE_UPDATE */
2601 - int32_t x; /* source x */
2602 - int32_t y; /* source y */
2603 - int32_t width; /* rect width */
2604 - int32_t height; /* rect height */
2605 +struct xenfb_update
2606 +{
2607 + uint8_t type; /* XENFB_TYPE_UPDATE */
2608 + int32_t x; /* source x */
2609 + int32_t y; /* source y */
2610 + int32_t width; /* rect width */
2611 + int32_t height; /* rect height */
2612 };
2613
2614 /*
2615 @@ -55,36 +56,58 @@ struct xenfb_update {
2616 */
2617 #define XENFB_TYPE_RESIZE 3
2618
2619 -struct xenfb_resize {
2620 - uint8_t type; /* XENFB_TYPE_RESIZE */
2621 - int32_t width; /* width in pixels */
2622 - int32_t height; /* height in pixels */
2623 - int32_t stride; /* stride in bytes */
2624 - int32_t depth; /* depth in bits */
2625 - int32_t offset; /* start offset within framebuffer */
2626 +struct xenfb_resize
2627 +{
2628 + uint8_t type; /* XENFB_TYPE_RESIZE */
2629 + int32_t width; /* width in pixels */
2630 + int32_t height; /* height in pixels */
2631 + int32_t stride; /* stride in bytes */
2632 + int32_t depth; /* depth in bits */
2633 + int32_t offset; /* offset of the framebuffer in bytes */
2634 };
2635
2636 #define XENFB_OUT_EVENT_SIZE 40
2637
2638 -union xenfb_out_event {
2639 - uint8_t type;
2640 - struct xenfb_update update;
2641 - struct xenfb_resize resize;
2642 - char pad[XENFB_OUT_EVENT_SIZE];
2643 +union xenfb_out_event
2644 +{
2645 + uint8_t type;
2646 + struct xenfb_update update;
2647 + struct xenfb_resize resize;
2648 + char pad[XENFB_OUT_EVENT_SIZE];
2649 };
2650
2651 /* In events (backend -> frontend) */
2652
2653 /*
2654 * Frontends should ignore unknown in events.
2655 - * No in events currently defined.
2656 */
2657
2658 +/*
2659 + * Framebuffer refresh period advice
2660 + * Backend sends it to advise the frontend their preferred period of
2661 + * refresh. Frontends that keep the framebuffer constantly up-to-date
2662 + * just ignore it. Frontends that use the advice should immediately
2663 + * refresh the framebuffer (and send an update notification event if
2664 + * those have been requested), then use the update frequency to guide
2665 + * their periodical refreshs.
2666 + */
2667 +#define XENFB_TYPE_REFRESH_PERIOD 1
2668 +#define XENFB_NO_REFRESH 0
2669 +
2670 +struct xenfb_refresh_period
2671 +{
2672 + uint8_t type; /* XENFB_TYPE_UPDATE_PERIOD */
2673 + uint32_t period; /* period of refresh, in ms,
2674 + * XENFB_NO_REFRESH if no refresh is needed */
2675 +};
2676 +
2677 #define XENFB_IN_EVENT_SIZE 40
2678
2679 -union xenfb_in_event {
2680 - uint8_t type;
2681 - char pad[XENFB_IN_EVENT_SIZE];
2682 +union xenfb_in_event
2683 +{
2684 + uint8_t type;
2685 + struct xenfb_refresh_period refresh_period;
2686 + char pad[XENFB_IN_EVENT_SIZE];
2687 };
2688
2689 /* shared page */
2690 @@ -93,41 +116,41 @@ union xenfb_in_event {
2691 #define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
2692 #define XENFB_IN_RING_OFFS 1024
2693 #define XENFB_IN_RING(page) \
2694 - ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
2695 + ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
2696 #define XENFB_IN_RING_REF(page, idx) \
2697 - (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
2698 + (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
2699
2700 #define XENFB_OUT_RING_SIZE 2048
2701 #define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
2702 #define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
2703 #define XENFB_OUT_RING(page) \
2704 - ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
2705 + ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
2706 #define XENFB_OUT_RING_REF(page, idx) \
2707 - (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
2708 + (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
2709
2710 -struct xenfb_page {
2711 - uint32_t in_cons, in_prod;
2712 - uint32_t out_cons, out_prod;
2713 -
2714 - int32_t width; /* width of the framebuffer (in pixels) */
2715 - int32_t height; /* height of the framebuffer (in pixels) */
2716 - uint32_t line_length; /* length of a row of pixels (in bytes) */
2717 - uint32_t mem_length; /* length of the framebuffer (in bytes) */
2718 - uint8_t depth; /* depth of a pixel (in bits) */
2719 -
2720 - /*
2721 - * Framebuffer page directory
2722 - *
2723 - * Each directory page holds PAGE_SIZE / sizeof(*pd)
2724 - * framebuffer pages, and can thus map up to PAGE_SIZE *
2725 - * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
2726 - * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2
2727 - * Megs 64 bit. 256 directories give enough room for a 512
2728 - * Meg framebuffer with a max resolution of 12,800x10,240.
2729 - * Should be enough for a while with room leftover for
2730 - * expansion.
2731 - */
2732 - unsigned long pd[256];
2733 +struct xenfb_page
2734 +{
2735 + uint32_t in_cons, in_prod;
2736 + uint32_t out_cons, out_prod;
2737 +
2738 + int32_t width; /* the width of the framebuffer (in pixels) */
2739 + int32_t height; /* the height of the framebuffer (in pixels) */
2740 + uint32_t line_length; /* the length of a row of pixels (in bytes) */
2741 + uint32_t mem_length; /* the length of the framebuffer (in bytes) */
2742 + uint8_t depth; /* the depth of a pixel (in bits) */
2743 +
2744 + /*
2745 + * Framebuffer page directory
2746 + *
2747 + * Each directory page holds PAGE_SIZE / sizeof(*pd)
2748 + * framebuffer pages, and can thus map up to PAGE_SIZE *
2749 + * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
2750 + * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2 Megs
2751 + * 64 bit. 256 directories give enough room for a 512 Meg
2752 + * framebuffer with a max resolution of 12,800x10,240. Should
2753 + * be enough for a while with room leftover for expansion.
2754 + */
2755 + unsigned long pd[256];
2756 };
2757
2758 /*
2759 @@ -141,3 +164,13 @@ struct xenfb_page {
2760 #endif
2761
2762 #endif
2763 +
2764 +/*
2765 + * Local variables:
2766 + * mode: C
2767 + * c-set-style: "BSD"
2768 + * c-basic-offset: 4
2769 + * tab-width: 4
2770 + * indent-tabs-mode: nil
2771 + * End:
2772 + */
2773 Index: head-2008-11-25/include/xen/interface/io/kbdif.h
2774 ===================================================================
2775 --- head-2008-11-25.orig/include/xen/interface/io/kbdif.h 2008-11-25 12:33:06.000000000 +0100
2776 +++ head-2008-11-25/include/xen/interface/io/kbdif.h 2008-11-25 12:35:56.000000000 +0100
2777 @@ -45,34 +45,38 @@
2778 */
2779 #define XENKBD_TYPE_POS 4
2780
2781 -struct xenkbd_motion {
2782 - uint8_t type; /* XENKBD_TYPE_MOTION */
2783 - int32_t rel_x; /* relative X motion */
2784 - int32_t rel_y; /* relative Y motion */
2785 - int32_t rel_z; /* relative Z motion (wheel) */
2786 -};
2787 -
2788 -struct xenkbd_key {
2789 - uint8_t type; /* XENKBD_TYPE_KEY */
2790 - uint8_t pressed; /* 1 if pressed; 0 otherwise */
2791 - uint32_t keycode; /* KEY_* from linux/input.h */
2792 -};
2793 -
2794 -struct xenkbd_position {
2795 - uint8_t type; /* XENKBD_TYPE_POS */
2796 - int32_t abs_x; /* absolute X position (in FB pixels) */
2797 - int32_t abs_y; /* absolute Y position (in FB pixels) */
2798 - int32_t rel_z; /* relative Z motion (wheel) */
2799 +struct xenkbd_motion
2800 +{
2801 + uint8_t type; /* XENKBD_TYPE_MOTION */
2802 + int32_t rel_x; /* relative X motion */
2803 + int32_t rel_y; /* relative Y motion */
2804 + int32_t rel_z; /* relative Z motion (wheel) */
2805 +};
2806 +
2807 +struct xenkbd_key
2808 +{
2809 + uint8_t type; /* XENKBD_TYPE_KEY */
2810 + uint8_t pressed; /* 1 if pressed; 0 otherwise */
2811 + uint32_t keycode; /* KEY_* from linux/input.h */
2812 +};
2813 +
2814 +struct xenkbd_position
2815 +{
2816 + uint8_t type; /* XENKBD_TYPE_POS */
2817 + int32_t abs_x; /* absolute X position (in FB pixels) */
2818 + int32_t abs_y; /* absolute Y position (in FB pixels) */
2819 + int32_t rel_z; /* relative Z motion (wheel) */
2820 };
2821
2822 #define XENKBD_IN_EVENT_SIZE 40
2823
2824 -union xenkbd_in_event {
2825 - uint8_t type;
2826 - struct xenkbd_motion motion;
2827 - struct xenkbd_key key;
2828 - struct xenkbd_position pos;
2829 - char pad[XENKBD_IN_EVENT_SIZE];
2830 +union xenkbd_in_event
2831 +{
2832 + uint8_t type;
2833 + struct xenkbd_motion motion;
2834 + struct xenkbd_key key;
2835 + struct xenkbd_position pos;
2836 + char pad[XENKBD_IN_EVENT_SIZE];
2837 };
2838
2839 /* Out events (frontend -> backend) */
2840 @@ -85,9 +89,10 @@ union xenkbd_in_event {
2841
2842 #define XENKBD_OUT_EVENT_SIZE 40
2843
2844 -union xenkbd_out_event {
2845 - uint8_t type;
2846 - char pad[XENKBD_OUT_EVENT_SIZE];
2847 +union xenkbd_out_event
2848 +{
2849 + uint8_t type;
2850 + char pad[XENKBD_OUT_EVENT_SIZE];
2851 };
2852
2853 /* shared page */
2854 @@ -96,21 +101,32 @@ union xenkbd_out_event {
2855 #define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
2856 #define XENKBD_IN_RING_OFFS 1024
2857 #define XENKBD_IN_RING(page) \
2858 - ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
2859 + ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
2860 #define XENKBD_IN_RING_REF(page, idx) \
2861 - (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
2862 + (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
2863
2864 #define XENKBD_OUT_RING_SIZE 1024
2865 #define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
2866 #define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
2867 #define XENKBD_OUT_RING(page) \
2868 - ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
2869 + ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
2870 #define XENKBD_OUT_RING_REF(page, idx) \
2871 - (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
2872 + (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
2873
2874 -struct xenkbd_page {
2875 - uint32_t in_cons, in_prod;
2876 - uint32_t out_cons, out_prod;
2877 +struct xenkbd_page
2878 +{
2879 + uint32_t in_cons, in_prod;
2880 + uint32_t out_cons, out_prod;
2881 };
2882
2883 #endif
2884 +
2885 +/*
2886 + * Local variables:
2887 + * mode: C
2888 + * c-set-style: "BSD"
2889 + * c-basic-offset: 4
2890 + * tab-width: 4
2891 + * indent-tabs-mode: nil
2892 + * End:
2893 + */
2894 Index: head-2008-11-25/include/xen/interface/io/netif.h
2895 ===================================================================
2896 --- head-2008-11-25.orig/include/xen/interface/io/netif.h 2008-11-25 12:33:06.000000000 +0100
2897 +++ head-2008-11-25/include/xen/interface/io/netif.h 2008-11-25 12:35:56.000000000 +0100
2898 @@ -3,6 +3,24 @@
2899 *
2900 * Unified network-device I/O interface for Xen guest OSes.
2901 *
2902 + * Permission is hereby granted, free of charge, to any person obtaining a copy
2903 + * of this software and associated documentation files (the "Software"), to
2904 + * deal in the Software without restriction, including without limitation the
2905 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
2906 + * sell copies of the Software, and to permit persons to whom the Software is
2907 + * furnished to do so, subject to the following conditions:
2908 + *
2909 + * The above copyright notice and this permission notice shall be included in
2910 + * all copies or substantial portions of the Software.
2911 + *
2912 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2913 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2914 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
2915 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2916 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2917 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2918 + * DEALINGS IN THE SOFTWARE.
2919 + *
2920 * Copyright (c) 2003-2004, Keir Fraser
2921 */
2922
2923 @@ -47,18 +65,21 @@
2924 #define _NETTXF_extra_info (3)
2925 #define NETTXF_extra_info (1U<<_NETTXF_extra_info)
2926
2927 -struct xen_netif_tx_request {
2928 +struct netif_tx_request {
2929 grant_ref_t gref; /* Reference to buffer page */
2930 uint16_t offset; /* Offset within buffer page */
2931 uint16_t flags; /* NETTXF_* */
2932 uint16_t id; /* Echoed in response message. */
2933 uint16_t size; /* Packet size in bytes. */
2934 };
2935 +typedef struct netif_tx_request netif_tx_request_t;
2936
2937 /* Types of netif_extra_info descriptors. */
2938 -#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
2939 -#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
2940 -#define XEN_NETIF_EXTRA_TYPE_MAX (2)
2941 +#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
2942 +#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
2943 +#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
2944 +#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
2945 +#define XEN_NETIF_EXTRA_TYPE_MAX (4)
2946
2947 /* netif_extra_info flags. */
2948 #define _XEN_NETIF_EXTRA_FLAG_MORE (0)
2949 @@ -71,49 +92,68 @@ struct xen_netif_tx_request {
2950 * This structure needs to fit within both netif_tx_request and
2951 * netif_rx_response for compatibility.
2952 */
2953 -struct xen_netif_extra_info {
2954 - uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
2955 - uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
2956 -
2957 - union {
2958 - struct {
2959 - /*
2960 - * Maximum payload size of each segment. For
2961 - * example, for TCP this is just the path MSS.
2962 - */
2963 - uint16_t size;
2964 -
2965 - /*
2966 - * GSO type. This determines the protocol of
2967 - * the packet and any extra features required
2968 - * to segment the packet properly.
2969 - */
2970 - uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
2971 -
2972 - /* Future expansion. */
2973 - uint8_t pad;
2974 -
2975 - /*
2976 - * GSO features. This specifies any extra GSO
2977 - * features required to process this packet,
2978 - * such as ECN support for TCPv4.
2979 - */
2980 - uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
2981 - } gso;
2982 +struct netif_extra_info {
2983 + uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
2984 + uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
2985 +
2986 + union {
2987 + /*
2988 + * XEN_NETIF_EXTRA_TYPE_GSO:
2989 + */
2990 + struct {
2991 + /*
2992 + * Maximum payload size of each segment. For example, for TCP this
2993 + * is just the path MSS.
2994 + */
2995 + uint16_t size;
2996 +
2997 + /*
2998 + * GSO type. This determines the protocol of the packet and any
2999 + * extra features required to segment the packet properly.
3000 + */
3001 + uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
3002 +
3003 + /* Future expansion. */
3004 + uint8_t pad;
3005 +
3006 + /*
3007 + * GSO features. This specifies any extra GSO features required
3008 + * to process this packet, such as ECN support for TCPv4.
3009 + */
3010 + uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
3011 + } gso;
3012 +
3013 + /*
3014 + * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
3015 + * Backend advertises availability via 'feature-multicast-control'
3016 + * xenbus node containing value '1'.
3017 + * Frontend requests this feature by advertising
3018 + * 'request-multicast-control' xenbus node containing value '1'.
3019 + * If multicast control is requested then multicast flooding is
3020 + * disabled and the frontend must explicitly register its interest
3021 + * in multicast groups using dummy transmit requests containing
3022 + * MCAST_{ADD,DEL} extra-info fragments.
3023 + */
3024 + struct {
3025 + uint8_t addr[6]; /* Address to add/remove. */
3026 + } mcast;
3027
3028 - uint16_t pad[3];
3029 - } u;
3030 + uint16_t pad[3];
3031 + } u;
3032 };
3033 +typedef struct netif_extra_info netif_extra_info_t;
3034
3035 -struct xen_netif_tx_response {
3036 - uint16_t id;
3037 - int16_t status; /* NETIF_RSP_* */
3038 +struct netif_tx_response {
3039 + uint16_t id;
3040 + int16_t status; /* NETIF_RSP_* */
3041 };
3042 +typedef struct netif_tx_response netif_tx_response_t;
3043
3044 -struct xen_netif_rx_request {
3045 - uint16_t id; /* Echoed in response message. */
3046 - grant_ref_t gref; /* Reference to incoming granted frame */
3047 +struct netif_rx_request {
3048 + uint16_t id; /* Echoed in response message. */
3049 + grant_ref_t gref; /* Reference to incoming granted frame */
3050 };
3051 +typedef struct netif_rx_request netif_rx_request_t;
3052
3053 /* Packet data has been validated against protocol checksum. */
3054 #define _NETRXF_data_validated (0)
3055 @@ -131,23 +171,20 @@ struct xen_netif_rx_request {
3056 #define _NETRXF_extra_info (3)
3057 #define NETRXF_extra_info (1U<<_NETRXF_extra_info)
3058
3059 -struct xen_netif_rx_response {
3060 +struct netif_rx_response {
3061 uint16_t id;
3062 uint16_t offset; /* Offset in page of start of received packet */
3063 uint16_t flags; /* NETRXF_* */
3064 int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
3065 };
3066 +typedef struct netif_rx_response netif_rx_response_t;
3067
3068 /*
3069 * Generate netif ring structures and types.
3070 */
3071
3072 -DEFINE_RING_TYPES(xen_netif_tx,
3073 - struct xen_netif_tx_request,
3074 - struct xen_netif_tx_response);
3075 -DEFINE_RING_TYPES(xen_netif_rx,
3076 - struct xen_netif_rx_request,
3077 - struct xen_netif_rx_response);
3078 +DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response);
3079 +DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
3080
3081 #define NETIF_RSP_DROPPED -2
3082 #define NETIF_RSP_ERROR -1
3083 @@ -156,3 +193,13 @@ DEFINE_RING_TYPES(xen_netif_rx,
3084 #define NETIF_RSP_NULL 1
3085
3086 #endif
3087 +
3088 +/*
3089 + * Local variables:
3090 + * mode: C
3091 + * c-set-style: "BSD"
3092 + * c-basic-offset: 4
3093 + * tab-width: 4
3094 + * indent-tabs-mode: nil
3095 + * End:
3096 + */
3097 Index: head-2008-11-25/include/xen/interface/io/protocols.h
3098 ===================================================================
3099 --- head-2008-11-25.orig/include/xen/interface/io/protocols.h 2008-11-25 12:33:06.000000000 +0100
3100 +++ head-2008-11-25/include/xen/interface/io/protocols.h 2008-11-25 12:35:56.000000000 +0100
3101 @@ -1,10 +1,31 @@
3102 +/******************************************************************************
3103 + * protocols.h
3104 + *
3105 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3106 + * of this software and associated documentation files (the "Software"), to
3107 + * deal in the Software without restriction, including without limitation the
3108 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3109 + * sell copies of the Software, and to permit persons to whom the Software is
3110 + * furnished to do so, subject to the following conditions:
3111 + *
3112 + * The above copyright notice and this permission notice shall be included in
3113 + * all copies or substantial portions of the Software.
3114 + *
3115 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3116 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3117 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3118 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3119 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3120 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3121 + * DEALINGS IN THE SOFTWARE.
3122 + */
3123 +
3124 #ifndef __XEN_PROTOCOLS_H__
3125 #define __XEN_PROTOCOLS_H__
3126
3127 #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
3128 #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
3129 #define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
3130 -#define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi"
3131
3132 #if defined(__i386__)
3133 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
3134 @@ -12,8 +33,6 @@
3135 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
3136 #elif defined(__ia64__)
3137 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
3138 -#elif defined(__powerpc64__)
3139 -# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
3140 #else
3141 # error arch fixup needed here
3142 #endif
3143 Index: head-2008-11-25/include/xen/interface/io/ring.h
3144 ===================================================================
3145 --- head-2008-11-25.orig/include/xen/interface/io/ring.h 2008-11-25 12:33:06.000000000 +0100
3146 +++ head-2008-11-25/include/xen/interface/io/ring.h 2008-11-25 12:35:56.000000000 +0100
3147 @@ -3,16 +3,42 @@
3148 *
3149 * Shared producer-consumer ring macros.
3150 *
3151 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3152 + * of this software and associated documentation files (the "Software"), to
3153 + * deal in the Software without restriction, including without limitation the
3154 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3155 + * sell copies of the Software, and to permit persons to whom the Software is
3156 + * furnished to do so, subject to the following conditions:
3157 + *
3158 + * The above copyright notice and this permission notice shall be included in
3159 + * all copies or substantial portions of the Software.
3160 + *
3161 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3162 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3163 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3164 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3165 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3166 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3167 + * DEALINGS IN THE SOFTWARE.
3168 + *
3169 * Tim Deegan and Andrew Warfield November 2004.
3170 */
3171
3172 #ifndef __XEN_PUBLIC_IO_RING_H__
3173 #define __XEN_PUBLIC_IO_RING_H__
3174
3175 +#include "../xen-compat.h"
3176 +
3177 +#if __XEN_INTERFACE_VERSION__ < 0x00030208
3178 +#define xen_mb() mb()
3179 +#define xen_rmb() rmb()
3180 +#define xen_wmb() wmb()
3181 +#endif
3182 +
3183 typedef unsigned int RING_IDX;
3184
3185 /* Round a 32-bit unsigned constant down to the nearest power of two. */
3186 -#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
3187 +#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
3188 #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
3189 #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
3190 #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
3191 @@ -25,73 +51,76 @@ typedef unsigned int RING_IDX;
3192 * power of two (so we can mask with (size-1) to loop around).
3193 */
3194 #define __RING_SIZE(_s, _sz) \
3195 - (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
3196 + (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
3197
3198 /*
3199 * Macros to make the correct C datatypes for a new kind of ring.
3200 *
3201 * To make a new ring datatype, you need to have two message structures,
3202 - * let's say struct request, and struct response already defined.
3203 + * let's say request_t, and response_t already defined.
3204 *
3205 * In a header where you want the ring datatype declared, you then do:
3206 *
3207 - * DEFINE_RING_TYPES(mytag, struct request, struct response);
3208 + * DEFINE_RING_TYPES(mytag, request_t, response_t);
3209 *
3210 * These expand out to give you a set of types, as you can see below.
3211 * The most important of these are:
3212 *
3213 - * struct mytag_sring - The shared ring.
3214 - * struct mytag_front_ring - The 'front' half of the ring.
3215 - * struct mytag_back_ring - The 'back' half of the ring.
3216 + * mytag_sring_t - The shared ring.
3217 + * mytag_front_ring_t - The 'front' half of the ring.
3218 + * mytag_back_ring_t - The 'back' half of the ring.
3219 *
3220 * To initialize a ring in your code you need to know the location and size
3221 * of the shared memory area (PAGE_SIZE, for instance). To initialise
3222 * the front half:
3223 *
3224 - * struct mytag_front_ring front_ring;
3225 - * SHARED_RING_INIT((struct mytag_sring *)shared_page);
3226 - * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
3227 - * PAGE_SIZE);
3228 + * mytag_front_ring_t front_ring;
3229 + * SHARED_RING_INIT((mytag_sring_t *)shared_page);
3230 + * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
3231 *
3232 * Initializing the back follows similarly (note that only the front
3233 * initializes the shared ring):
3234 *
3235 - * struct mytag_back_ring back_ring;
3236 - * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
3237 - * PAGE_SIZE);
3238 + * mytag_back_ring_t back_ring;
3239 + * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
3240 */
3241
3242 -#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
3243 - \
3244 -/* Shared ring entry */ \
3245 -union __name##_sring_entry { \
3246 - __req_t req; \
3247 - __rsp_t rsp; \
3248 -}; \
3249 - \
3250 -/* Shared ring page */ \
3251 -struct __name##_sring { \
3252 - RING_IDX req_prod, req_event; \
3253 - RING_IDX rsp_prod, rsp_event; \
3254 - uint8_t pad[48]; \
3255 - union __name##_sring_entry ring[1]; /* variable-length */ \
3256 -}; \
3257 - \
3258 -/* "Front" end's private variables */ \
3259 -struct __name##_front_ring { \
3260 - RING_IDX req_prod_pvt; \
3261 - RING_IDX rsp_cons; \
3262 - unsigned int nr_ents; \
3263 - struct __name##_sring *sring; \
3264 -}; \
3265 - \
3266 -/* "Back" end's private variables */ \
3267 -struct __name##_back_ring { \
3268 - RING_IDX rsp_prod_pvt; \
3269 - RING_IDX req_cons; \
3270 - unsigned int nr_ents; \
3271 - struct __name##_sring *sring; \
3272 -};
3273 +#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
3274 + \
3275 +/* Shared ring entry */ \
3276 +union __name##_sring_entry { \
3277 + __req_t req; \
3278 + __rsp_t rsp; \
3279 +}; \
3280 + \
3281 +/* Shared ring page */ \
3282 +struct __name##_sring { \
3283 + RING_IDX req_prod, req_event; \
3284 + RING_IDX rsp_prod, rsp_event; \
3285 + uint8_t pad[48]; \
3286 + union __name##_sring_entry ring[1]; /* variable-length */ \
3287 +}; \
3288 + \
3289 +/* "Front" end's private variables */ \
3290 +struct __name##_front_ring { \
3291 + RING_IDX req_prod_pvt; \
3292 + RING_IDX rsp_cons; \
3293 + unsigned int nr_ents; \
3294 + struct __name##_sring *sring; \
3295 +}; \
3296 + \
3297 +/* "Back" end's private variables */ \
3298 +struct __name##_back_ring { \
3299 + RING_IDX rsp_prod_pvt; \
3300 + RING_IDX req_cons; \
3301 + unsigned int nr_ents; \
3302 + struct __name##_sring *sring; \
3303 +}; \
3304 + \
3305 +/* Syntactic sugar */ \
3306 +typedef struct __name##_sring __name##_sring_t; \
3307 +typedef struct __name##_front_ring __name##_front_ring_t; \
3308 +typedef struct __name##_back_ring __name##_back_ring_t
3309
3310 /*
3311 * Macros for manipulating rings.
3312 @@ -109,86 +138,94 @@ struct __name##_back_ring { \
3313 */
3314
3315 /* Initialising empty rings */
3316 -#define SHARED_RING_INIT(_s) do { \
3317 - (_s)->req_prod = (_s)->rsp_prod = 0; \
3318 - (_s)->req_event = (_s)->rsp_event = 1; \
3319 - memset((_s)->pad, 0, sizeof((_s)->pad)); \
3320 +#define SHARED_RING_INIT(_s) do { \
3321 + (_s)->req_prod = (_s)->rsp_prod = 0; \
3322 + (_s)->req_event = (_s)->rsp_event = 1; \
3323 + (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \
3324 } while(0)
3325
3326 -#define FRONT_RING_INIT(_r, _s, __size) do { \
3327 - (_r)->req_prod_pvt = 0; \
3328 - (_r)->rsp_cons = 0; \
3329 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3330 - (_r)->sring = (_s); \
3331 +#define FRONT_RING_INIT(_r, _s, __size) do { \
3332 + (_r)->req_prod_pvt = 0; \
3333 + (_r)->rsp_cons = 0; \
3334 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3335 + (_r)->sring = (_s); \
3336 } while (0)
3337
3338 -#define BACK_RING_INIT(_r, _s, __size) do { \
3339 - (_r)->rsp_prod_pvt = 0; \
3340 - (_r)->req_cons = 0; \
3341 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3342 - (_r)->sring = (_s); \
3343 +#define BACK_RING_INIT(_r, _s, __size) do { \
3344 + (_r)->rsp_prod_pvt = 0; \
3345 + (_r)->req_cons = 0; \
3346 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3347 + (_r)->sring = (_s); \
3348 } while (0)
3349
3350 /* Initialize to existing shared indexes -- for recovery */
3351 -#define FRONT_RING_ATTACH(_r, _s, __size) do { \
3352 - (_r)->sring = (_s); \
3353 - (_r)->req_prod_pvt = (_s)->req_prod; \
3354 - (_r)->rsp_cons = (_s)->rsp_prod; \
3355 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3356 +#define FRONT_RING_ATTACH(_r, _s, __size) do { \
3357 + (_r)->sring = (_s); \
3358 + (_r)->req_prod_pvt = (_s)->req_prod; \
3359 + (_r)->rsp_cons = (_s)->rsp_prod; \
3360 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3361 } while (0)
3362
3363 -#define BACK_RING_ATTACH(_r, _s, __size) do { \
3364 - (_r)->sring = (_s); \
3365 - (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
3366 - (_r)->req_cons = (_s)->req_prod; \
3367 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3368 +#define BACK_RING_ATTACH(_r, _s, __size) do { \
3369 + (_r)->sring = (_s); \
3370 + (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
3371 + (_r)->req_cons = (_s)->req_prod; \
3372 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3373 } while (0)
3374
3375 /* How big is this ring? */
3376 -#define RING_SIZE(_r) \
3377 +#define RING_SIZE(_r) \
3378 ((_r)->nr_ents)
3379
3380 /* Number of free requests (for use on front side only). */
3381 -#define RING_FREE_REQUESTS(_r) \
3382 +#define RING_FREE_REQUESTS(_r) \
3383 (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
3384
3385 /* Test if there is an empty slot available on the front ring.
3386 * (This is only meaningful from the front. )
3387 */
3388 -#define RING_FULL(_r) \
3389 +#define RING_FULL(_r) \
3390 (RING_FREE_REQUESTS(_r) == 0)
3391
3392 /* Test if there are outstanding messages to be processed on a ring. */
3393 -#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
3394 +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
3395 ((_r)->sring->rsp_prod - (_r)->rsp_cons)
3396
3397 -#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
3398 - ({ \
3399 - unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
3400 - unsigned int rsp = RING_SIZE(_r) - \
3401 - ((_r)->req_cons - (_r)->rsp_prod_pvt); \
3402 - req < rsp ? req : rsp; \
3403 - })
3404 +#ifdef __GNUC__
3405 +#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
3406 + unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
3407 + unsigned int rsp = RING_SIZE(_r) - \
3408 + ((_r)->req_cons - (_r)->rsp_prod_pvt); \
3409 + req < rsp ? req : rsp; \
3410 +})
3411 +#else
3412 +/* Same as above, but without the nice GCC ({ ... }) syntax. */
3413 +#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
3414 + ((((_r)->sring->req_prod - (_r)->req_cons) < \
3415 + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
3416 + ((_r)->sring->req_prod - (_r)->req_cons) : \
3417 + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
3418 +#endif
3419
3420 /* Direct access to individual ring elements, by index. */
3421 -#define RING_GET_REQUEST(_r, _idx) \
3422 +#define RING_GET_REQUEST(_r, _idx) \
3423 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
3424
3425 -#define RING_GET_RESPONSE(_r, _idx) \
3426 +#define RING_GET_RESPONSE(_r, _idx) \
3427 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
3428
3429 /* Loop termination condition: Would the specified index overflow the ring? */
3430 -#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
3431 +#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
3432 (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
3433
3434 -#define RING_PUSH_REQUESTS(_r) do { \
3435 - wmb(); /* back sees requests /before/ updated producer index */ \
3436 - (_r)->sring->req_prod = (_r)->req_prod_pvt; \
3437 +#define RING_PUSH_REQUESTS(_r) do { \
3438 + xen_wmb(); /* back sees requests /before/ updated producer index */ \
3439 + (_r)->sring->req_prod = (_r)->req_prod_pvt; \
3440 } while (0)
3441
3442 -#define RING_PUSH_RESPONSES(_r) do { \
3443 - wmb(); /* front sees responses /before/ updated producer index */ \
3444 - (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
3445 +#define RING_PUSH_RESPONSES(_r) do { \
3446 + xen_wmb(); /* front sees resps /before/ updated producer index */ \
3447 + (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
3448 } while (0)
3449
3450 /*
3451 @@ -221,40 +258,50 @@ struct __name##_back_ring { \
3452 * field appropriately.
3453 */
3454
3455 -#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
3456 - RING_IDX __old = (_r)->sring->req_prod; \
3457 - RING_IDX __new = (_r)->req_prod_pvt; \
3458 - wmb(); /* back sees requests /before/ updated producer index */ \
3459 - (_r)->sring->req_prod = __new; \
3460 - mb(); /* back sees new requests /before/ we check req_event */ \
3461 - (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
3462 - (RING_IDX)(__new - __old)); \
3463 +#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
3464 + RING_IDX __old = (_r)->sring->req_prod; \
3465 + RING_IDX __new = (_r)->req_prod_pvt; \
3466 + xen_wmb(); /* back sees requests /before/ updated producer index */ \
3467 + (_r)->sring->req_prod = __new; \
3468 + xen_mb(); /* back sees new requests /before/ we check req_event */ \
3469 + (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
3470 + (RING_IDX)(__new - __old)); \
3471 } while (0)
3472
3473 -#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
3474 - RING_IDX __old = (_r)->sring->rsp_prod; \
3475 - RING_IDX __new = (_r)->rsp_prod_pvt; \
3476 - wmb(); /* front sees responses /before/ updated producer index */ \
3477 - (_r)->sring->rsp_prod = __new; \
3478 - mb(); /* front sees new responses /before/ we check rsp_event */ \
3479 - (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
3480 - (RING_IDX)(__new - __old)); \
3481 +#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
3482 + RING_IDX __old = (_r)->sring->rsp_prod; \
3483 + RING_IDX __new = (_r)->rsp_prod_pvt; \
3484 + xen_wmb(); /* front sees resps /before/ updated producer index */ \
3485 + (_r)->sring->rsp_prod = __new; \
3486 + xen_mb(); /* front sees new resps /before/ we check rsp_event */ \
3487 + (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
3488 + (RING_IDX)(__new - __old)); \
3489 } while (0)
3490
3491 -#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
3492 - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3493 - if (_work_to_do) break; \
3494 - (_r)->sring->req_event = (_r)->req_cons + 1; \
3495 - mb(); \
3496 - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3497 +#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
3498 + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3499 + if (_work_to_do) break; \
3500 + (_r)->sring->req_event = (_r)->req_cons + 1; \
3501 + xen_mb(); \
3502 + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3503 } while (0)
3504
3505 -#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
3506 - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3507 - if (_work_to_do) break; \
3508 - (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
3509 - mb(); \
3510 - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3511 +#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
3512 + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3513 + if (_work_to_do) break; \
3514 + (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
3515 + xen_mb(); \
3516 + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3517 } while (0)
3518
3519 #endif /* __XEN_PUBLIC_IO_RING_H__ */
3520 +
3521 +/*
3522 + * Local variables:
3523 + * mode: C
3524 + * c-set-style: "BSD"
3525 + * c-basic-offset: 4
3526 + * tab-width: 4
3527 + * indent-tabs-mode: nil
3528 + * End:
3529 + */
3530 Index: head-2008-11-25/include/xen/interface/io/xenbus.h
3531 ===================================================================
3532 --- head-2008-11-25.orig/include/xen/interface/io/xenbus.h 2008-11-25 12:33:06.000000000 +0100
3533 +++ head-2008-11-25/include/xen/interface/io/xenbus.h 2008-11-25 12:35:56.000000000 +0100
3534 @@ -3,42 +3,78 @@
3535 *
3536 * Xenbus protocol details.
3537 *
3538 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3539 + * of this software and associated documentation files (the "Software"), to
3540 + * deal in the Software without restriction, including without limitation the
3541 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3542 + * sell copies of the Software, and to permit persons to whom the Software is
3543 + * furnished to do so, subject to the following conditions:
3544 + *
3545 + * The above copyright notice and this permission notice shall be included in
3546 + * all copies or substantial portions of the Software.
3547 + *
3548 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3549 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3550 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3551 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3552 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3553 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3554 + * DEALINGS IN THE SOFTWARE.
3555 + *
3556 * Copyright (C) 2005 XenSource Ltd.
3557 */
3558
3559 #ifndef _XEN_PUBLIC_IO_XENBUS_H
3560 #define _XEN_PUBLIC_IO_XENBUS_H
3561
3562 -/* The state of either end of the Xenbus, i.e. the current communication
3563 - status of initialisation across the bus. States here imply nothing about
3564 - the state of the connection between the driver and the kernel's device
3565 - layers. */
3566 -enum xenbus_state
3567 -{
3568 - XenbusStateUnknown = 0,
3569 - XenbusStateInitialising = 1,
3570 - XenbusStateInitWait = 2, /* Finished early
3571 - initialisation, but waiting
3572 - for information from the peer
3573 - or hotplug scripts. */
3574 - XenbusStateInitialised = 3, /* Initialised and waiting for a
3575 - connection from the peer. */
3576 - XenbusStateConnected = 4,
3577 - XenbusStateClosing = 5, /* The device is being closed
3578 - due to an error or an unplug
3579 - event. */
3580 - XenbusStateClosed = 6
3581 +/*
3582 + * The state of either end of the Xenbus, i.e. the current communication
3583 + * status of initialisation across the bus. States here imply nothing about
3584 + * the state of the connection between the driver and the kernel's device
3585 + * layers.
3586 + */
3587 +enum xenbus_state {
3588 + XenbusStateUnknown = 0,
3589 +
3590 + XenbusStateInitialising = 1,
3591 +
3592 + /*
3593 + * InitWait: Finished early initialisation but waiting for information
3594 + * from the peer or hotplug scripts.
3595 + */
3596 + XenbusStateInitWait = 2,
3597 +
3598 + /*
3599 + * Initialised: Waiting for a connection from the peer.
3600 + */
3601 + XenbusStateInitialised = 3,
3602 +
3603 + XenbusStateConnected = 4,
3604 +
3605 + /*
3606 + * Closing: The device is being closed due to an error or an unplug event.
3607 + */
3608 + XenbusStateClosing = 5,
3609 +
3610 + XenbusStateClosed = 6,
3611 +
3612 + /*
3613 + * Reconfiguring: The device is being reconfigured.
3614 + */
3615 + XenbusStateReconfiguring = 7,
3616
3617 + XenbusStateReconfigured = 8
3618 };
3619 +typedef enum xenbus_state XenbusState;
3620
3621 #endif /* _XEN_PUBLIC_IO_XENBUS_H */
3622
3623 /*
3624 * Local variables:
3625 - * c-file-style: "linux"
3626 - * indent-tabs-mode: t
3627 - * c-indent-level: 8
3628 - * c-basic-offset: 8
3629 - * tab-width: 8
3630 + * mode: C
3631 + * c-set-style: "BSD"
3632 + * c-basic-offset: 4
3633 + * tab-width: 4
3634 + * indent-tabs-mode: nil
3635 * End:
3636 */
3637 Index: head-2008-11-25/include/xen/interface/io/xs_wire.h
3638 ===================================================================
3639 --- head-2008-11-25.orig/include/xen/interface/io/xs_wire.h 2008-11-25 12:33:06.000000000 +0100
3640 +++ head-2008-11-25/include/xen/interface/io/xs_wire.h 2008-11-25 12:35:56.000000000 +0100
3641 @@ -1,6 +1,25 @@
3642 /*
3643 * Details of the "wire" protocol between Xen Store Daemon and client
3644 * library or guest kernel.
3645 + *
3646 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3647 + * of this software and associated documentation files (the "Software"), to
3648 + * deal in the Software without restriction, including without limitation the
3649 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3650 + * sell copies of the Software, and to permit persons to whom the Software is
3651 + * furnished to do so, subject to the following conditions:
3652 + *
3653 + * The above copyright notice and this permission notice shall be included in
3654 + * all copies or substantial portions of the Software.
3655 + *
3656 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3657 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3658 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3659 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3660 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3661 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3662 + * DEALINGS IN THE SOFTWARE.
3663 + *
3664 * Copyright (C) 2005 Rusty Russell IBM Corporation
3665 */
3666
3667 @@ -26,7 +45,9 @@ enum xsd_sockmsg_type
3668 XS_SET_PERMS,
3669 XS_WATCH_EVENT,
3670 XS_ERROR,
3671 - XS_IS_DOMAIN_INTRODUCED
3672 + XS_IS_DOMAIN_INTRODUCED,
3673 + XS_RESUME,
3674 + XS_SET_TARGET
3675 };
3676
3677 #define XS_WRITE_NONE "NONE"
3678 @@ -40,7 +61,12 @@ struct xsd_errors
3679 const char *errstring;
3680 };
3681 #define XSD_ERROR(x) { x, #x }
3682 -static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
3683 +/* LINTED: static unused */
3684 +static struct xsd_errors xsd_errors[]
3685 +#if defined(__GNUC__)
3686 +__attribute__((unused))
3687 +#endif
3688 + = {
3689 XSD_ERROR(EINVAL),
3690 XSD_ERROR(EACCES),
3691 XSD_ERROR(EEXIST),
3692 @@ -84,4 +110,21 @@ struct xenstore_domain_interface {
3693 XENSTORE_RING_IDX rsp_cons, rsp_prod;
3694 };
3695
3696 +/* Violating this is very bad. See docs/misc/xenstore.txt. */
3697 +#define XENSTORE_PAYLOAD_MAX 4096
3698 +
3699 +/* Violating these just gets you an error back */
3700 +#define XENSTORE_ABS_PATH_MAX 3072
3701 +#define XENSTORE_REL_PATH_MAX 2048
3702 +
3703 #endif /* _XS_WIRE_H */
3704 +
3705 +/*
3706 + * Local variables:
3707 + * mode: C
3708 + * c-set-style: "BSD"
3709 + * c-basic-offset: 4
3710 + * tab-width: 4
3711 + * indent-tabs-mode: nil
3712 + * End:
3713 + */
3714 Index: head-2008-11-25/include/xen/interface/memory.h
3715 ===================================================================
3716 --- head-2008-11-25.orig/include/xen/interface/memory.h 2008-11-25 12:33:06.000000000 +0100
3717 +++ head-2008-11-25/include/xen/interface/memory.h 2008-11-25 12:35:56.000000000 +0100
3718 @@ -3,6 +3,24 @@
3719 *
3720 * Memory reservation and information.
3721 *
3722 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3723 + * of this software and associated documentation files (the "Software"), to
3724 + * deal in the Software without restriction, including without limitation the
3725 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3726 + * sell copies of the Software, and to permit persons to whom the Software is
3727 + * furnished to do so, subject to the following conditions:
3728 + *
3729 + * The above copyright notice and this permission notice shall be included in
3730 + * all copies or substantial portions of the Software.
3731 + *
3732 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3733 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3734 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3735 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3736 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3737 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3738 + * DEALINGS IN THE SOFTWARE.
3739 + *
3740 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
3741 */
3742
3743 @@ -10,13 +28,28 @@
3744 #define __XEN_PUBLIC_MEMORY_H__
3745
3746 /*
3747 - * Increase or decrease the specified domain's memory reservation. Returns a
3748 - * -ve errcode on failure, or the # extents successfully allocated or freed.
3749 + * Increase or decrease the specified domain's memory reservation. Returns the
3750 + * number of extents successfully allocated or freed.
3751 * arg == addr of struct xen_memory_reservation.
3752 */
3753 #define XENMEM_increase_reservation 0
3754 #define XENMEM_decrease_reservation 1
3755 #define XENMEM_populate_physmap 6
3756 +
3757 +#if __XEN_INTERFACE_VERSION__ >= 0x00030209
3758 +/*
3759 + * Maximum # bits addressable by the user of the allocated region (e.g., I/O
3760 + * devices often have a 32-bit limitation even in 64-bit systems). If zero
3761 + * then the user has no addressing restriction. This field is not used by
3762 + * XENMEM_decrease_reservation.
3763 + */
3764 +#define XENMEMF_address_bits(x) (x)
3765 +#define XENMEMF_get_address_bits(x) ((x) & 0xffu)
3766 +/* NUMA node to allocate from. */
3767 +#define XENMEMF_node(x) (((x) + 1) << 8)
3768 +#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
3769 +#endif
3770 +
3771 struct xen_memory_reservation {
3772
3773 /*
3774 @@ -29,19 +62,18 @@ struct xen_memory_reservation {
3775 * OUT: GMFN bases of extents that were allocated
3776 * (NB. This command also updates the mach_to_phys translation table)
3777 */
3778 - GUEST_HANDLE(ulong) extent_start;
3779 + XEN_GUEST_HANDLE(ulong) extent_start;
3780
3781 /* Number of extents, and size/alignment of each (2^extent_order pages). */
3782 - unsigned long nr_extents;
3783 + xen_ulong_t nr_extents;
3784 unsigned int extent_order;
3785
3786 - /*
3787 - * Maximum # bits addressable by the user of the allocated region (e.g.,
3788 - * I/O devices often have a 32-bit limitation even in 64-bit systems). If
3789 - * zero then the user has no addressing restriction.
3790 - * This field is not used by XENMEM_decrease_reservation.
3791 - */
3792 +#if __XEN_INTERFACE_VERSION__ >= 0x00030209
3793 + /* XENMEMF flags. */
3794 + unsigned int mem_flags;
3795 +#else
3796 unsigned int address_bits;
3797 +#endif
3798
3799 /*
3800 * Domain whose reservation is being changed.
3801 @@ -50,7 +82,51 @@ struct xen_memory_reservation {
3802 domid_t domid;
3803
3804 };
3805 -DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation);
3806 +typedef struct xen_memory_reservation xen_memory_reservation_t;
3807 +DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
3808 +
3809 +/*
3810 + * An atomic exchange of memory pages. If return code is zero then
3811 + * @out.extent_list provides GMFNs of the newly-allocated memory.
3812 + * Returns zero on complete success, otherwise a negative error code.
3813 + * On complete success then always @nr_exchanged == @in.nr_extents.
3814 + * On partial success @nr_exchanged indicates how much work was done.
3815 + */
3816 +#define XENMEM_exchange 11
3817 +struct xen_memory_exchange {
3818 + /*
3819 + * [IN] Details of memory extents to be exchanged (GMFN bases).
3820 + * Note that @in.address_bits is ignored and unused.
3821 + */
3822 + struct xen_memory_reservation in;
3823 +
3824 + /*
3825 + * [IN/OUT] Details of new memory extents.
3826 + * We require that:
3827 + * 1. @in.domid == @out.domid
3828 + * 2. @in.nr_extents << @in.extent_order ==
3829 + * @out.nr_extents << @out.extent_order
3830 + * 3. @in.extent_start and @out.extent_start lists must not overlap
3831 + * 4. @out.extent_start lists GPFN bases to be populated
3832 + * 5. @out.extent_start is overwritten with allocated GMFN bases
3833 + */
3834 + struct xen_memory_reservation out;
3835 +
3836 + /*
3837 + * [OUT] Number of input extents that were successfully exchanged:
3838 + * 1. The first @nr_exchanged input extents were successfully
3839 + * deallocated.
3840 + * 2. The corresponding first entries in the output extent list correctly
3841 + * indicate the GMFNs that were successfully exchanged.
3842 + * 3. All other input and output extents are untouched.
3843 + * 4. If not all input extents are exchanged then the return code of this
3844 + * command will be non-zero.
3845 + * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
3846 + */
3847 + xen_ulong_t nr_exchanged;
3848 +};
3849 +typedef struct xen_memory_exchange xen_memory_exchange_t;
3850 +DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
3851
3852 /*
3853 * Returns the maximum machine frame number of mapped RAM in this system.
3854 @@ -68,6 +144,11 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_re
3855 #define XENMEM_maximum_reservation 4
3856
3857 /*
3858 + * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
3859 + */
3860 +#define XENMEM_maximum_gpfn 14
3861 +
3862 +/*
3863 * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
3864 * mapping table. Architectures which do not have a m2p table do not implement
3865 * this command.
3866 @@ -86,7 +167,7 @@ struct xen_machphys_mfn_list {
3867 * any large discontiguities in the machine address space, 2MB gaps in
3868 * the machphys table will be represented by an MFN base of zero.
3869 */
3870 - GUEST_HANDLE(ulong) extent_start;
3871 + XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
3872
3873 /*
3874 * Number of extents written to the above array. This will be smaller
3875 @@ -94,7 +175,22 @@ struct xen_machphys_mfn_list {
3876 */
3877 unsigned int nr_extents;
3878 };
3879 -DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list);
3880 +typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
3881 +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
3882 +
3883 +/*
3884 + * Returns the location in virtual address space of the machine_to_phys
3885 + * mapping table. Architectures which do not have a m2p table, or which do not
3886 + * map it by default into guest address space, do not implement this command.
3887 + * arg == addr of xen_machphys_mapping_t.
3888 + */
3889 +#define XENMEM_machphys_mapping 12
3890 +struct xen_machphys_mapping {
3891 + xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */
3892 + xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */
3893 +};
3894 +typedef struct xen_machphys_mapping xen_machphys_mapping_t;
3895 +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
3896
3897 /*
3898 * Sets the GPFN at which a particular page appears in the specified guest's
3899 @@ -109,15 +205,33 @@ struct xen_add_to_physmap {
3900 /* Source mapping space. */
3901 #define XENMAPSPACE_shared_info 0 /* shared info page */
3902 #define XENMAPSPACE_grant_table 1 /* grant table page */
3903 +#define XENMAPSPACE_mfn 2 /* usual MFN */
3904 unsigned int space;
3905
3906 /* Index into source mapping space. */
3907 - unsigned long idx;
3908 + xen_ulong_t idx;
3909
3910 /* GPFN where the source mapping page should appear. */
3911 - unsigned long gpfn;
3912 + xen_pfn_t gpfn;
3913 };
3914 -DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
3915 +typedef struct xen_add_to_physmap xen_add_to_physmap_t;
3916 +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
3917 +
3918 +/*
3919 + * Unmaps the page appearing at a particular GPFN from the specified guest's
3920 + * pseudophysical address space.
3921 + * arg == addr of xen_remove_from_physmap_t.
3922 + */
3923 +#define XENMEM_remove_from_physmap 15
3924 +struct xen_remove_from_physmap {
3925 + /* Which domain to change the mapping for. */
3926 + domid_t domid;
3927 +
3928 + /* GPFN of the current mapping of the page. */
3929 + xen_pfn_t gpfn;
3930 +};
3931 +typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
3932 +DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
3933
3934 /*
3935 * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
3936 @@ -129,17 +243,71 @@ struct xen_translate_gpfn_list {
3937 domid_t domid;
3938
3939 /* Length of list. */
3940 - unsigned long nr_gpfns;
3941 + xen_ulong_t nr_gpfns;
3942
3943 /* List of GPFNs to translate. */
3944 - GUEST_HANDLE(ulong) gpfn_list;
3945 + XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list;
3946
3947 /*
3948 * Output list to contain MFN translations. May be the same as the input
3949 * list (in which case each input GPFN is overwritten with the output MFN).
3950 */
3951 - GUEST_HANDLE(ulong) mfn_list;
3952 + XEN_GUEST_HANDLE(xen_pfn_t) mfn_list;
3953 +};
3954 +typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t;
3955 +DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t);
3956 +
3957 +/*
3958 + * Returns the pseudo-physical memory map as it was when the domain
3959 + * was started (specified by XENMEM_set_memory_map).
3960 + * arg == addr of xen_memory_map_t.
3961 + */
3962 +#define XENMEM_memory_map 9
3963 +struct xen_memory_map {
3964 + /*
3965 + * On call the number of entries which can be stored in buffer. On
3966 + * return the number of entries which have been stored in
3967 + * buffer.
3968 + */
3969 + unsigned int nr_entries;
3970 +
3971 + /*
3972 + * Entries in the buffer are in the same format as returned by the
3973 + * BIOS INT 0x15 EAX=0xE820 call.
3974 + */
3975 + XEN_GUEST_HANDLE(void) buffer;
3976 +};
3977 +typedef struct xen_memory_map xen_memory_map_t;
3978 +DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
3979 +
3980 +/*
3981 + * Returns the real physical memory map. Passes the same structure as
3982 + * XENMEM_memory_map.
3983 + * arg == addr of xen_memory_map_t.
3984 + */
3985 +#define XENMEM_machine_memory_map 10
3986 +
3987 +/*
3988 + * Set the pseudo-physical memory map of a domain, as returned by
3989 + * XENMEM_memory_map.
3990 + * arg == addr of xen_foreign_memory_map_t.
3991 + */
3992 +#define XENMEM_set_memory_map 13
3993 +struct xen_foreign_memory_map {
3994 + domid_t domid;
3995 + struct xen_memory_map map;
3996 };
3997 -DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
3998 +typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
3999 +DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
4000
4001 #endif /* __XEN_PUBLIC_MEMORY_H__ */
4002 +
4003 +/*
4004 + * Local variables:
4005 + * mode: C
4006 + * c-set-style: "BSD"
4007 + * c-basic-offset: 4
4008 + * tab-width: 4
4009 + * indent-tabs-mode: nil
4010 + * End:
4011 + */
4012 Index: head-2008-11-25/include/xen/interface/physdev.h
4013 ===================================================================
4014 --- head-2008-11-25.orig/include/xen/interface/physdev.h 2008-11-25 12:33:06.000000000 +0100
4015 +++ head-2008-11-25/include/xen/interface/physdev.h 2008-11-25 12:35:56.000000000 +0100
4016 @@ -24,7 +24,7 @@
4017 /*
4018 * Prototype for this hypercall is:
4019 * int physdev_op(int cmd, void *args)
4020 - * @cmd == PHYSDEVOP_??? (physdev operation).
4021 + * @cmd == PHYSDEVOP_??? (physdev operation).
4022 * @args == Operation-specific extra arguments (NULL if none).
4023 */
4024
4025 @@ -32,114 +32,188 @@
4026 * Notify end-of-interrupt (EOI) for the specified IRQ.
4027 * @arg == pointer to physdev_eoi structure.
4028 */
4029 -#define PHYSDEVOP_eoi 12
4030 +#define PHYSDEVOP_eoi 12
4031 struct physdev_eoi {
4032 - /* IN */
4033 - uint32_t irq;
4034 + /* IN */
4035 + uint32_t irq;
4036 };
4037 +typedef struct physdev_eoi physdev_eoi_t;
4038 +DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
4039
4040 /*
4041 * Query the status of an IRQ line.
4042 * @arg == pointer to physdev_irq_status_query structure.
4043 */
4044 -#define PHYSDEVOP_irq_status_query 5
4045 +#define PHYSDEVOP_irq_status_query 5
4046 struct physdev_irq_status_query {
4047 - /* IN */
4048 - uint32_t irq;
4049 - /* OUT */
4050 - uint32_t flags; /* XENIRQSTAT_* */
4051 + /* IN */
4052 + uint32_t irq;
4053 + /* OUT */
4054 + uint32_t flags; /* XENIRQSTAT_* */
4055 };
4056 +typedef struct physdev_irq_status_query physdev_irq_status_query_t;
4057 +DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t);
4058
4059 /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
4060 -#define _XENIRQSTAT_needs_eoi (0)
4061 -#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
4062 +#define _XENIRQSTAT_needs_eoi (0)
4063 +#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
4064
4065 /* IRQ shared by multiple guests? */
4066 -#define _XENIRQSTAT_shared (1)
4067 -#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
4068 +#define _XENIRQSTAT_shared (1)
4069 +#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
4070
4071 /*
4072 * Set the current VCPU's I/O privilege level.
4073 * @arg == pointer to physdev_set_iopl structure.
4074 */
4075 -#define PHYSDEVOP_set_iopl 6
4076 +#define PHYSDEVOP_set_iopl 6
4077 struct physdev_set_iopl {
4078 - /* IN */
4079 - uint32_t iopl;
4080 + /* IN */
4081 + uint32_t iopl;
4082 };
4083 +typedef struct physdev_set_iopl physdev_set_iopl_t;
4084 +DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
4085
4086 /*
4087 * Set the current VCPU's I/O-port permissions bitmap.
4088 * @arg == pointer to physdev_set_iobitmap structure.
4089 */
4090 -#define PHYSDEVOP_set_iobitmap 7
4091 +#define PHYSDEVOP_set_iobitmap 7
4092 struct physdev_set_iobitmap {
4093 - /* IN */
4094 - uint8_t * bitmap;
4095 - uint32_t nr_ports;
4096 + /* IN */
4097 +#if __XEN_INTERFACE_VERSION__ >= 0x00030205
4098 + XEN_GUEST_HANDLE(uint8) bitmap;
4099 +#else
4100 + uint8_t *bitmap;
4101 +#endif
4102 + uint32_t nr_ports;
4103 };
4104 +typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
4105 +DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t);
4106
4107 /*
4108 * Read or write an IO-APIC register.
4109 * @arg == pointer to physdev_apic structure.
4110 */
4111 -#define PHYSDEVOP_apic_read 8
4112 -#define PHYSDEVOP_apic_write 9
4113 +#define PHYSDEVOP_apic_read 8
4114 +#define PHYSDEVOP_apic_write 9
4115 struct physdev_apic {
4116 - /* IN */
4117 - unsigned long apic_physbase;
4118 - uint32_t reg;
4119 - /* IN or OUT */
4120 - uint32_t value;
4121 + /* IN */
4122 + unsigned long apic_physbase;
4123 + uint32_t reg;
4124 + /* IN or OUT */
4125 + uint32_t value;
4126 };
4127 +typedef struct physdev_apic physdev_apic_t;
4128 +DEFINE_XEN_GUEST_HANDLE(physdev_apic_t);
4129
4130 /*
4131 * Allocate or free a physical upcall vector for the specified IRQ line.
4132 * @arg == pointer to physdev_irq structure.
4133 */
4134 -#define PHYSDEVOP_alloc_irq_vector 10
4135 -#define PHYSDEVOP_free_irq_vector 11
4136 +#define PHYSDEVOP_alloc_irq_vector 10
4137 +#define PHYSDEVOP_free_irq_vector 11
4138 struct physdev_irq {
4139 - /* IN */
4140 - uint32_t irq;
4141 - /* IN or OUT */
4142 - uint32_t vector;
4143 + /* IN */
4144 + uint32_t irq;
4145 + /* IN or OUT */
4146 + uint32_t vector;
4147 +};
4148 +typedef struct physdev_irq physdev_irq_t;
4149 +DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
4150 +
4151 +#define MAP_PIRQ_TYPE_MSI 0x0
4152 +#define MAP_PIRQ_TYPE_GSI 0x1
4153 +#define MAP_PIRQ_TYPE_UNKNOWN 0x2
4154 +
4155 +#define PHYSDEVOP_map_pirq 13
4156 +struct physdev_map_pirq {
4157 + domid_t domid;
4158 + /* IN */
4159 + int type;
4160 + /* IN */
4161 + int index;
4162 + /* IN or OUT */
4163 + int pirq;
4164 + /* IN */
4165 + int bus;
4166 + /* IN */
4167 + int devfn;
4168 + /* IN */
4169 + int entry_nr;
4170 + /* IN */
4171 + uint64_t table_base;
4172 +};
4173 +typedef struct physdev_map_pirq physdev_map_pirq_t;
4174 +DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t);
4175 +
4176 +#define PHYSDEVOP_unmap_pirq 14
4177 +struct physdev_unmap_pirq {
4178 + domid_t domid;
4179 + /* IN */
4180 + int pirq;
4181 +};
4182 +
4183 +typedef struct physdev_unmap_pirq physdev_unmap_pirq_t;
4184 +DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t);
4185 +
4186 +#define PHYSDEVOP_manage_pci_add 15
4187 +#define PHYSDEVOP_manage_pci_remove 16
4188 +struct physdev_manage_pci {
4189 + /* IN */
4190 + uint8_t bus;
4191 + uint8_t devfn;
4192 };
4193
4194 +typedef struct physdev_manage_pci physdev_manage_pci_t;
4195 +DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t);
4196 +
4197 /*
4198 * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op()
4199 * hypercall since 0x00030202.
4200 */
4201 struct physdev_op {
4202 - uint32_t cmd;
4203 - union {
4204 - struct physdev_irq_status_query irq_status_query;
4205 - struct physdev_set_iopl set_iopl;
4206 - struct physdev_set_iobitmap set_iobitmap;
4207 - struct physdev_apic apic_op;
4208 - struct physdev_irq irq_op;
4209 - } u;
4210 + uint32_t cmd;
4211 + union {
4212 + struct physdev_irq_status_query irq_status_query;
4213 + struct physdev_set_iopl set_iopl;
4214 + struct physdev_set_iobitmap set_iobitmap;
4215 + struct physdev_apic apic_op;
4216 + struct physdev_irq irq_op;
4217 + } u;
4218 };
4219 +typedef struct physdev_op physdev_op_t;
4220 +DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
4221
4222 /*
4223 * Notify that some PIRQ-bound event channels have been unmasked.
4224 * ** This command is obsolete since interface version 0x00030202 and is **
4225 - * ** unsupported by newer versions of Xen. **
4226 + * ** unsupported by newer versions of Xen. **
4227 */
4228 -#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
4229 +#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
4230
4231 /*
4232 * These all-capitals physdev operation names are superceded by the new names
4233 * (defined above) since interface version 0x00030202.
4234 */
4235 -#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
4236 -#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
4237 -#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
4238 -#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
4239 -#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
4240 -#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
4241 -#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
4242 +#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
4243 +#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
4244 +#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
4245 +#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
4246 +#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
4247 +#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
4248 +#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
4249 #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
4250 -#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
4251 +#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
4252
4253 #endif /* __XEN_PUBLIC_PHYSDEV_H__ */
4254 +
4255 +/*
4256 + * Local variables:
4257 + * mode: C
4258 + * c-set-style: "BSD"
4259 + * c-basic-offset: 4
4260 + * tab-width: 4
4261 + * indent-tabs-mode: nil
4262 + * End:
4263 + */
4264 Index: head-2008-11-25/include/xen/interface/sched.h
4265 ===================================================================
4266 --- head-2008-11-25.orig/include/xen/interface/sched.h 2008-11-25 12:33:06.000000000 +0100
4267 +++ head-2008-11-25/include/xen/interface/sched.h 2008-11-25 12:35:56.000000000 +0100
4268 @@ -3,6 +3,24 @@
4269 *
4270 * Scheduler state interactions
4271 *
4272 + * Permission is hereby granted, free of charge, to any person obtaining a copy
4273 + * of this software and associated documentation files (the "Software"), to
4274 + * deal in the Software without restriction, including without limitation the
4275 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
4276 + * sell copies of the Software, and to permit persons to whom the Software is
4277 + * furnished to do so, subject to the following conditions:
4278 + *
4279 + * The above copyright notice and this permission notice shall be included in
4280 + * all copies or substantial portions of the Software.
4281 + *
4282 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4283 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4284 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
4285 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4286 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4287 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
4288 + * DEALINGS IN THE SOFTWARE.
4289 + *
4290 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
4291 */
4292
4293 @@ -13,17 +31,17 @@
4294
4295 /*
4296 * The prototype for this hypercall is:
4297 - * long sched_op_new(int cmd, void *arg)
4298 + * long sched_op(int cmd, void *arg)
4299 * @cmd == SCHEDOP_??? (scheduler operation).
4300 * @arg == Operation-specific extra argument(s), as described below.
4301 *
4302 - * **NOTE**:
4303 - * Versions of Xen prior to 3.0.2 provide only the following legacy version
4304 + * Versions of Xen prior to 3.0.2 provided only the following legacy version
4305 * of this hypercall, supporting only the commands yield, block and shutdown:
4306 * long sched_op(int cmd, unsigned long arg)
4307 * @cmd == SCHEDOP_??? (scheduler operation).
4308 * @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
4309 * == SHUTDOWN_* code (SCHEDOP_shutdown)
4310 + * This legacy version is available to new guests as sched_op_compat().
4311 */
4312
4313 /*
4314 @@ -49,7 +67,8 @@
4315 struct sched_shutdown {
4316 unsigned int reason; /* SHUTDOWN_* */
4317 };
4318 -DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown);
4319 +typedef struct sched_shutdown sched_shutdown_t;
4320 +DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
4321
4322 /*
4323 * Poll a set of event-channel ports. Return when one or more are pending. An
4324 @@ -58,11 +77,26 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_shutdow
4325 */
4326 #define SCHEDOP_poll 3
4327 struct sched_poll {
4328 - GUEST_HANDLE(evtchn_port_t) ports;
4329 + XEN_GUEST_HANDLE(evtchn_port_t) ports;
4330 unsigned int nr_ports;
4331 uint64_t timeout;
4332 };
4333 -DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
4334 +typedef struct sched_poll sched_poll_t;
4335 +DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
4336 +
4337 +/*
4338 + * Declare a shutdown for another domain. The main use of this function is
4339 + * in interpreting shutdown requests and reasons for fully-virtualized
4340 + * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
4341 + * @arg == pointer to sched_remote_shutdown structure.
4342 + */
4343 +#define SCHEDOP_remote_shutdown 4
4344 +struct sched_remote_shutdown {
4345 + domid_t domain_id; /* Remote domain ID */
4346 + unsigned int reason; /* SHUTDOWN_xxx reason */
4347 +};
4348 +typedef struct sched_remote_shutdown sched_remote_shutdown_t;
4349 +DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
4350
4351 /*
4352 * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
4353 @@ -75,3 +109,13 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
4354 #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
4355
4356 #endif /* __XEN_PUBLIC_SCHED_H__ */
4357 +
4358 +/*
4359 + * Local variables:
4360 + * mode: C
4361 + * c-set-style: "BSD"
4362 + * c-basic-offset: 4
4363 + * tab-width: 4
4364 + * indent-tabs-mode: nil
4365 + * End:
4366 + */
4367 Index: head-2008-11-25/include/xen/interface/vcpu.h
4368 ===================================================================
4369 --- head-2008-11-25.orig/include/xen/interface/vcpu.h 2008-11-25 12:33:06.000000000 +0100
4370 +++ head-2008-11-25/include/xen/interface/vcpu.h 2008-11-25 12:35:56.000000000 +0100
4371 @@ -29,9 +29,9 @@
4372
4373 /*
4374 * Prototype for this hypercall is:
4375 - * int vcpu_op(int cmd, int vcpuid, void *extra_args)
4376 - * @cmd == VCPUOP_??? (VCPU operation).
4377 - * @vcpuid == VCPU to operate on.
4378 + * int vcpu_op(int cmd, int vcpuid, void *extra_args)
4379 + * @cmd == VCPUOP_??? (VCPU operation).
4380 + * @vcpuid == VCPU to operate on.
4381 * @extra_args == Operation-specific extra arguments (NULL if none).
4382 */
4383
4384 @@ -40,52 +40,53 @@
4385 * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
4386 *
4387 * @extra_arg == pointer to vcpu_guest_context structure containing initial
4388 - * state for the VCPU.
4389 + * state for the VCPU.
4390 */
4391 -#define VCPUOP_initialise 0
4392 +#define VCPUOP_initialise 0
4393
4394 /*
4395 * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
4396 * if the VCPU has not been initialised (VCPUOP_initialise).
4397 */
4398 -#define VCPUOP_up 1
4399 +#define VCPUOP_up 1
4400
4401 /*
4402 * Bring down a VCPU (i.e., make it non-runnable).
4403 * There are a few caveats that callers should observe:
4404 - * 1. This operation may return, and VCPU_is_up may return false, before the
4405 - * VCPU stops running (i.e., the command is asynchronous). It is a good
4406 - * idea to ensure that the VCPU has entered a non-critical loop before
4407 - * bringing it down. Alternatively, this operation is guaranteed
4408 - * synchronous if invoked by the VCPU itself.
4409 - * 2. After a VCPU is initialised, there is currently no way to drop all its
4410 - * references to domain memory. Even a VCPU that is down still holds
4411 - * memory references via its pagetable base pointer and GDT. It is good
4412 - * practise to move a VCPU onto an 'idle' or default page table, LDT and
4413 - * GDT before bringing it down.
4414 + * 1. This operation may return, and VCPU_is_up may return false, before the
4415 + * VCPU stops running (i.e., the command is asynchronous). It is a good
4416 + * idea to ensure that the VCPU has entered a non-critical loop before
4417 + * bringing it down. Alternatively, this operation is guaranteed
4418 + * synchronous if invoked by the VCPU itself.
4419 + * 2. After a VCPU is initialised, there is currently no way to drop all its
4420 + * references to domain memory. Even a VCPU that is down still holds
4421 + * memory references via its pagetable base pointer and GDT. It is good
4422 + * practise to move a VCPU onto an 'idle' or default page table, LDT and
4423 + * GDT before bringing it down.
4424 */
4425 -#define VCPUOP_down 2
4426 +#define VCPUOP_down 2
4427
4428 /* Returns 1 if the given VCPU is up. */
4429 -#define VCPUOP_is_up 3
4430 +#define VCPUOP_is_up 3
4431
4432 /*
4433 * Return information about the state and running time of a VCPU.
4434 * @extra_arg == pointer to vcpu_runstate_info structure.
4435 */
4436 -#define VCPUOP_get_runstate_info 4
4437 +#define VCPUOP_get_runstate_info 4
4438 struct vcpu_runstate_info {
4439 - /* VCPU's current state (RUNSTATE_*). */
4440 - int state;
4441 - /* When was current state entered (system time, ns)? */
4442 - uint64_t state_entry_time;
4443 - /*
4444 - * Time spent in each RUNSTATE_* (ns). The sum of these times is
4445 - * guaranteed not to drift from system time.
4446 - */
4447 - uint64_t time[4];
4448 + /* VCPU's current state (RUNSTATE_*). */
4449 + int state;
4450 + /* When was current state entered (system time, ns)? */
4451 + uint64_t state_entry_time;
4452 + /*
4453 + * Time spent in each RUNSTATE_* (ns). The sum of these times is
4454 + * guaranteed not to drift from system time.
4455 + */
4456 + uint64_t time[4];
4457 };
4458 -DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate_info);
4459 +typedef struct vcpu_runstate_info vcpu_runstate_info_t;
4460 +DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);
4461
4462 /* VCPU is currently running on a physical CPU. */
4463 #define RUNSTATE_running 0
4464 @@ -108,47 +109,52 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate
4465 * Register a shared memory area from which the guest may obtain its own
4466 * runstate information without needing to execute a hypercall.
4467 * Notes:
4468 - * 1. The registered address may be virtual or physical, depending on the
4469 - * platform. The virtual address should be registered on x86 systems.
4470 - * 2. Only one shared area may be registered per VCPU. The shared area is
4471 - * updated by the hypervisor each time the VCPU is scheduled. Thus
4472 - * runstate.state will always be RUNSTATE_running and
4473 - * runstate.state_entry_time will indicate the system time at which the
4474 - * VCPU was last scheduled to run.
4475 + * 1. The registered address may be virtual or physical or guest handle,
4476 + * depending on the platform. Virtual address or guest handle should be
4477 + * registered on x86 systems.
4478 + * 2. Only one shared area may be registered per VCPU. The shared area is
4479 + * updated by the hypervisor each time the VCPU is scheduled. Thus
4480 + * runstate.state will always be RUNSTATE_running and
4481 + * runstate.state_entry_time will indicate the system time at which the
4482 + * VCPU was last scheduled to run.
4483 * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
4484 */
4485 #define VCPUOP_register_runstate_memory_area 5
4486 struct vcpu_register_runstate_memory_area {
4487 - union {
4488 - GUEST_HANDLE(vcpu_runstate_info) h;
4489 - struct vcpu_runstate_info *v;
4490 - uint64_t p;
4491 - } addr;
4492 + union {
4493 + XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
4494 + struct vcpu_runstate_info *v;
4495 + uint64_t p;
4496 + } addr;
4497 };
4498 +typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
4499 +DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);
4500
4501 /*
4502 * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
4503 * which can be set via these commands. Periods smaller than one millisecond
4504 * may not be supported.
4505 */
4506 -#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
4507 -#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
4508 +#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
4509 +#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
4510 struct vcpu_set_periodic_timer {
4511 - uint64_t period_ns;
4512 + uint64_t period_ns;
4513 };
4514 -DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_periodic_timer);
4515 +typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
4516 +DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);
4517
4518 /*
4519 * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
4520 * timer which can be set via these commands.
4521 */
4522 -#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
4523 +#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
4524 #define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
4525 struct vcpu_set_singleshot_timer {
4526 - uint64_t timeout_abs_ns;
4527 - uint32_t flags; /* VCPU_SSHOTTMR_??? */
4528 + uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */
4529 + uint32_t flags; /* VCPU_SSHOTTMR_??? */
4530 };
4531 -DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_singleshot_timer);
4532 +typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
4533 +DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
4534
4535 /* Flags to VCPUOP_set_singleshot_timer. */
4536 /* Require the timeout to be in the future (return -ETIME if it's passed). */
4537 @@ -161,13 +167,47 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_sing
4538 * structure in a convenient place, such as in a per-cpu data area.
4539 * The pointer need not be page aligned, but the structure must not
4540 * cross a page boundary.
4541 + *
4542 + * This may be called only once per vcpu.
4543 */
4544 -#define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */
4545 +#define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */
4546 struct vcpu_register_vcpu_info {
4547 uint64_t mfn; /* mfn of page to place vcpu_info */
4548 uint32_t offset; /* offset within page */
4549 uint32_t rsvd; /* unused */
4550 };
4551 -DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info);
4552 +typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
4553 +DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
4554 +
4555 +/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
4556 +#define VCPUOP_send_nmi 11
4557 +
4558 +/*
4559 + * Get the physical ID information for a pinned vcpu's underlying physical
4560 + * processor. The physical ID informmation is architecture-specific.
4561 + * On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and
4562 + * greater are reserved.
4563 + * This command returns -EINVAL if it is not a valid operation for this VCPU.
4564 + */
4565 +#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */
4566 +struct vcpu_get_physid {
4567 + uint64_t phys_id;
4568 +};
4569 +typedef struct vcpu_get_physid vcpu_get_physid_t;
4570 +DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
4571 +#define xen_vcpu_physid_to_x86_apicid(physid) \
4572 + ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid)))
4573 +#define xen_vcpu_physid_to_x86_acpiid(physid) \
4574 + ((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32)))
4575
4576 #endif /* __XEN_PUBLIC_VCPU_H__ */
4577 +
4578 +/*
4579 + * Local variables:
4580 + * mode: C
4581 + * c-set-style: "BSD"
4582 + * c-basic-offset: 4
4583 + * tab-width: 4
4584 + * indent-tabs-mode: nil
4585 + * End:
4586 + */
4587 Index: head-2008-11-25/include/xen/interface/version.h
4588 ===================================================================
4589 --- head-2008-11-25.orig/include/xen/interface/version.h 2008-11-25 12:33:06.000000000 +0100
4590 +++ head-2008-11-25/include/xen/interface/version.h 2008-11-25 12:35:56.000000000 +0100
4591 @@ -3,6 +3,24 @@
4592 *
4593 * Xen version, type, and compile information.
4594 *
4595 + * Permission is hereby granted, free of charge, to any person obtaining a copy
4596 + * of this software and associated documentation files (the "Software"), to
4597 + * deal in the Software without restriction, including without limitation the
4598 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
4599 + * sell copies of the Software, and to permit persons to whom the Software is
4600 + * furnished to do so, subject to the following conditions:
4601 + *
4602 + * The above copyright notice and this permission notice shall be included in
4603 + * all copies or substantial portions of the Software.
4604 + *
4605 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4606 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4607 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
4608 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4609 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4610 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
4611 + * DEALINGS IN THE SOFTWARE.
4612 + *
4613 * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com>
4614 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
4615 */
4616 @@ -10,17 +28,15 @@
4617 #ifndef __XEN_PUBLIC_VERSION_H__
4618 #define __XEN_PUBLIC_VERSION_H__
4619
4620 -/* NB. All ops return zero on success, except XENVER_version. */
4621 +/* NB. All ops return zero on success, except XENVER_{version,pagesize} */
4622
4623 /* arg == NULL; returns major:minor (16:16). */
4624 #define XENVER_version 0
4625
4626 /* arg == xen_extraversion_t. */
4627 #define XENVER_extraversion 1
4628 -struct xen_extraversion {
4629 - char extraversion[16];
4630 -};
4631 -#define XEN_EXTRAVERSION_LEN (sizeof(struct xen_extraversion))
4632 +typedef char xen_extraversion_t[16];
4633 +#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
4634
4635 /* arg == xen_compile_info_t. */
4636 #define XENVER_compile_info 2
4637 @@ -30,31 +46,46 @@ struct xen_compile_info {
4638 char compile_domain[32];
4639 char compile_date[32];
4640 };
4641 +typedef struct xen_compile_info xen_compile_info_t;
4642
4643 #define XENVER_capabilities 3
4644 -struct xen_capabilities_info {
4645 - char info[1024];
4646 -};
4647 -#define XEN_CAPABILITIES_INFO_LEN (sizeof(struct xen_capabilities_info))
4648 +typedef char xen_capabilities_info_t[1024];
4649 +#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
4650
4651 #define XENVER_changeset 4
4652 -struct xen_changeset_info {
4653 - char info[64];
4654 -};
4655 -#define XEN_CHANGESET_INFO_LEN (sizeof(struct xen_changeset_info))
4656 +typedef char xen_changeset_info_t[64];
4657 +#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
4658
4659 #define XENVER_platform_parameters 5
4660 struct xen_platform_parameters {
4661 unsigned long virt_start;
4662 };
4663 +typedef struct xen_platform_parameters xen_platform_parameters_t;
4664
4665 #define XENVER_get_features 6
4666 struct xen_feature_info {
4667 unsigned int submap_idx; /* IN: which 32-bit submap to return */
4668 uint32_t submap; /* OUT: 32-bit submap */
4669 };
4670 +typedef struct xen_feature_info xen_feature_info_t;
4671
4672 /* Declares the features reported by XENVER_get_features. */
4673 #include "features.h"
4674
4675 +/* arg == NULL; returns host memory page size. */
4676 +#define XENVER_pagesize 7
4677 +
4678 +/* arg == xen_domain_handle_t. */
4679 +#define XENVER_guest_handle 8
4680 +
4681 #endif /* __XEN_PUBLIC_VERSION_H__ */
4682 +
4683 +/*
4684 + * Local variables:
4685 + * mode: C
4686 + * c-set-style: "BSD"
4687 + * c-basic-offset: 4
4688 + * tab-width: 4
4689 + * indent-tabs-mode: nil
4690 + * End:
4691 + */
4692 Index: head-2008-11-25/include/xen/interface/xen.h
4693 ===================================================================
4694 --- head-2008-11-25.orig/include/xen/interface/xen.h 2008-11-25 12:33:06.000000000 +0100
4695 +++ head-2008-11-25/include/xen/interface/xen.h 2008-11-25 12:35:56.000000000 +0100
4696 @@ -3,35 +3,68 @@
4697 *
4698 * Guest OS interface to Xen.
4699 *
4700 + * Permission is hereby granted, free of charge, to any person obtaining a copy
4701 + * of this software and associated documentation files (the "Software"), to
4702 + * deal in the Software without restriction, including without limitation the
4703 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
4704 + * sell copies of the Software, and to permit persons to whom the Software is
4705 + * furnished to do so, subject to the following conditions:
4706 + *
4707 + * The above copyright notice and this permission notice shall be included in
4708 + * all copies or substantial portions of the Software.
4709 + *
4710 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4711 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4712 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
4713 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4714 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4715 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
4716 + * DEALINGS IN THE SOFTWARE.
4717 + *
4718 * Copyright (c) 2004, K A Fraser
4719 */
4720
4721 #ifndef __XEN_PUBLIC_XEN_H__
4722 #define __XEN_PUBLIC_XEN_H__
4723
4724 -#include <asm/xen/interface.h>
4725 +#include "xen-compat.h"
4726 +#ifdef CONFIG_PARAVIRT_XEN
4727 #include <asm/pvclock-abi.h>
4728 +#endif
4729
4730 -/*
4731 - * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
4732 - */
4733 +#if defined(__i386__) || defined(__x86_64__)
4734 +#include "arch-x86/xen.h"
4735 +#elif defined(__ia64__)
4736 +#include "arch-ia64.h"
4737 +#else
4738 +#error "Unsupported architecture"
4739 +#endif
4740 +
4741 +#ifndef __ASSEMBLY__
4742 +/* Guest handles for primitive C types. */
4743 +DEFINE_XEN_GUEST_HANDLE(char);
4744 +__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
4745 +DEFINE_XEN_GUEST_HANDLE(int);
4746 +__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
4747 +DEFINE_XEN_GUEST_HANDLE(long);
4748 +__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
4749 +DEFINE_XEN_GUEST_HANDLE(void);
4750 +
4751 +DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
4752 +#endif
4753
4754 /*
4755 - * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5.
4756 - * EAX = return value
4757 - * (argument registers may be clobbered on return)
4758 - * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6.
4759 - * RAX = return value
4760 - * (argument registers not clobbered on return; RCX, R11 are)
4761 + * HYPERCALLS
4762 */
4763 +
4764 #define __HYPERVISOR_set_trap_table 0
4765 #define __HYPERVISOR_mmu_update 1
4766 #define __HYPERVISOR_set_gdt 2
4767 #define __HYPERVISOR_stack_switch 3
4768 #define __HYPERVISOR_set_callbacks 4
4769 #define __HYPERVISOR_fpu_taskswitch 5
4770 -#define __HYPERVISOR_sched_op 6
4771 -#define __HYPERVISOR_dom0_op 7
4772 +#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
4773 +#define __HYPERVISOR_platform_op 7
4774 #define __HYPERVISOR_set_debugreg 8
4775 #define __HYPERVISOR_get_debugreg 9
4776 #define __HYPERVISOR_update_descriptor 10
4777 @@ -39,10 +72,10 @@
4778 #define __HYPERVISOR_multicall 13
4779 #define __HYPERVISOR_update_va_mapping 14
4780 #define __HYPERVISOR_set_timer_op 15
4781 -#define __HYPERVISOR_event_channel_op_compat 16
4782 +#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
4783 #define __HYPERVISOR_xen_version 17
4784 #define __HYPERVISOR_console_io 18
4785 -#define __HYPERVISOR_physdev_op_compat 19
4786 +#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
4787 #define __HYPERVISOR_grant_table_op 20
4788 #define __HYPERVISOR_vm_assist 21
4789 #define __HYPERVISOR_update_va_mapping_otherdomain 22
4790 @@ -50,7 +83,7 @@
4791 #define __HYPERVISOR_vcpu_op 24
4792 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
4793 #define __HYPERVISOR_mmuext_op 26
4794 -#define __HYPERVISOR_acm_op 27
4795 +#define __HYPERVISOR_xsm_op 27
4796 #define __HYPERVISOR_nmi_op 28
4797 #define __HYPERVISOR_sched_op_new 29
4798 #define __HYPERVISOR_callback_op 30
4799 @@ -58,6 +91,9 @@
4800 #define __HYPERVISOR_event_channel_op 32
4801 #define __HYPERVISOR_physdev_op 33
4802 #define __HYPERVISOR_hvm_op 34
4803 +#define __HYPERVISOR_sysctl 35
4804 +#define __HYPERVISOR_domctl 36
4805 +#define __HYPERVISOR_kexec_op 37
4806
4807 /* Architecture-specific hypercall definitions. */
4808 #define __HYPERVISOR_arch_0 48
4809 @@ -70,15 +106,48 @@
4810 #define __HYPERVISOR_arch_7 55
4811
4812 /*
4813 + * HYPERCALL COMPATIBILITY.
4814 + */
4815 +
4816 +/* New sched_op hypercall introduced in 0x00030101. */
4817 +#if __XEN_INTERFACE_VERSION__ < 0x00030101
4818 +#undef __HYPERVISOR_sched_op
4819 +#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
4820 +#else
4821 +#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_new
4822 +#endif
4823 +
4824 +/* New event-channel and physdev hypercalls introduced in 0x00030202. */
4825 +#if __XEN_INTERFACE_VERSION__ < 0x00030202
4826 +#undef __HYPERVISOR_event_channel_op
4827 +#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
4828 +#undef __HYPERVISOR_physdev_op
4829 +#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
4830 +#endif
4831 +
4832 +/* New platform_op hypercall introduced in 0x00030204. */
4833 +#if __XEN_INTERFACE_VERSION__ < 0x00030204
4834 +#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
4835 +#endif
4836 +
4837 +/*
4838 * VIRTUAL INTERRUPTS
4839 *
4840 * Virtual interrupts that a guest OS may receive from Xen.
4841 - */
4842 -#define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */
4843 -#define VIRQ_DEBUG 1 /* Request guest to dump debug info. */
4844 -#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */
4845 -#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */
4846 -#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */
4847 + *
4848 + * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
4849 + * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
4850 + * The latter can be allocated only once per guest: they must initially be
4851 + * allocated to VCPU0 but can subsequently be re-bound.
4852 + */
4853 +#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
4854 +#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
4855 +#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
4856 +#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
4857 +#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
4858 +#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
4859 +#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
4860 +#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
4861
4862 /* Architecture-specific VIRQ definitions. */
4863 #define VIRQ_ARCH_0 16
4864 @@ -91,6 +158,7 @@
4865 #define VIRQ_ARCH_7 23
4866
4867 #define NR_VIRQS 24
4868 +
4869 /*
4870 * MMU-UPDATE REQUESTS
4871 *
4872 @@ -166,6 +234,13 @@
4873 * cmd: MMUEXT_SET_LDT
4874 * linear_addr: Linear address of LDT base (NB. must be page-aligned).
4875 * nr_ents: Number of entries in LDT.
4876 + *
4877 + * cmd: MMUEXT_CLEAR_PAGE
4878 + * mfn: Machine frame number to be cleared.
4879 + *
4880 + * cmd: MMUEXT_COPY_PAGE
4881 + * mfn: Machine frame number of the destination page.
4882 + * src_mfn: Machine frame number of the source page.
4883 */
4884 #define MMUEXT_PIN_L1_TABLE 0
4885 #define MMUEXT_PIN_L2_TABLE 1
4886 @@ -182,24 +257,34 @@
4887 #define MMUEXT_FLUSH_CACHE 12
4888 #define MMUEXT_SET_LDT 13
4889 #define MMUEXT_NEW_USER_BASEPTR 15
4890 +#define MMUEXT_CLEAR_PAGE 16
4891 +#define MMUEXT_COPY_PAGE 17
4892
4893 #ifndef __ASSEMBLY__
4894 struct mmuext_op {
4895 - unsigned int cmd;
4896 - union {
4897 - /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
4898 - unsigned long mfn;
4899 - /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
4900 - unsigned long linear_addr;
4901 - } arg1;
4902 - union {
4903 - /* SET_LDT */
4904 - unsigned int nr_ents;
4905 - /* TLB_FLUSH_MULTI, INVLPG_MULTI */
4906 - void *vcpumask;
4907 - } arg2;
4908 + unsigned int cmd;
4909 + union {
4910 + /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
4911 + * CLEAR_PAGE, COPY_PAGE */
4912 + xen_pfn_t mfn;
4913 + /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
4914 + unsigned long linear_addr;
4915 + } arg1;
4916 + union {
4917 + /* SET_LDT */
4918 + unsigned int nr_ents;
4919 + /* TLB_FLUSH_MULTI, INVLPG_MULTI */
4920 +#if __XEN_INTERFACE_VERSION__ >= 0x00030205
4921 + XEN_GUEST_HANDLE(void) vcpumask;
4922 +#else
4923 + void *vcpumask;
4924 +#endif
4925 + /* COPY_PAGE */
4926 + xen_pfn_t src_mfn;
4927 + } arg2;
4928 };
4929 -DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
4930 +typedef struct mmuext_op mmuext_op_t;
4931 +DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
4932 #endif
4933
4934 /* These are passed as 'flags' to update_va_mapping. They can be ORed. */
4935 @@ -224,11 +309,24 @@ DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
4936 */
4937 #define VMASST_CMD_enable 0
4938 #define VMASST_CMD_disable 1
4939 +
4940 +/* x86/32 guests: simulate full 4GB segment limits. */
4941 #define VMASST_TYPE_4gb_segments 0
4942 +
4943 +/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
4944 #define VMASST_TYPE_4gb_segments_notify 1
4945 +
4946 +/*
4947 + * x86 guests: support writes to bottom-level PTEs.
4948 + * NB1. Page-directory entries cannot be written.
4949 + * NB2. Guest must continue to remove all writable mappings of PTEs.
4950 + */
4951 #define VMASST_TYPE_writable_pagetables 2
4952 +
4953 +/* x86/PAE guests: support PDPTs above 4GB. */
4954 #define VMASST_TYPE_pae_extended_cr3 3
4955 -#define MAX_VMASST_TYPE 3
4956 +
4957 +#define MAX_VMASST_TYPE 3
4958
4959 #ifndef __ASSEMBLY__
4960
4961 @@ -267,18 +365,19 @@ struct mmu_update {
4962 uint64_t ptr; /* Machine address of PTE. */
4963 uint64_t val; /* New contents of PTE. */
4964 };
4965 -DEFINE_GUEST_HANDLE_STRUCT(mmu_update);
4966 +typedef struct mmu_update mmu_update_t;
4967 +DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
4968
4969 /*
4970 * Send an array of these to HYPERVISOR_multicall().
4971 * NB. The fields are natural register size for this architecture.
4972 */
4973 struct multicall_entry {
4974 - unsigned long op;
4975 - long result;
4976 + unsigned long op, result;
4977 unsigned long args[6];
4978 };
4979 -DEFINE_GUEST_HANDLE_STRUCT(multicall_entry);
4980 +typedef struct multicall_entry multicall_entry_t;
4981 +DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
4982
4983 /*
4984 * Event channel endpoints per domain:
4985 @@ -287,173 +386,240 @@ DEFINE_GUEST_HANDLE_STRUCT(multicall_ent
4986 #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
4987
4988 struct vcpu_time_info {
4989 - /*
4990 - * Updates to the following values are preceded and followed
4991 - * by an increment of 'version'. The guest can therefore
4992 - * detect updates by looking for changes to 'version'. If the
4993 - * least-significant bit of the version number is set then an
4994 - * update is in progress and the guest must wait to read a
4995 - * consistent set of values. The correct way to interact with
4996 - * the version number is similar to Linux's seqlock: see the
4997 - * implementations of read_seqbegin/read_seqretry.
4998 - */
4999 - uint32_t version;
5000 - uint32_t pad0;
5001 - uint64_t tsc_timestamp; /* TSC at last update of time vals. */
5002 - uint64_t system_time; /* Time, in nanosecs, since boot. */
5003 - /*
5004 - * Current system time:
5005 - * system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul
5006 - * CPU frequency (Hz):
5007 - * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
5008 - */
5009 - uint32_t tsc_to_system_mul;
5010 - int8_t tsc_shift;
5011 - int8_t pad1[3];
5012 + /*
5013 + * Updates to the following values are preceded and followed by an
5014 + * increment of 'version'. The guest can therefore detect updates by
5015 + * looking for changes to 'version'. If the least-significant bit of
5016 + * the version number is set then an update is in progress and the guest
5017 + * must wait to read a consistent set of values.
5018 + * The correct way to interact with the version number is similar to
5019 + * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
5020 + */
5021 + uint32_t version;
5022 + uint32_t pad0;
5023 + uint64_t tsc_timestamp; /* TSC at last update of time vals. */
5024 + uint64_t system_time; /* Time, in nanosecs, since boot. */
5025 + /*
5026 + * Current system time:
5027 + * system_time +
5028 + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
5029 + * CPU frequency (Hz):
5030 + * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
5031 + */
5032 + uint32_t tsc_to_system_mul;
5033 + int8_t tsc_shift;
5034 + int8_t pad1[3];
5035 }; /* 32 bytes */
5036 +typedef struct vcpu_time_info vcpu_time_info_t;
5037
5038 struct vcpu_info {
5039 - /*
5040 - * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
5041 - * a pending notification for a particular VCPU. It is then cleared
5042 - * by the guest OS /before/ checking for pending work, thus avoiding
5043 - * a set-and-check race. Note that the mask is only accessed by Xen
5044 - * on the CPU that is currently hosting the VCPU. This means that the
5045 - * pending and mask flags can be updated by the guest without special
5046 - * synchronisation (i.e., no need for the x86 LOCK prefix).
5047 - * This may seem suboptimal because if the pending flag is set by
5048 - * a different CPU then an IPI may be scheduled even when the mask
5049 - * is set. However, note:
5050 - * 1. The task of 'interrupt holdoff' is covered by the per-event-
5051 - * channel mask bits. A 'noisy' event that is continually being
5052 - * triggered can be masked at source at this very precise
5053 - * granularity.
5054 - * 2. The main purpose of the per-VCPU mask is therefore to restrict
5055 - * reentrant execution: whether for concurrency control, or to
5056 - * prevent unbounded stack usage. Whatever the purpose, we expect
5057 - * that the mask will be asserted only for short periods at a time,
5058 - * and so the likelihood of a 'spurious' IPI is suitably small.
5059 - * The mask is read before making an event upcall to the guest: a
5060 - * non-zero mask therefore guarantees that the VCPU will not receive
5061 - * an upcall activation. The mask is cleared when the VCPU requests
5062 - * to block: this avoids wakeup-waiting races.
5063 - */
5064 - uint8_t evtchn_upcall_pending;
5065 - uint8_t evtchn_upcall_mask;
5066 - unsigned long evtchn_pending_sel;
5067 - struct arch_vcpu_info arch;
5068 - struct pvclock_vcpu_time_info time;
5069 + /*
5070 + * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
5071 + * a pending notification for a particular VCPU. It is then cleared
5072 + * by the guest OS /before/ checking for pending work, thus avoiding
5073 + * a set-and-check race. Note that the mask is only accessed by Xen
5074 + * on the CPU that is currently hosting the VCPU. This means that the
5075 + * pending and mask flags can be updated by the guest without special
5076 + * synchronisation (i.e., no need for the x86 LOCK prefix).
5077 + * This may seem suboptimal because if the pending flag is set by
5078 + * a different CPU then an IPI may be scheduled even when the mask
5079 + * is set. However, note:
5080 + * 1. The task of 'interrupt holdoff' is covered by the per-event-
5081 + * channel mask bits. A 'noisy' event that is continually being
5082 + * triggered can be masked at source at this very precise
5083 + * granularity.
5084 + * 2. The main purpose of the per-VCPU mask is therefore to restrict
5085 + * reentrant execution: whether for concurrency control, or to
5086 + * prevent unbounded stack usage. Whatever the purpose, we expect
5087 + * that the mask will be asserted only for short periods at a time,
5088 + * and so the likelihood of a 'spurious' IPI is suitably small.
5089 + * The mask is read before making an event upcall to the guest: a
5090 + * non-zero mask therefore guarantees that the VCPU will not receive
5091 + * an upcall activation. The mask is cleared when the VCPU requests
5092 + * to block: this avoids wakeup-waiting races.
5093 + */
5094 + uint8_t evtchn_upcall_pending;
5095 + uint8_t evtchn_upcall_mask;
5096 + unsigned long evtchn_pending_sel;
5097 + struct arch_vcpu_info arch;
5098 +#ifdef CONFIG_PARAVIRT_XEN
5099 + struct pvclock_vcpu_time_info time;
5100 +#else
5101 + struct vcpu_time_info time;
5102 +#endif
5103 }; /* 64 bytes (x86) */
5104 +#ifndef __XEN__
5105 +typedef struct vcpu_info vcpu_info_t;
5106 +#endif
5107
5108 /*
5109 * Xen/kernel shared data -- pointer provided in start_info.
5110 - * NB. We expect that this struct is smaller than a page.
5111 + *
5112 + * This structure is defined to be both smaller than a page, and the
5113 + * only data on the shared page, but may vary in actual size even within
5114 + * compatible Xen versions; guests should not rely on the size
5115 + * of this structure remaining constant.
5116 */
5117 struct shared_info {
5118 - struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
5119 + struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
5120
5121 - /*
5122 - * A domain can create "event channels" on which it can send and receive
5123 - * asynchronous event notifications. There are three classes of event that
5124 - * are delivered by this mechanism:
5125 - * 1. Bi-directional inter- and intra-domain connections. Domains must
5126 - * arrange out-of-band to set up a connection (usually by allocating
5127 - * an unbound 'listener' port and avertising that via a storage service
5128 - * such as xenstore).
5129 - * 2. Physical interrupts. A domain with suitable hardware-access
5130 - * privileges can bind an event-channel port to a physical interrupt
5131 - * source.
5132 - * 3. Virtual interrupts ('events'). A domain can bind an event-channel
5133 - * port to a virtual interrupt source, such as the virtual-timer
5134 - * device or the emergency console.
5135 - *
5136 - * Event channels are addressed by a "port index". Each channel is
5137 - * associated with two bits of information:
5138 - * 1. PENDING -- notifies the domain that there is a pending notification
5139 - * to be processed. This bit is cleared by the guest.
5140 - * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
5141 - * will cause an asynchronous upcall to be scheduled. This bit is only
5142 - * updated by the guest. It is read-only within Xen. If a channel
5143 - * becomes pending while the channel is masked then the 'edge' is lost
5144 - * (i.e., when the channel is unmasked, the guest must manually handle
5145 - * pending notifications as no upcall will be scheduled by Xen).
5146 - *
5147 - * To expedite scanning of pending notifications, any 0->1 pending
5148 - * transition on an unmasked channel causes a corresponding bit in a
5149 - * per-vcpu selector word to be set. Each bit in the selector covers a
5150 - * 'C long' in the PENDING bitfield array.
5151 - */
5152 - unsigned long evtchn_pending[sizeof(unsigned long) * 8];
5153 - unsigned long evtchn_mask[sizeof(unsigned long) * 8];
5154 -
5155 - /*
5156 - * Wallclock time: updated only by control software. Guests should base
5157 - * their gettimeofday() syscall on this wallclock-base value.
5158 - */
5159 - struct pvclock_wall_clock wc;
5160 + /*
5161 + * A domain can create "event channels" on which it can send and receive
5162 + * asynchronous event notifications. There are three classes of event that
5163 + * are delivered by this mechanism:
5164 + * 1. Bi-directional inter- and intra-domain connections. Domains must
5165 + * arrange out-of-band to set up a connection (usually by allocating
5166 + * an unbound 'listener' port and advertising that via a storage service
5167 + * such as xenstore).
5168 + * 2. Physical interrupts. A domain with suitable hardware-access
5169 + * privileges can bind an event-channel port to a physical interrupt
5170 + * source.
5171 + * 3. Virtual interrupts ('events'). A domain can bind an event-channel
5172 + * port to a virtual interrupt source, such as the virtual-timer
5173 + * device or the emergency console.
5174 + *
5175 + * Event channels are addressed by a "port index". Each channel is
5176 + * associated with two bits of information:
5177 + * 1. PENDING -- notifies the domain that there is a pending notification
5178 + * to be processed. This bit is cleared by the guest.
5179 + * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
5180 + * will cause an asynchronous upcall to be scheduled. This bit is only
5181 + * updated by the guest. It is read-only within Xen. If a channel
5182 + * becomes pending while the channel is masked then the 'edge' is lost
5183 + * (i.e., when the channel is unmasked, the guest must manually handle
5184 + * pending notifications as no upcall will be scheduled by Xen).
5185 + *
5186 + * To expedite scanning of pending notifications, any 0->1 pending
5187 + * transition on an unmasked channel causes a corresponding bit in a
5188 + * per-vcpu selector word to be set. Each bit in the selector covers a
5189 + * 'C long' in the PENDING bitfield array.
5190 + */
5191 + unsigned long evtchn_pending[sizeof(unsigned long) * 8];
5192 + unsigned long evtchn_mask[sizeof(unsigned long) * 8];
5193 +
5194 + /*
5195 + * Wallclock time: updated only by control software. Guests should base
5196 + * their gettimeofday() syscall on this wallclock-base value.
5197 + */
5198 +#ifdef CONFIG_PARAVIRT_XEN
5199 + struct pvclock_wall_clock wc;
5200 +#else
5201 + uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
5202 + uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
5203 + uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
5204 +#endif
5205
5206 - struct arch_shared_info arch;
5207 + struct arch_shared_info arch;
5208
5209 };
5210 +#ifndef __XEN__
5211 +typedef struct shared_info shared_info_t;
5212 +#endif
5213
5214 /*
5215 - * Start-of-day memory layout for the initial domain (DOM0):
5216 + * Start-of-day memory layout:
5217 * 1. The domain is started within contiguous virtual-memory region.
5218 - * 2. The contiguous region begins and ends on an aligned 4MB boundary.
5219 - * 3. The region start corresponds to the load address of the OS image.
5220 - * If the load address is not 4MB aligned then the address is rounded down.
5221 - * 4. This the order of bootstrap elements in the initial virtual region:
5222 + * 2. The contiguous region ends on an aligned 4MB boundary.
5223 + * 3. This is the order of bootstrap elements in the initial virtual region:
5224 * a. relocated kernel image
5225 * b. initial ram disk [mod_start, mod_len]
5226 * c. list of allocated page frames [mfn_list, nr_pages]
5227 * d. start_info_t structure [register ESI (x86)]
5228 * e. bootstrap page tables [pt_base, CR3 (x86)]
5229 * f. bootstrap stack [register ESP (x86)]
5230 - * 5. Bootstrap elements are packed together, but each is 4kB-aligned.
5231 - * 6. The initial ram disk may be omitted.
5232 - * 7. The list of page frames forms a contiguous 'pseudo-physical' memory
5233 + * 4. Bootstrap elements are packed together, but each is 4kB-aligned.
5234 + * 5. The initial ram disk may be omitted.
5235 + * 6. The list of page frames forms a contiguous 'pseudo-physical' memory
5236 * layout for the domain. In particular, the bootstrap virtual-memory
5237 * region is a 1:1 mapping to the first section of the pseudo-physical map.
5238 - * 8. All bootstrap elements are mapped read-writable for the guest OS. The
5239 + * 7. All bootstrap elements are mapped read-writable for the guest OS. The
5240 * only exception is the bootstrap page table, which is mapped read-only.
5241 - * 9. There is guaranteed to be at least 512kB padding after the final
5242 + * 8. There is guaranteed to be at least 512kB padding after the final
5243 * bootstrap element. If necessary, the bootstrap virtual region is
5244 * extended by an extra 4MB to ensure this.
5245 */
5246
5247 #define MAX_GUEST_CMDLINE 1024
5248 struct start_info {
5249 - /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
5250 - char magic[32]; /* "xen-<version>-<platform>". */
5251 - unsigned long nr_pages; /* Total pages allocated to this domain. */
5252 - unsigned long shared_info; /* MACHINE address of shared info struct. */
5253 - uint32_t flags; /* SIF_xxx flags. */
5254 - unsigned long store_mfn; /* MACHINE page number of shared page. */
5255 - uint32_t store_evtchn; /* Event channel for store communication. */
5256 - union {
5257 - struct {
5258 - unsigned long mfn; /* MACHINE page number of console page. */
5259 - uint32_t evtchn; /* Event channel for console page. */
5260 - } domU;
5261 - struct {
5262 - uint32_t info_off; /* Offset of console_info struct. */
5263 - uint32_t info_size; /* Size of console_info struct from start.*/
5264 - } dom0;
5265 - } console;
5266 - /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
5267 - unsigned long pt_base; /* VIRTUAL address of page directory. */
5268 - unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
5269 - unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
5270 - unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
5271 - unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
5272 - int8_t cmd_line[MAX_GUEST_CMDLINE];
5273 + /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
5274 + char magic[32]; /* "xen-<version>-<platform>". */
5275 + unsigned long nr_pages; /* Total pages allocated to this domain. */
5276 + unsigned long shared_info; /* MACHINE address of shared info struct. */
5277 + uint32_t flags; /* SIF_xxx flags. */
5278 + xen_pfn_t store_mfn; /* MACHINE page number of shared page. */
5279 + uint32_t store_evtchn; /* Event channel for store communication. */
5280 + union {
5281 + struct {
5282 + xen_pfn_t mfn; /* MACHINE page number of console page. */
5283 + uint32_t evtchn; /* Event channel for console page. */
5284 + } domU;
5285 + struct {
5286 + uint32_t info_off; /* Offset of console_info struct. */
5287 + uint32_t info_size; /* Size of console_info struct from start.*/
5288 + } dom0;
5289 + } console;
5290 + /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
5291 + unsigned long pt_base; /* VIRTUAL address of page directory. */
5292 + unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
5293 + unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
5294 + unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
5295 + unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
5296 + int8_t cmd_line[MAX_GUEST_CMDLINE];
5297 };
5298 +typedef struct start_info start_info_t;
5299 +
5300 +/* New console union for dom0 introduced in 0x00030203. */
5301 +#if __XEN_INTERFACE_VERSION__ < 0x00030203
5302 +#define console_mfn console.domU.mfn
5303 +#define console_evtchn console.domU.evtchn
5304 +#endif
5305
5306 /* These flags are passed in the 'flags' field of start_info_t. */
5307 #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
5308 #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
5309 +#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
5310
5311 -typedef uint64_t cpumap_t;
5312 +typedef struct dom0_vga_console_info {
5313 + uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
5314 +#define XEN_VGATYPE_TEXT_MODE_3 0x03
5315 +#define XEN_VGATYPE_VESA_LFB 0x23
5316 +
5317 + union {
5318 + struct {
5319 + /* Font height, in pixels. */
5320 + uint16_t font_height;
5321 + /* Cursor location (column, row). */
5322 + uint16_t cursor_x, cursor_y;
5323 + /* Number of rows and columns (dimensions in characters). */
5324 + uint16_t rows, columns;
5325 + } text_mode_3;
5326 +
5327 + struct {
5328 + /* Width and height, in pixels. */
5329 + uint16_t width, height;
5330 + /* Bytes per scan line. */
5331 + uint16_t bytes_per_line;
5332 + /* Bits per pixel. */
5333 + uint16_t bits_per_pixel;
5334 + /* LFB physical address, and size (in units of 64kB). */
5335 + uint32_t lfb_base;
5336 + uint32_t lfb_size;
5337 + /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
5338 + uint8_t red_pos, red_size;
5339 + uint8_t green_pos, green_size;
5340 + uint8_t blue_pos, blue_size;
5341 + uint8_t rsvd_pos, rsvd_size;
5342 +#if __XEN_INTERFACE_VERSION__ >= 0x00030206
5343 + /* VESA capabilities (offset 0xa, VESA command 0x4f00). */
5344 + uint32_t gbl_caps;
5345 + /* Mode attributes (offset 0x0, VESA command 0x4f01). */
5346 + uint16_t mode_attrs;
5347 +#endif
5348 + } vesa_lfb;
5349 + } u;
5350 +} dom0_vga_console_info_t;
5351 +#define xen_vga_console_info dom0_vga_console_info
5352 +#define xen_vga_console_info_t dom0_vga_console_info_t
5353
5354 typedef uint8_t xen_domain_handle_t[16];
5355
5356 @@ -461,6 +627,11 @@ typedef uint8_t xen_domain_handle_t[16];
5357 #define __mk_unsigned_long(x) x ## UL
5358 #define mk_unsigned_long(x) __mk_unsigned_long(x)
5359
5360 +__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t);
5361 +__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t);
5362 +__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t);
5363 +__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
5364 +
5365 #else /* __ASSEMBLY__ */
5366
5367 /* In assembly code we cannot use C numeric constant suffixes. */
5368 @@ -468,4 +639,24 @@ typedef uint8_t xen_domain_handle_t[16];
5369
5370 #endif /* !__ASSEMBLY__ */
5371
5372 +/* Default definitions for macros used by domctl/sysctl. */
5373 +#if defined(__XEN__) || defined(__XEN_TOOLS__)
5374 +#ifndef uint64_aligned_t
5375 +#define uint64_aligned_t uint64_t
5376 +#endif
5377 +#ifndef XEN_GUEST_HANDLE_64
5378 +#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
5379 +#endif
5380 +#endif
5381 +
5382 #endif /* __XEN_PUBLIC_XEN_H__ */
5383 +
5384 +/*
5385 + * Local variables:
5386 + * mode: C
5387 + * c-set-style: "BSD"
5388 + * c-basic-offset: 4
5389 + * tab-width: 4
5390 + * indent-tabs-mode: nil
5391 + * End:
5392 + */
5393 Index: head-2008-11-25/include/xen/xenbus.h
5394 ===================================================================
5395 --- head-2008-11-25.orig/include/xen/xenbus.h 2008-11-25 12:33:06.000000000 +0100
5396 +++ head-2008-11-25/include/xen/xenbus.h 2008-11-25 12:35:56.000000000 +0100
5397 @@ -39,6 +39,7 @@
5398 #include <linux/mutex.h>
5399 #include <linux/completion.h>
5400 #include <linux/init.h>
5401 +#include <linux/err.h>
5402 #include <xen/interface/xen.h>
5403 #include <xen/interface/grant_table.h>
5404 #include <xen/interface/io/xenbus.h>
5405 @@ -55,8 +56,17 @@ struct xenbus_watch
5406 /* Callback (executed in a process context with no locks held). */
5407 void (*callback)(struct xenbus_watch *,
5408 const char **vec, unsigned int len);
5409 +
5410 + /* See XBWF_ definitions below. */
5411 + unsigned long flags;
5412 };
5413
5414 +/*
5415 + * Execute callback in its own kthread. Useful if the callback is long
5416 + * running or heavily serialised, to avoid taking out the main xenwatch thread
5417 + * for a long period of time (or even unwittingly causing a deadlock).
5418 + */
5419 +#define XBWF_new_thread 1
5420
5421 /* A xenbus device. */
5422 struct xenbus_device {
5423 @@ -105,27 +115,8 @@ static inline struct xenbus_driver *to_x
5424 return container_of(drv, struct xenbus_driver, driver);
5425 }
5426
5427 -int __must_check __xenbus_register_frontend(struct xenbus_driver *drv,
5428 - struct module *owner,
5429 - const char *mod_name);
5430 -
5431 -static inline int __must_check
5432 -xenbus_register_frontend(struct xenbus_driver *drv)
5433 -{
5434 - WARN_ON(drv->owner != THIS_MODULE);
5435 - return __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
5436 -}
5437 -
5438 -int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
5439 - struct module *owner,
5440 - const char *mod_name);
5441 -static inline int __must_check
5442 -xenbus_register_backend(struct xenbus_driver *drv)
5443 -{
5444 - WARN_ON(drv->owner != THIS_MODULE);
5445 - return __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
5446 -}
5447 -
5448 +int xenbus_register_frontend(struct xenbus_driver *drv);
5449 +int xenbus_register_backend(struct xenbus_driver *drv);
5450 void xenbus_unregister_driver(struct xenbus_driver *drv);
5451
5452 struct xenbus_transaction
5453 @@ -136,8 +127,6 @@ struct xenbus_transaction
5454 /* Nil transaction ID. */
5455 #define XBT_NIL ((struct xenbus_transaction) { 0 })
5456
5457 -int __init xenbus_dev_init(void);
5458 -
5459 char **xenbus_directory(struct xenbus_transaction t,
5460 const char *dir, const char *node, unsigned int *num);
5461 void *xenbus_read(struct xenbus_transaction t,
5462 @@ -167,7 +156,6 @@ int xenbus_printf(struct xenbus_transact
5463 int xenbus_gather(struct xenbus_transaction t, const char *dir, ...);
5464
5465 /* notifer routines for when the xenstore comes up */
5466 -extern int xenstored_ready;
5467 int register_xenstore_notifier(struct notifier_block *nb);
5468 void unregister_xenstore_notifier(struct notifier_block *nb);
5469
5470 @@ -180,12 +168,9 @@ void xs_suspend_cancel(void);
5471 /* Used by xenbus_dev to borrow kernel's store connection. */
5472 void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
5473
5474 -struct work_struct;
5475 -
5476 /* Prepare for domain suspend: then resume or cancel the suspend. */
5477 void xenbus_suspend(void);
5478 void xenbus_resume(void);
5479 -void xenbus_probe(struct work_struct *);
5480 void xenbus_suspend_cancel(void);
5481
5482 #define XENBUS_IS_ERR_READ(str) ({ \
5483 @@ -198,38 +183,125 @@ void xenbus_suspend_cancel(void);
5484
5485 #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
5486
5487 +
5488 +/**
5489 + * Register a watch on the given path, using the given xenbus_watch structure
5490 + * for storage, and the given callback function as the callback. Return 0 on
5491 + * success, or -errno on error. On success, the given path will be saved as
5492 + * watch->node, and remains the caller's to free. On error, watch->node will
5493 + * be NULL, the device will switch to XenbusStateClosing, and the error will
5494 + * be saved in the store.
5495 + */
5496 int xenbus_watch_path(struct xenbus_device *dev, const char *path,
5497 struct xenbus_watch *watch,
5498 void (*callback)(struct xenbus_watch *,
5499 const char **, unsigned int));
5500 -int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
5501 - void (*callback)(struct xenbus_watch *,
5502 - const char **, unsigned int),
5503 - const char *pathfmt, ...)
5504 - __attribute__ ((format (printf, 4, 5)));
5505
5506 +
5507 +/**
5508 + * Register a watch on the given path/path2, using the given xenbus_watch
5509 + * structure for storage, and the given callback function as the callback.
5510 + * Return 0 on success, or -errno on error. On success, the watched path
5511 + * (path/path2) will be saved as watch->node, and becomes the caller's to
5512 + * kfree(). On error, watch->node will be NULL, so the caller has nothing to
5513 + * free, the device will switch to XenbusStateClosing, and the error will be
5514 + * saved in the store.
5515 + */
5516 +int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
5517 + const char *path2, struct xenbus_watch *watch,
5518 + void (*callback)(struct xenbus_watch *,
5519 + const char **, unsigned int));
5520 +
5521 +
5522 +/**
5523 + * Advertise in the store a change of the given driver to the given new_state.
5524 + * Return 0 on success, or -errno on error. On error, the device will switch
5525 + * to XenbusStateClosing, and the error will be saved in the store.
5526 + */
5527 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
5528 +
5529 +
5530 +/**
5531 + * Grant access to the given ring_mfn to the peer of the given device. Return
5532 + * 0 on success, or -errno on error. On error, the device will switch to
5533 + * XenbusStateClosing, and the error will be saved in the store.
5534 + */
5535 int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
5536 -int xenbus_map_ring_valloc(struct xenbus_device *dev,
5537 - int gnt_ref, void **vaddr);
5538 +
5539 +
5540 +/**
5541 + * Map a page of memory into this domain from another domain's grant table.
5542 + * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
5543 + * page to that address, and sets *vaddr to that address.
5544 + * xenbus_map_ring does not allocate the virtual address space (you must do
5545 + * this yourself!). It only maps in the page to the specified address.
5546 + * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
5547 + * or -ENOMEM on error. If an error is returned, device will switch to
5548 + * XenbusStateClosing and the error message will be saved in XenStore.
5549 + */
5550 +struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev,
5551 + int gnt_ref);
5552 int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
5553 grant_handle_t *handle, void *vaddr);
5554
5555 -int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
5556 +
5557 +/**
5558 + * Unmap a page of memory in this domain that was imported from another domain.
5559 + * Use xenbus_unmap_ring_vfree if you mapped in your memory with
5560 + * xenbus_map_ring_valloc (it will free the virtual address space).
5561 + * Returns 0 on success and returns GNTST_* on error
5562 + * (see xen/include/interface/grant_table.h).
5563 + */
5564 +int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *);
5565 int xenbus_unmap_ring(struct xenbus_device *dev,
5566 grant_handle_t handle, void *vaddr);
5567
5568 +
5569 +/**
5570 + * Allocate an event channel for the given xenbus_device, assigning the newly
5571 + * created local port to *port. Return 0 on success, or -errno on error. On
5572 + * error, the device will switch to XenbusStateClosing, and the error will be
5573 + * saved in the store.
5574 + */
5575 int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
5576 -int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port);
5577 +
5578 +
5579 +/**
5580 + * Free an existing event channel. Returns 0 on success or -errno on error.
5581 + */
5582 int xenbus_free_evtchn(struct xenbus_device *dev, int port);
5583
5584 +
5585 +/**
5586 + * Return the state of the driver rooted at the given store path, or
5587 + * XenbusStateUnknown if no state can be read.
5588 + */
5589 enum xenbus_state xenbus_read_driver_state(const char *path);
5590
5591 -void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...);
5592 -void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...);
5593 +
5594 +/***
5595 + * Report the given negative errno into the store, along with the given
5596 + * formatted message.
5597 + */
5598 +void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
5599 + ...);
5600 +
5601 +
5602 +/***
5603 + * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
5604 + * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly
5605 + * closedown of this driver and its peer.
5606 + */
5607 +void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
5608 + ...);
5609 +
5610 +int xenbus_dev_init(void);
5611
5612 const char *xenbus_strstate(enum xenbus_state state);
5613 int xenbus_dev_is_online(struct xenbus_device *dev);
5614 int xenbus_frontend_closed(struct xenbus_device *dev);
5615
5616 +int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *));
5617 +int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *));
5618 +
5619 #endif /* _XEN_XENBUS_H */