1 Subject: Fix Xen build wrt. Xen files coming from mainline.
2 From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 728:832aac894efd)
3 Patch-mainline: obsolete
5 Acked-by: jbeulich@novell.com
7 Index: head-2008-11-25/drivers/xen/Makefile
8 ===================================================================
9 --- head-2008-11-25.orig/drivers/xen/Makefile 2008-11-25 12:33:06.000000000 +0100
10 +++ head-2008-11-25/drivers/xen/Makefile 2008-11-25 12:35:56.000000000 +0100
12 -obj-y += grant-table.o features.o events.o manage.o
17 -obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
18 -obj-$(CONFIG_XEN_BALLOON) += balloon.o
22 +obj-$(CONFIG_XEN_BALLOON) += balloon/
23 +obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
24 +obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/
25 +obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
26 +obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmback/
27 +obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/
28 +obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/
29 +obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback/
30 +obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront/
31 +obj-$(CONFIG_XEN_FRAMEBUFFER) += fbfront/
32 +obj-$(CONFIG_XEN_KEYBOARD) += fbfront/
33 +obj-$(CONFIG_XEN_SCSI_BACKEND) += scsiback/
34 +obj-$(CONFIG_XEN_SCSI_FRONTEND) += scsifront/
35 +obj-$(CONFIG_XEN_PRIVCMD) += privcmd/
36 +obj-$(CONFIG_XEN_GRANT_DEV) += gntdev/
37 +obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL) += sfc_netutil/
38 +obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND) += sfc_netfront/
39 +obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND) += sfc_netback/
40 Index: head-2008-11-25/drivers/xen/xenbus/Makefile
41 ===================================================================
42 --- head-2008-11-25.orig/drivers/xen/xenbus/Makefile 2008-11-25 12:33:06.000000000 +0100
43 +++ head-2008-11-25/drivers/xen/xenbus/Makefile 2008-11-25 12:35:56.000000000 +0100
46 +obj-y += xenbus_client.o xenbus_comms.o xenbus_xs.o xenbus_probe.o
47 +obj-$(CONFIG_XEN_BACKEND) += xenbus_be.o
50 -xenbus-objs += xenbus_client.o
51 -xenbus-objs += xenbus_comms.o
52 -xenbus-objs += xenbus_xs.o
53 -xenbus-objs += xenbus_probe.o
55 +xenbus_be-objs += xenbus_backend_client.o
57 +xenbus-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
58 +obj-y += $(xenbus-y) $(xenbus-m)
59 +obj-$(CONFIG_XEN_XENBUS_DEV) += xenbus_dev.o
60 Index: head-2008-11-25/drivers/xen/xenbus/xenbus_client.c
61 ===================================================================
62 --- head-2008-11-25.orig/drivers/xen/xenbus/xenbus_client.c 2008-11-25 12:33:06.000000000 +0100
63 +++ head-2008-11-25/drivers/xen/xenbus/xenbus_client.c 2008-11-25 12:35:56.000000000 +0100
68 -#include <linux/types.h>
69 -#include <linux/vmalloc.h>
70 -#include <asm/xen/hypervisor.h>
71 -#include <xen/interface/xen.h>
72 -#include <xen/interface/event_channel.h>
73 -#include <xen/events.h>
74 -#include <xen/grant_table.h>
75 +#include <linux/slab.h>
76 +#include <xen/evtchn.h>
77 +#include <xen/gnttab.h>
78 #include <xen/xenbus.h>
79 +#include <xen/driver_util.h>
81 +#ifdef HAVE_XEN_PLATFORM_COMPAT_H
82 +#include <xen/platform-compat.h>
85 +#define DPRINTK(fmt, args...) \
86 + pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
88 const char *xenbus_strstate(enum xenbus_state state)
90 @@ -54,20 +58,6 @@ const char *xenbus_strstate(enum xenbus_
92 EXPORT_SYMBOL_GPL(xenbus_strstate);
95 - * xenbus_watch_path - register a watch
96 - * @dev: xenbus device
97 - * @path: path to watch
98 - * @watch: watch to register
99 - * @callback: callback to register
101 - * Register a @watch on the given path, using the given xenbus_watch structure
102 - * for storage, and the given @callback function as the callback. Return 0 on
103 - * success, or -errno on error. On success, the given @path will be saved as
104 - * @watch->node, and remains the caller's to free. On error, @watch->node will
105 - * be NULL, the device will switch to %XenbusStateClosing, and the error will
106 - * be saved in the store.
108 int xenbus_watch_path(struct xenbus_device *dev, const char *path,
109 struct xenbus_watch *watch,
110 void (*callback)(struct xenbus_watch *,
111 @@ -91,58 +81,26 @@ int xenbus_watch_path(struct xenbus_devi
112 EXPORT_SYMBOL_GPL(xenbus_watch_path);
116 - * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
117 - * @dev: xenbus device
118 - * @watch: watch to register
119 - * @callback: callback to register
120 - * @pathfmt: format of path to watch
122 - * Register a watch on the given @path, using the given xenbus_watch
123 - * structure for storage, and the given @callback function as the callback.
124 - * Return 0 on success, or -errno on error. On success, the watched path
125 - * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
126 - * kfree(). On error, watch->node will be NULL, so the caller has nothing to
127 - * free, the device will switch to %XenbusStateClosing, and the error will be
128 - * saved in the store.
130 -int xenbus_watch_pathfmt(struct xenbus_device *dev,
131 - struct xenbus_watch *watch,
132 - void (*callback)(struct xenbus_watch *,
133 - const char **, unsigned int),
134 - const char *pathfmt, ...)
135 +int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
136 + const char *path2, struct xenbus_watch *watch,
137 + void (*callback)(struct xenbus_watch *,
138 + const char **, unsigned int))
144 - va_start(ap, pathfmt);
145 - path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
149 + char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2);
151 xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
154 - err = xenbus_watch_path(dev, path, watch, callback);
155 + err = xenbus_watch_path(dev, state, watch, callback);
162 -EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
163 +EXPORT_SYMBOL_GPL(xenbus_watch_path2);
167 - * xenbus_switch_state
168 - * @dev: xenbus device
169 - * @xbt: transaction handle
170 - * @state: new state
172 - * Advertise in the store a change of the given driver to the given new_state.
173 - * Return 0 on success, or -errno on error. On error, the device will switch
174 - * to XenbusStateClosing, and the error will be saved in the store.
176 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
178 /* We check whether the state is currently set to the given value, and
179 @@ -201,13 +159,12 @@ static char *error_path(struct xenbus_de
183 -static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
184 - const char *fmt, va_list ap)
185 +void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
190 - char *printf_buffer = NULL;
191 - char *path_buffer = NULL;
192 + char *printf_buffer = NULL, *path_buffer = NULL;
194 #define PRINTF_BUFFER_SIZE 4096
195 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
196 @@ -224,74 +181,51 @@ static void xenbus_va_dev_error(struct x
197 path_buffer = error_path(dev);
199 if (path_buffer == NULL) {
200 - dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
201 + printk("xenbus: failed to write error node for %s (%s)\n",
202 dev->nodename, printf_buffer);
206 if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
207 - dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
208 + printk("xenbus: failed to write error node for %s (%s)\n",
209 dev->nodename, printf_buffer);
214 - kfree(printf_buffer);
215 - kfree(path_buffer);
217 + kfree(printf_buffer);
219 + kfree(path_buffer);
225 - * @dev: xenbus device
226 - * @err: error to report
227 - * @fmt: error message format
229 - * Report the given negative errno into the store, along with the given
230 - * formatted message.
232 -void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
233 +void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
239 - xenbus_va_dev_error(dev, err, fmt, ap);
240 + _dev_error(dev, err, fmt, ap);
243 EXPORT_SYMBOL_GPL(xenbus_dev_error);
247 - * @dev: xenbus device
248 - * @err: error to report
249 - * @fmt: error message format
251 - * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
252 - * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly
253 - * closedown of this driver and its peer.
256 -void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
257 +void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
263 - xenbus_va_dev_error(dev, err, fmt, ap);
264 + _dev_error(dev, err, fmt, ap);
267 xenbus_switch_state(dev, XenbusStateClosing);
269 EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
272 - * xenbus_grant_ring
273 - * @dev: xenbus device
274 - * @ring_mfn: mfn of ring to grant
276 - * Grant access to the given @ring_mfn to the peer of the given device. Return
277 - * 0 on success, or -errno on error. On error, the device will switch to
278 - * XenbusStateClosing, and the error will be saved in the store.
281 int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
283 int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
284 @@ -302,18 +236,12 @@ int xenbus_grant_ring(struct xenbus_devi
285 EXPORT_SYMBOL_GPL(xenbus_grant_ring);
289 - * Allocate an event channel for the given xenbus_device, assigning the newly
290 - * created local port to *port. Return 0 on success, or -errno on error. On
291 - * error, the device will switch to XenbusStateClosing, and the error will be
292 - * saved in the store.
294 int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
296 struct evtchn_alloc_unbound alloc_unbound;
299 - alloc_unbound.dom = DOMID_SELF;
300 + alloc_unbound.dom = DOMID_SELF;
301 alloc_unbound.remote_dom = dev->otherend_id;
303 err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
304 @@ -328,36 +256,6 @@ int xenbus_alloc_evtchn(struct xenbus_de
305 EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
309 - * Bind to an existing interdomain event channel in another domain. Returns 0
310 - * on success and stores the local port in *port. On error, returns -errno,
311 - * switches the device to XenbusStateClosing, and saves the error in XenStore.
313 -int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
315 - struct evtchn_bind_interdomain bind_interdomain;
318 - bind_interdomain.remote_dom = dev->otherend_id;
319 - bind_interdomain.remote_port = remote_port;
321 - err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
322 - &bind_interdomain);
324 - xenbus_dev_fatal(dev, err,
325 - "binding to event channel %d from domain %d",
326 - remote_port, dev->otherend_id);
328 - *port = bind_interdomain.local_port;
332 -EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
336 - * Free an existing event channel. Returns 0 on success or -errno on error.
338 int xenbus_free_evtchn(struct xenbus_device *dev, int port)
340 struct evtchn_close close;
341 @@ -374,189 +272,6 @@ int xenbus_free_evtchn(struct xenbus_dev
342 EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
346 - * xenbus_map_ring_valloc
347 - * @dev: xenbus device
348 - * @gnt_ref: grant reference
349 - * @vaddr: pointer to address to be filled out by mapping
351 - * Based on Rusty Russell's skeleton driver's map_page.
352 - * Map a page of memory into this domain from another domain's grant table.
353 - * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
354 - * page to that address, and sets *vaddr to that address.
355 - * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
356 - * or -ENOMEM on error. If an error is returned, device will switch to
357 - * XenbusStateClosing and the error message will be saved in XenStore.
359 -int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
361 - struct gnttab_map_grant_ref op = {
362 - .flags = GNTMAP_host_map,
364 - .dom = dev->otherend_id,
366 - struct vm_struct *area;
370 - area = xen_alloc_vm_area(PAGE_SIZE);
374 - op.host_addr = (unsigned long)area->addr;
376 - if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
379 - if (op.status != GNTST_okay) {
380 - xen_free_vm_area(area);
381 - xenbus_dev_fatal(dev, op.status,
382 - "mapping in shared page %d from domain %d",
383 - gnt_ref, dev->otherend_id);
387 - /* Stuff the handle in an unused field */
388 - area->phys_addr = (unsigned long)op.handle;
390 - *vaddr = area->addr;
393 -EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
398 - * @dev: xenbus device
399 - * @gnt_ref: grant reference
400 - * @handle: pointer to grant handle to be filled
401 - * @vaddr: address to be mapped to
403 - * Map a page of memory into this domain from another domain's grant table.
404 - * xenbus_map_ring does not allocate the virtual address space (you must do
405 - * this yourself!). It only maps in the page to the specified address.
406 - * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
407 - * or -ENOMEM on error. If an error is returned, device will switch to
408 - * XenbusStateClosing and the error message will be saved in XenStore.
410 -int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
411 - grant_handle_t *handle, void *vaddr)
413 - struct gnttab_map_grant_ref op = {
414 - .host_addr = (unsigned long)vaddr,
415 - .flags = GNTMAP_host_map,
417 - .dom = dev->otherend_id,
420 - if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
423 - if (op.status != GNTST_okay) {
424 - xenbus_dev_fatal(dev, op.status,
425 - "mapping in shared page %d from domain %d",
426 - gnt_ref, dev->otherend_id);
428 - *handle = op.handle;
432 -EXPORT_SYMBOL_GPL(xenbus_map_ring);
436 - * xenbus_unmap_ring_vfree
437 - * @dev: xenbus device
438 - * @vaddr: addr to unmap
440 - * Based on Rusty Russell's skeleton driver's unmap_page.
441 - * Unmap a page of memory in this domain that was imported from another domain.
442 - * Use xenbus_unmap_ring_vfree if you mapped in your memory with
443 - * xenbus_map_ring_valloc (it will free the virtual address space).
444 - * Returns 0 on success and returns GNTST_* on error
445 - * (see xen/include/interface/grant_table.h).
447 -int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
449 - struct vm_struct *area;
450 - struct gnttab_unmap_grant_ref op = {
451 - .host_addr = (unsigned long)vaddr,
454 - /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
455 - * method so that we don't have to muck with vmalloc internals here.
456 - * We could force the user to hang on to their struct vm_struct from
457 - * xenbus_map_ring_valloc, but these 6 lines considerably simplify
460 - read_lock(&vmlist_lock);
461 - for (area = vmlist; area != NULL; area = area->next) {
462 - if (area->addr == vaddr)
465 - read_unlock(&vmlist_lock);
468 - xenbus_dev_error(dev, -ENOENT,
469 - "can't find mapped virtual address %p", vaddr);
470 - return GNTST_bad_virt_addr;
473 - op.handle = (grant_handle_t)area->phys_addr;
475 - if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
478 - if (op.status == GNTST_okay)
479 - xen_free_vm_area(area);
481 - xenbus_dev_error(dev, op.status,
482 - "unmapping page at handle %d error %d",
483 - (int16_t)area->phys_addr, op.status);
487 -EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
491 - * xenbus_unmap_ring
492 - * @dev: xenbus device
493 - * @handle: grant handle
494 - * @vaddr: addr to unmap
496 - * Unmap a page of memory in this domain that was imported from another domain.
497 - * Returns 0 on success and returns GNTST_* on error
498 - * (see xen/include/interface/grant_table.h).
500 -int xenbus_unmap_ring(struct xenbus_device *dev,
501 - grant_handle_t handle, void *vaddr)
503 - struct gnttab_unmap_grant_ref op = {
504 - .host_addr = (unsigned long)vaddr,
508 - if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
511 - if (op.status != GNTST_okay)
512 - xenbus_dev_error(dev, op.status,
513 - "unmapping page at handle %d error %d",
514 - handle, op.status);
518 -EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
522 - * xenbus_read_driver_state
523 - * @path: path for driver
525 - * Return the state of the driver rooted at the given store path, or
526 - * XenbusStateUnknown if no state can be read.
528 enum xenbus_state xenbus_read_driver_state(const char *path)
530 enum xenbus_state result;
531 Index: head-2008-11-25/drivers/xen/xenbus/xenbus_comms.c
532 ===================================================================
533 --- head-2008-11-25.orig/drivers/xen/xenbus/xenbus_comms.c 2008-11-25 12:33:06.000000000 +0100
534 +++ head-2008-11-25/drivers/xen/xenbus/xenbus_comms.c 2008-11-25 12:35:56.000000000 +0100
536 #include <linux/interrupt.h>
537 #include <linux/sched.h>
538 #include <linux/err.h>
539 +#include <linux/ptrace.h>
540 +#include <linux/workqueue.h>
541 +#include <xen/evtchn.h>
542 #include <xen/xenbus.h>
543 -#include <asm/xen/hypervisor.h>
544 -#include <xen/events.h>
545 -#include <xen/page.h>
547 +#include <asm/hypervisor.h>
549 #include "xenbus_comms.h"
551 +#ifdef HAVE_XEN_PLATFORM_COMPAT_H
552 +#include <xen/platform-compat.h>
555 static int xenbus_irq;
557 -static DECLARE_WORK(probe_work, xenbus_probe);
558 +extern void xenbus_probe(void *);
559 +extern int xenstored_ready;
560 +static DECLARE_WORK(probe_work, xenbus_probe, NULL);
562 static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
564 -static irqreturn_t wake_waiting(int irq, void *unused)
565 +static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
567 if (unlikely(xenstored_ready == 0)) {
569 @@ -82,13 +91,6 @@ static const void *get_input_chunk(XENST
570 return buf + MASK_XENSTORE_IDX(cons);
574 - * xb_write - low level write
575 - * @data: buffer to send
576 - * @len: length of buffer
578 - * Returns 0 on success, error otherwise.
580 int xb_write(const void *data, unsigned len)
582 struct xenstore_domain_interface *intf = xen_store_interface;
583 @@ -197,12 +199,11 @@ int xb_read(void *data, unsigned len)
588 - * xb_init_comms - Set up interrupt handler off store event channel.
590 +/* Set up interrupt handler off store event channel. */
591 int xb_init_comms(void)
593 struct xenstore_domain_interface *intf = xen_store_interface;
596 if (intf->req_prod != intf->req_cons)
597 printk(KERN_ERR "XENBUS request ring is not quiescent "
598 @@ -215,20 +216,18 @@ int xb_init_comms(void)
599 intf->rsp_cons = intf->rsp_prod;
603 - /* Already have an irq; assume we're resuming */
604 - rebind_evtchn_irq(xen_store_evtchn, xenbus_irq);
607 - err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
608 - 0, "xenbus", &xb_waitq);
610 - printk(KERN_ERR "XENBUS request irq failed %i\n", err);
614 + unbind_from_irqhandler(xenbus_irq, &xb_waitq);
617 + err = bind_caller_port_to_irqhandler(
618 + xen_store_evtchn, wake_waiting,
619 + 0, "xenbus", &xb_waitq);
621 + printk(KERN_ERR "XENBUS request irq failed %i\n", err);
629 Index: head-2008-11-25/drivers/xen/xenbus/xenbus_probe.c
630 ===================================================================
631 --- head-2008-11-25.orig/drivers/xen/xenbus/xenbus_probe.c 2008-11-25 12:33:06.000000000 +0100
632 +++ head-2008-11-25/drivers/xen/xenbus/xenbus_probe.c 2008-11-25 12:35:56.000000000 +0100
634 * Copyright (C) 2005 Rusty Russell, IBM Corporation
635 * Copyright (C) 2005 Mike Wray, Hewlett-Packard
636 * Copyright (C) 2005, 2006 XenSource Ltd
637 + * Copyright (C) 2007 Solarflare Communications, Inc.
639 * This program is free software; you can redistribute it and/or
640 * modify it under the terms of the GNU General Public License version 2
643 #define DPRINTK(fmt, args...) \
644 pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
645 - __func__, __LINE__, ##args)
646 + __FUNCTION__, __LINE__, ##args)
648 #include <linux/kernel.h>
649 #include <linux/err.h>
651 #include <linux/fcntl.h>
652 #include <linux/mm.h>
653 #include <linux/notifier.h>
654 -#include <linux/kthread.h>
655 #include <linux/mutex.h>
656 -#include <linux/io.h>
657 +#include <linux/module.h>
660 #include <asm/page.h>
661 +#include <asm/maddr.h>
662 #include <asm/pgtable.h>
663 -#include <asm/xen/hypervisor.h>
664 +#include <asm/hypervisor.h>
665 #include <xen/xenbus.h>
666 -#include <xen/events.h>
667 -#include <xen/page.h>
668 +#include <xen/xen_proc.h>
669 +#include <xen/evtchn.h>
670 +#include <xen/features.h>
672 +#include <xen/hvm.h>
675 #include "xenbus_comms.h"
676 #include "xenbus_probe.h"
678 +#ifdef HAVE_XEN_PLATFORM_COMPAT_H
679 +#include <xen/platform-compat.h>
682 int xen_store_evtchn;
683 struct xenstore_domain_interface *xen_store_interface;
684 static unsigned long xen_store_mfn;
686 +extern struct mutex xenwatch_mutex;
688 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
690 static void wait_for_devices(struct xenbus_driver *xendrv);
691 @@ -88,16 +100,6 @@ int xenbus_match(struct device *_dev, st
692 return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
695 -static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env)
697 - struct xenbus_device *dev = to_xenbus_device(_dev);
699 - if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
705 /* device/<type>/<id> => <type>-<id> */
706 static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
708 @@ -166,6 +168,30 @@ static int read_backend_details(struct x
709 return read_otherend_details(xendev, "backend-id", "backend");
712 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
713 +static int xenbus_uevent_frontend(struct device *dev, char **envp,
714 + int num_envp, char *buffer, int buffer_size)
716 + struct xenbus_device *xdev;
717 + int length = 0, i = 0;
721 + xdev = to_xenbus_device(dev);
725 + /* stuff we want to pass to /sbin/hotplug */
726 + add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
727 + "XENBUS_TYPE=%s", xdev->devicetype);
728 + add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
729 + "XENBUS_PATH=%s", xdev->nodename);
730 + add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
731 + "MODALIAS=xen:%s", xdev->devicetype);
737 /* Bus type for frontend drivers. */
738 static struct xen_bus_type xenbus_frontend = {
739 @@ -173,13 +199,19 @@ static struct xen_bus_type xenbus_fronte
740 .levels = 2, /* device/type/<id> */
741 .get_bus_id = frontend_bus_id,
742 .probe = xenbus_probe_frontend,
746 .match = xenbus_match,
747 - .uevent = xenbus_uevent,
748 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
749 .probe = xenbus_dev_probe,
750 .remove = xenbus_dev_remove,
751 .shutdown = xenbus_dev_shutdown,
752 + .uevent = xenbus_uevent_frontend,
760 @@ -196,17 +228,16 @@ static void otherend_changed(struct xenb
761 if (!dev->otherend ||
762 strncmp(dev->otherend, vec[XS_WATCH_PATH],
763 strlen(dev->otherend))) {
764 - dev_dbg(&dev->dev, "Ignoring watch at %s\n",
765 - vec[XS_WATCH_PATH]);
766 + DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]);
770 state = xenbus_read_driver_state(dev->otherend);
772 - dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n",
773 - state, xenbus_strstate(state), dev->otherend_watch.node,
774 - vec[XS_WATCH_PATH]);
775 + DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state),
776 + dev->otherend_watch.node, vec[XS_WATCH_PATH]);
778 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
780 * Ignore xenbus transitions during shutdown. This prevents us doing
781 * work that can fail e.g., when the rootfs is gone.
782 @@ -220,6 +251,7 @@ static void otherend_changed(struct xenb
783 xenbus_frontend_closed(dev);
788 if (drv->otherend_changed)
789 drv->otherend_changed(dev, state);
790 @@ -239,8 +271,8 @@ static int talk_to_otherend(struct xenbu
792 static int watch_otherend(struct xenbus_device *dev)
794 - return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
795 - "%s/%s", dev->otherend, "state");
796 + return xenbus_watch_path2(dev, dev->otherend, "state",
797 + &dev->otherend_watch, otherend_changed);
801 @@ -266,8 +298,9 @@ int xenbus_dev_probe(struct device *_dev
803 err = talk_to_otherend(dev);
805 - dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
807 + printk(KERN_WARNING
808 + "xenbus_probe: talk_to_otherend on %s failed.\n",
813 @@ -277,7 +310,8 @@ int xenbus_dev_probe(struct device *_dev
815 err = watch_otherend(dev);
817 - dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
818 + printk(KERN_WARNING
819 + "xenbus_probe: watch_otherend on %s failed.\n",
823 @@ -313,43 +347,55 @@ static void xenbus_dev_shutdown(struct d
825 DPRINTK("%s", dev->nodename);
827 + if (is_initial_xendomain())
830 get_device(&dev->dev);
831 if (dev->state != XenbusStateConnected) {
832 - printk(KERN_INFO "%s: %s: %s != Connected, skipping\n", __func__,
833 + printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__,
834 dev->nodename, xenbus_strstate(dev->state));
837 xenbus_switch_state(dev, XenbusStateClosing);
838 timeout = wait_for_completion_timeout(&dev->down, timeout);
840 - printk(KERN_INFO "%s: %s timeout closing device\n",
841 - __func__, dev->nodename);
842 + printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename);
844 put_device(&dev->dev);
847 int xenbus_register_driver_common(struct xenbus_driver *drv,
848 - struct xen_bus_type *bus,
849 - struct module *owner,
850 - const char *mod_name)
851 + struct xen_bus_type *bus)
858 drv->driver.name = drv->name;
859 drv->driver.bus = &bus->bus;
860 - drv->driver.owner = owner;
861 - drv->driver.mod_name = mod_name;
862 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
863 + drv->driver.owner = drv->owner;
865 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
866 + drv->driver.probe = xenbus_dev_probe;
867 + drv->driver.remove = xenbus_dev_remove;
868 + drv->driver.shutdown = xenbus_dev_shutdown;
871 - return driver_register(&drv->driver);
872 + mutex_lock(&xenwatch_mutex);
873 + ret = driver_register(&drv->driver);
874 + mutex_unlock(&xenwatch_mutex);
878 -int __xenbus_register_frontend(struct xenbus_driver *drv,
879 - struct module *owner, const char *mod_name)
880 +int xenbus_register_frontend(struct xenbus_driver *drv)
884 drv->read_otherend_details = read_backend_details;
886 - ret = xenbus_register_driver_common(drv, &xenbus_frontend,
888 + ret = xenbus_register_driver_common(drv, &xenbus_frontend);
892 @@ -358,7 +404,7 @@ int __xenbus_register_frontend(struct xe
896 -EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
897 +EXPORT_SYMBOL_GPL(xenbus_register_frontend);
899 void xenbus_unregister_driver(struct xenbus_driver *drv)
901 @@ -436,25 +482,25 @@ static void xenbus_dev_release(struct de
904 static ssize_t xendev_show_nodename(struct device *dev,
905 - struct device_attribute *attr, char *buf)
906 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
907 + struct device_attribute *attr,
911 return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
913 DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
915 static ssize_t xendev_show_devtype(struct device *dev,
916 - struct device_attribute *attr, char *buf)
917 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
918 + struct device_attribute *attr,
922 return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
924 DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
926 -static ssize_t xendev_show_modalias(struct device *dev,
927 - struct device_attribute *attr, char *buf)
929 - return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype);
931 -DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL);
933 int xenbus_probe_node(struct xen_bus_type *bus,
935 @@ -467,6 +513,9 @@ int xenbus_probe_node(struct xen_bus_typ
937 enum xenbus_state state = xenbus_read_driver_state(nodename);
942 if (state != XenbusStateInitialising) {
943 /* Device is not new, so ignore it. This can happen if a
944 device is going away after switching to Closed. */
945 @@ -491,6 +540,7 @@ int xenbus_probe_node(struct xen_bus_typ
946 xendev->devicetype = tmpstring;
947 init_completion(&xendev->down);
949 + xendev->dev.parent = &bus->dev;
950 xendev->dev.bus = &bus->bus;
951 xendev->dev.release = xenbus_dev_release;
953 @@ -505,22 +555,15 @@ int xenbus_probe_node(struct xen_bus_typ
955 err = device_create_file(&xendev->dev, &dev_attr_nodename);
957 - goto fail_unregister;
960 err = device_create_file(&xendev->dev, &dev_attr_devtype);
962 - goto fail_remove_nodename;
964 - err = device_create_file(&xendev->dev, &dev_attr_modalias);
966 - goto fail_remove_devtype;
970 -fail_remove_devtype:
971 - device_remove_file(&xendev->dev, &dev_attr_devtype);
972 -fail_remove_nodename:
974 device_remove_file(&xendev->dev, &dev_attr_nodename);
976 + device_remove_file(&xendev->dev, &dev_attr_devtype);
977 device_unregister(&xendev->dev);
980 @@ -533,8 +576,7 @@ static int xenbus_probe_frontend(const c
984 - nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
985 - xenbus_frontend.root, type, name);
986 + nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name);
990 @@ -571,6 +613,9 @@ int xenbus_probe_devices(struct xen_bus_
992 unsigned int i, dir_n;
997 dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
1000 @@ -607,15 +652,15 @@ static int strsep_len(const char *str, c
1001 return (len == 0) ? i : -ERANGE;
1004 -void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
1005 +void dev_changed(const char *node, struct xen_bus_type *bus)
1007 int exists, rootlen;
1008 struct xenbus_device *dev;
1009 char type[BUS_ID_SIZE];
1010 const char *p, *root;
1012 - if (char_count(node, '/') < 2)
1014 + if (bus->error || char_count(node, '/') < 2)
1017 exists = xenbus_exists(XBT_NIL, node, "");
1019 @@ -649,7 +694,7 @@ static void frontend_changed(struct xenb
1023 - xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
1024 + dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
1027 /* We watch for devices appearing and vanishing. */
1028 @@ -748,7 +793,8 @@ void xenbus_suspend(void)
1032 - bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
1033 + if (!xenbus_frontend.error)
1034 + bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
1035 xenbus_backend_suspend(suspend_dev);
1038 @@ -758,7 +804,8 @@ void xenbus_resume(void)
1042 - bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
1043 + if (!xenbus_frontend.error)
1044 + bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
1045 xenbus_backend_resume(resume_dev);
1047 EXPORT_SYMBOL_GPL(xenbus_resume);
1048 @@ -766,7 +813,8 @@ EXPORT_SYMBOL_GPL(xenbus_resume);
1049 void xenbus_suspend_cancel(void)
1051 xs_suspend_cancel();
1052 - bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
1053 + if (!xenbus_frontend.error)
1054 + bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
1055 xenbus_backend_resume(suspend_cancel_dev);
1057 EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
1058 @@ -794,7 +842,8 @@ void unregister_xenstore_notifier(struct
1060 EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
1062 -void xenbus_probe(struct work_struct *unused)
1064 +void xenbus_probe(void *unused)
1066 BUG_ON((xenstored_ready <= 0));
1068 @@ -807,63 +856,171 @@ void xenbus_probe(struct work_struct *un
1069 blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
1072 -static int __init xenbus_probe_init(void)
1074 +#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
1075 +static struct file_operations xsd_kva_fops;
1076 +static struct proc_dir_entry *xsd_kva_intf;
1077 +static struct proc_dir_entry *xsd_port_intf;
1079 +static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
1081 + size_t size = vma->vm_end - vma->vm_start;
1083 + if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
1086 + if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn),
1087 + size, vma->vm_page_prot))
1093 +static int xsd_kva_read(char *page, char **start, off_t off,
1094 + int count, int *eof, void *data)
1098 + len = sprintf(page, "0x%p", xen_store_interface);
1103 +static int xsd_port_read(char *page, char **start, off_t off,
1104 + int count, int *eof, void *data)
1108 + len = sprintf(page, "%d", xen_store_evtchn);
1114 +static int xenbus_probe_init(void)
1117 + unsigned long page = 0;
1122 if (!is_running_on_xen())
1126 /* Register ourselves with the kernel bus subsystem */
1127 - err = bus_register(&xenbus_frontend.bus);
1131 - err = xenbus_backend_bus_register();
1133 - goto out_unreg_front;
1134 + xenbus_frontend.error = bus_register(&xenbus_frontend.bus);
1135 + if (xenbus_frontend.error)
1136 + printk(KERN_WARNING
1137 + "XENBUS: Error registering frontend bus: %i\n",
1138 + xenbus_frontend.error);
1139 + xenbus_backend_bus_register();
1142 * Domain0 doesn't have a store_evtchn or store_mfn yet.
1144 if (is_initial_xendomain()) {
1145 - /* dom0 not yet supported */
1146 + struct evtchn_alloc_unbound alloc_unbound;
1148 + /* Allocate page. */
1149 + page = get_zeroed_page(GFP_KERNEL);
1153 + xen_store_mfn = xen_start_info->store_mfn =
1154 + pfn_to_mfn(virt_to_phys((void *)page) >>
1157 + /* Next allocate a local port which xenstored can bind to */
1158 + alloc_unbound.dom = DOMID_SELF;
1159 + alloc_unbound.remote_dom = 0;
1161 + err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
1163 + if (err == -ENOSYS)
1166 + xen_store_evtchn = xen_start_info->store_evtchn =
1167 + alloc_unbound.port;
1169 +#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
1170 + /* And finally publish the above info in /proc/xen */
1171 + xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600);
1172 + if (xsd_kva_intf) {
1173 + memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops,
1174 + sizeof(xsd_kva_fops));
1175 + xsd_kva_fops.mmap = xsd_kva_mmap;
1176 + xsd_kva_intf->proc_fops = &xsd_kva_fops;
1177 + xsd_kva_intf->read_proc = xsd_kva_read;
1179 + xsd_port_intf = create_xen_proc_entry("xsd_port", 0400);
1180 + if (xsd_port_intf)
1181 + xsd_port_intf->read_proc = xsd_port_read;
1183 + xen_store_interface = mfn_to_virt(xen_store_mfn);
1185 xenstored_ready = 1;
1187 xen_store_evtchn = xen_start_info->store_evtchn;
1188 xen_store_mfn = xen_start_info->store_mfn;
1189 + xen_store_interface = mfn_to_virt(xen_store_mfn);
1191 + xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
1192 + xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN);
1193 + xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT,
1197 - xen_store_interface = mfn_to_virt(xen_store_mfn);
1200 + xenbus_dev_init();
1202 /* Initialize the interface to xenstore. */
1206 "XENBUS: Error initializing xenstore comms: %i\n", err);
1207 - goto out_unreg_back;
1211 + /* Register ourselves with the kernel device subsystem */
1212 + if (!xenbus_frontend.error) {
1213 + xenbus_frontend.error = device_register(&xenbus_frontend.dev);
1214 + if (xenbus_frontend.error) {
1215 + bus_unregister(&xenbus_frontend.bus);
1216 + printk(KERN_WARNING
1217 + "XENBUS: Error registering frontend device: %i\n",
1218 + xenbus_frontend.error);
1221 + xenbus_backend_device_register();
1223 if (!is_initial_xendomain())
1229 - xenbus_backend_bus_unregister();
1235 - bus_unregister(&xenbus_frontend.bus);
1237 + * Do not unregister the xenbus front/backend buses here. The buses
1238 + * must exist because front/backend drivers will use them when they are
1247 postcore_initcall(xenbus_probe_init);
1249 -MODULE_LICENSE("GPL");
1250 +MODULE_LICENSE("Dual BSD/GPL");
1252 +int xenbus_init(void)
1254 + return xenbus_probe_init();
1258 static int is_disconnected_device(struct device *dev, void *data)
1260 @@ -883,12 +1040,14 @@ static int is_disconnected_device(struct
1263 xendrv = to_xenbus_driver(dev->driver);
1264 - return (xendev->state != XenbusStateConnected ||
1265 + return (xendev->state < XenbusStateConnected ||
1266 (xendrv->is_ready && !xendrv->is_ready(xendev)));
1269 static int exists_disconnected_device(struct device_driver *drv)
1271 + if (xenbus_frontend.error)
1272 + return xenbus_frontend.error;
1273 return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
1274 is_disconnected_device);
1276 @@ -897,6 +1056,7 @@ static int print_device_status(struct de
1278 struct xenbus_device *xendev = to_xenbus_device(dev);
1279 struct device_driver *drv = data;
1280 + struct xenbus_driver *xendrv;
1282 /* Is this operation limited to a particular driver? */
1283 if (drv && (dev->driver != drv))
1284 @@ -906,12 +1066,23 @@ static int print_device_status(struct de
1285 /* Information only: is this too noisy? */
1286 printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
1288 - } else if (xendev->state != XenbusStateConnected) {
1292 + if (xendev->state < XenbusStateConnected) {
1293 + enum xenbus_state rstate = XenbusStateUnknown;
1294 + if (xendev->otherend)
1295 + rstate = xenbus_read_driver_state(xendev->otherend);
1296 printk(KERN_WARNING "XENBUS: Timeout connecting "
1297 - "to device: %s (state %d)\n",
1298 - xendev->nodename, xendev->state);
1299 + "to device: %s (local state %d, remote state %d)\n",
1300 + xendev->nodename, xendev->state, rstate);
1303 + xendrv = to_xenbus_driver(dev->driver);
1304 + if (xendrv->is_ready && !xendrv->is_ready(xendev))
1305 + printk(KERN_WARNING "XENBUS: Device not ready: %s\n",
1306 + xendev->nodename);
1311 @@ -919,7 +1090,7 @@ static int print_device_status(struct de
1312 static int ready_to_wait_for_devices;
1315 - * On a 10 second timeout, wait for all devices currently configured. We need
1316 + * On a 5-minute timeout, wait for all devices currently configured. We need
1317 * to do this to guarantee that the filesystems and / or network devices
1318 * needed for boot are available, before we can allow the boot to proceed.
1320 @@ -934,18 +1105,30 @@ static int ready_to_wait_for_devices;
1322 static void wait_for_devices(struct xenbus_driver *xendrv)
1324 - unsigned long timeout = jiffies + 10*HZ;
1325 + unsigned long start = jiffies;
1326 struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
1327 + unsigned int seconds_waited = 0;
1329 if (!ready_to_wait_for_devices || !is_running_on_xen())
1332 while (exists_disconnected_device(drv)) {
1333 - if (time_after(jiffies, timeout))
1335 + if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
1336 + if (!seconds_waited)
1337 + printk(KERN_WARNING "XENBUS: Waiting for "
1338 + "devices to initialise: ");
1339 + seconds_waited += 5;
1340 + printk("%us...", 300 - seconds_waited);
1341 + if (seconds_waited == 300)
1345 schedule_timeout_interruptible(HZ/10);
1348 + if (seconds_waited)
1351 bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
1352 print_device_status);
1354 @@ -953,10 +1136,18 @@ static void wait_for_devices(struct xenb
1356 static int __init boot_wait_for_devices(void)
1358 - ready_to_wait_for_devices = 1;
1359 - wait_for_devices(NULL);
1360 + if (!xenbus_frontend.error) {
1361 + ready_to_wait_for_devices = 1;
1362 + wait_for_devices(NULL);
1367 late_initcall(boot_wait_for_devices);
1370 +int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *))
1372 + return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn);
1374 +EXPORT_SYMBOL_GPL(xenbus_for_each_frontend);
1375 Index: head-2008-11-25/drivers/xen/xenbus/xenbus_probe.h
1376 ===================================================================
1377 --- head-2008-11-25.orig/drivers/xen/xenbus/xenbus_probe.h 2008-11-25 12:33:06.000000000 +0100
1378 +++ head-2008-11-25/drivers/xen/xenbus/xenbus_probe.h 2008-11-25 12:35:56.000000000 +0100
1380 #ifndef _XENBUS_PROBE_H
1381 #define _XENBUS_PROBE_H
1383 -#ifdef CONFIG_XEN_BACKEND
1384 +#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
1385 extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
1386 extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
1387 extern void xenbus_backend_probe_and_watch(void);
1388 -extern int xenbus_backend_bus_register(void);
1389 -extern void xenbus_backend_bus_unregister(void);
1390 +extern void xenbus_backend_bus_register(void);
1391 +extern void xenbus_backend_device_register(void);
1393 static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
1394 static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
1395 static inline void xenbus_backend_probe_and_watch(void) {}
1396 -static inline int xenbus_backend_bus_register(void) { return 0; }
1397 -static inline void xenbus_backend_bus_unregister(void) {}
1398 +static inline void xenbus_backend_bus_register(void) {}
1399 +static inline void xenbus_backend_device_register(void) {}
1406 unsigned int levels;
1407 int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
1408 int (*probe)(const char *type, const char *dir);
1409 struct bus_type bus;
1410 + struct device dev;
1413 extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
1414 extern int xenbus_dev_probe(struct device *_dev);
1415 extern int xenbus_dev_remove(struct device *_dev);
1416 extern int xenbus_register_driver_common(struct xenbus_driver *drv,
1417 - struct xen_bus_type *bus,
1418 - struct module *owner,
1419 - const char *mod_name);
1420 + struct xen_bus_type *bus);
1421 extern int xenbus_probe_node(struct xen_bus_type *bus,
1423 const char *nodename);
1424 extern int xenbus_probe_devices(struct xen_bus_type *bus);
1426 -extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
1427 +extern void dev_changed(const char *node, struct xen_bus_type *bus);
1431 Index: head-2008-11-25/drivers/xen/xenbus/xenbus_xs.c
1432 ===================================================================
1433 --- head-2008-11-25.orig/drivers/xen/xenbus/xenbus_xs.c 2008-11-25 12:33:06.000000000 +0100
1434 +++ head-2008-11-25/drivers/xen/xenbus/xenbus_xs.c 2008-11-25 12:35:56.000000000 +0100
1436 #include <xen/xenbus.h>
1437 #include "xenbus_comms.h"
1439 +#ifdef HAVE_XEN_PLATFORM_COMPAT_H
1440 +#include <xen/platform-compat.h>
1443 +#ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */
1444 +#define PF_NOFREEZE 0
1447 struct xs_stored_msg {
1448 struct list_head list;
1450 @@ -108,7 +116,7 @@ static DEFINE_SPINLOCK(watch_events_lock
1451 * carrying out work.
1453 static pid_t xenwatch_pid;
1454 -static DEFINE_MUTEX(xenwatch_mutex);
1455 +/* static */ DEFINE_MUTEX(xenwatch_mutex);
1456 static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
1458 static int get_error(const char *errorstring)
1459 @@ -177,7 +185,7 @@ void *xenbus_dev_request_and_reply(struc
1461 mutex_unlock(&xs_state.request_mutex);
1463 - if ((msg->type == XS_TRANSACTION_END) ||
1464 + if ((req_msg.type == XS_TRANSACTION_END) ||
1465 ((req_msg.type == XS_TRANSACTION_START) &&
1466 (msg->type == XS_ERROR)))
1467 up_read(&xs_state.transaction_mutex);
1468 @@ -213,7 +221,7 @@ static void *xs_talkv(struct xenbus_tran
1471 for (i = 0; i < num_vecs; i++) {
1472 - err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
1472 + err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
1475 mutex_unlock(&xs_state.request_mutex);
1476 return ERR_PTR(err);
1477 @@ -294,7 +302,7 @@ static char **split(char *strings, unsig
1480 /* Count the strings. */
1481 - *num = count_strings(strings, len);
1482 + *num = count_strings(strings, len) + 1;
1484 /* Transfer to one big alloc for easy freeing. */
1485 ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH);
1486 @@ -308,6 +316,7 @@ static char **split(char *strings, unsig
1487 strings = (char *)&ret[*num];
1488 for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
1490 + ret[*num] = strings + len;
1494 @@ -498,7 +507,7 @@ int xenbus_printf(struct xenbus_transact
1495 #define PRINTF_BUFFER_SIZE 4096
1496 char *printf_buffer;
1498 - printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
1499 + printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
1500 if (printf_buffer == NULL)
1503 @@ -621,6 +630,8 @@ void unregister_xenbus_watch(struct xenb
1504 char token[sizeof(watch) * 2 + 1];
1507 + BUG_ON(watch->flags & XBWF_new_thread);
1509 sprintf(token, "%lX", (long)watch);
1511 down_read(&xs_state.watch_mutex);
1512 @@ -638,11 +649,6 @@ void unregister_xenbus_watch(struct xenb
1514 up_read(&xs_state.watch_mutex);
1516 - /* Make sure there are no callbacks running currently (unless
1518 - if (current->pid != xenwatch_pid)
1519 - mutex_lock(&xenwatch_mutex);
1521 /* Cancel pending watch events. */
1522 spin_lock(&watch_events_lock);
1523 list_for_each_entry_safe(msg, tmp, &watch_events, list) {
1524 @@ -654,8 +660,11 @@ void unregister_xenbus_watch(struct xenb
1526 spin_unlock(&watch_events_lock);
1528 - if (current->pid != xenwatch_pid)
1529 + /* Flush any currently-executing callback, unless we are it. :-) */
1530 + if (current->pid != xenwatch_pid) {
1531 + mutex_lock(&xenwatch_mutex);
1532 mutex_unlock(&xenwatch_mutex);
1535 EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
1537 @@ -693,11 +702,30 @@ void xs_suspend_cancel(void)
1538 up_write(&xs_state.transaction_mutex);
1541 +static int xenwatch_handle_callback(void *data)
1543 + struct xs_stored_msg *msg = data;
1545 + msg->u.watch.handle->callback(msg->u.watch.handle,
1546 + (const char **)msg->u.watch.vec,
1547 + msg->u.watch.vec_size);
1549 + kfree(msg->u.watch.vec);
1552 + /* Kill this kthread if we were spawned just for this callback. */
1553 + if (current->pid != xenwatch_pid)
1559 static int xenwatch_thread(void *unused)
1561 struct list_head *ent;
1562 struct xs_stored_msg *msg;
1564 + current->flags |= PF_NOFREEZE;
1566 wait_event_interruptible(watch_events_waitq,
1567 !list_empty(&watch_events));
1568 @@ -713,17 +741,29 @@ static int xenwatch_thread(void *unused)
1570 spin_unlock(&watch_events_lock);
1572 - if (ent != &watch_events) {
1573 - msg = list_entry(ent, struct xs_stored_msg, list);
1574 - msg->u.watch.handle->callback(
1575 - msg->u.watch.handle,
1576 - (const char **)msg->u.watch.vec,
1577 - msg->u.watch.vec_size);
1578 - kfree(msg->u.watch.vec);
1580 + if (ent == &watch_events) {
1581 + mutex_unlock(&xenwatch_mutex);
1585 - mutex_unlock(&xenwatch_mutex);
1586 + msg = list_entry(ent, struct xs_stored_msg, list);
1589 + * Unlock the mutex before running an XBWF_new_thread
1590 + * handler. kthread_run can block which can deadlock
1591 + * against unregister_xenbus_watch() if we need to
1592 + * unregister other watches in order to make
1593 + * progress. This can occur on resume before the swap
1594 + * device is attached.
1596 + if (msg->u.watch.handle->flags & XBWF_new_thread) {
1597 + mutex_unlock(&xenwatch_mutex);
1598 + kthread_run(xenwatch_handle_callback,
1599 + msg, "xenwatch_cb");
1601 + xenwatch_handle_callback(msg);
1602 + mutex_unlock(&xenwatch_mutex);
1607 @@ -817,6 +857,7 @@ static int xenbus_thread(void *unused)
1611 + current->flags |= PF_NOFREEZE;
1613 err = process_msg();
1615 Index: head-2008-11-25/include/xen/balloon.h
1616 ===================================================================
1617 --- head-2008-11-25.orig/include/xen/balloon.h 2008-11-25 12:33:06.000000000 +0100
1618 +++ head-2008-11-25/include/xen/balloon.h 2008-11-25 12:35:56.000000000 +0100
1623 -#ifndef __XEN_BALLOON_H__
1624 -#define __XEN_BALLOON_H__
1625 +#ifndef __ASM_BALLOON_H__
1626 +#define __ASM_BALLOON_H__
1628 -#include <linux/spinlock.h>
1632 * Inform the balloon driver that it should allow some slop for device-driver
1633 * memory activities.
1634 @@ -56,6 +53,5 @@ void balloon_release_driver_page(struct
1635 extern spinlock_t balloon_lock;
1636 #define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags)
1637 #define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
1640 -#endif /* __XEN_BALLOON_H__ */
1641 +#endif /* __ASM_BALLOON_H__ */
1642 Index: head-2008-11-25/include/xen/interface/callback.h
1643 ===================================================================
1644 --- head-2008-11-25.orig/include/xen/interface/callback.h 2008-11-25 12:33:06.000000000 +0100
1645 +++ head-2008-11-25/include/xen/interface/callback.h 2008-11-25 12:35:56.000000000 +0100
1646 @@ -86,6 +86,8 @@ struct callback_register {
1648 xen_callback_t address;
1650 +typedef struct callback_register callback_register_t;
1651 +DEFINE_XEN_GUEST_HANDLE(callback_register_t);
1654 * Unregister a callback.
1655 @@ -98,5 +100,22 @@ struct callback_unregister {
1659 +typedef struct callback_unregister callback_unregister_t;
1660 +DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
1662 +#if __XEN_INTERFACE_VERSION__ < 0x00030207
1663 +#undef CALLBACKTYPE_sysenter
1664 +#define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated
1667 #endif /* __XEN_PUBLIC_CALLBACK_H__ */
1670 + * Local variables:
1672 + * c-set-style: "BSD"
1673 + * c-basic-offset: 4
1675 + * indent-tabs-mode: nil
1678 Index: head-2008-11-25/include/xen/interface/elfnote.h
1679 ===================================================================
1680 --- head-2008-11-25.orig/include/xen/interface/elfnote.h 2008-11-25 12:33:06.000000000 +0100
1681 +++ head-2008-11-25/include/xen/interface/elfnote.h 2008-11-25 12:35:56.000000000 +0100
1684 * Definitions used for the Xen ELF notes.
1686 + * Permission is hereby granted, free of charge, to any person obtaining a copy
1687 + * of this software and associated documentation files (the "Software"), to
1688 + * deal in the Software without restriction, including without limitation the
1689 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
1690 + * sell copies of the Software, and to permit persons to whom the Software is
1691 + * furnished to do so, subject to the following conditions:
1693 + * The above copyright notice and this permission notice shall be included in
1694 + * all copies or substantial portions of the Software.
1696 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1697 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1698 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1699 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1700 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1701 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
1702 + * DEALINGS IN THE SOFTWARE.
1704 * Copyright (c) 2006, Ian Campbell, XenSource Ltd.
1708 #define __XEN_PUBLIC_ELFNOTE_H__
1711 - * The notes should live in a SHT_NOTE segment and have "Xen" in the
1712 + * The notes should live in a PT_NOTE segment and have "Xen" in the
1715 * Numeric types are either 4 or 8 bytes depending on the content of
1719 * NAME=VALUE pair (string).
1721 - * LEGACY: FEATURES and PAE
1723 #define XEN_ELFNOTE_INFO 0
1726 #define XEN_ELFNOTE_LOADER 8
1729 - * The kernel supports PAE (x86/32 only, string = "yes" or "no").
1730 + * The kernel supports PAE (x86/32 only, string = "yes", "no" or
1733 + * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting
1734 + * may be given as "yes,bimodal" which will cause older Xen to treat
1735 + * this kernel as PAE.
1737 * LEGACY: PAE (n.b. The legacy interface included a provision to
1738 * indicate 'extended-cr3' support allowing L3 page tables to be
1739 @@ -140,6 +161,65 @@
1741 #define XEN_ELFNOTE_SUSPEND_CANCEL 14
1744 + * The number of the highest elfnote defined.
1746 +#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUSPEND_CANCEL
1749 + * System information exported through crash notes.
1751 + * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
1752 + * note in case of a system crash. This note will contain various
1753 + * information about the system, see xen/include/xen/elfcore.h.
1755 +#define XEN_ELFNOTE_CRASH_INFO 0x1000001
1758 + * System registers exported through crash notes.
1760 + * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
1761 + * note per cpu in case of a system crash. This note is architecture
1762 + * specific and will contain registers not saved in the "CORE" note.
1763 + * See xen/include/xen/elfcore.h for more information.
1765 +#define XEN_ELFNOTE_CRASH_REGS 0x1000002
1769 + * xen dump-core none note.
1770 + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE
1771 + * in its dump file to indicate that the file is xen dump-core
1772 + * file. This note doesn't have any other information.
1773 + * See tools/libxc/xc_core.h for more information.
1775 +#define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000
1778 + * xen dump-core header note.
1779 + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER
1780 + * in its dump file.
1781 + * See tools/libxc/xc_core.h for more information.
1783 +#define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001
1786 + * xen dump-core xen version note.
1787 + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION
1788 + * in its dump file. It contains the xen version obtained via the
1789 + * XENVER hypercall.
1790 + * See tools/libxc/xc_core.h for more information.
1792 +#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002
1795 + * xen dump-core format version note.
1796 + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION
1797 + * in its dump file. It contains a format version identifier.
1798 + * See tools/libxc/xc_core.h for more information.
1800 +#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003
1802 #endif /* __XEN_PUBLIC_ELFNOTE_H__ */
1805 Index: head-2008-11-25/include/xen/interface/event_channel.h
1806 ===================================================================
1807 --- head-2008-11-25.orig/include/xen/interface/event_channel.h 2008-11-25 12:33:06.000000000 +0100
1808 +++ head-2008-11-25/include/xen/interface/event_channel.h 2008-11-25 12:35:56.000000000 +0100
1811 * Event channels between domains.
1813 + * Permission is hereby granted, free of charge, to any person obtaining a copy
1814 + * of this software and associated documentation files (the "Software"), to
1815 + * deal in the Software without restriction, including without limitation the
1816 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
1817 + * sell copies of the Software, and to permit persons to whom the Software is
1818 + * furnished to do so, subject to the following conditions:
1820 + * The above copyright notice and this permission notice shall be included in
1821 + * all copies or substantial portions of the Software.
1823 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1824 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1825 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1826 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1827 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1828 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
1829 + * DEALINGS IN THE SOFTWARE.
1831 * Copyright (c) 2003-2004, K A Fraser.
1834 #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
1835 #define __XEN_PUBLIC_EVENT_CHANNEL_H__
1838 + * Prototype for this hypercall is:
1839 + * int event_channel_op(int cmd, void *args)
1840 + * @cmd == EVTCHNOP_??? (event-channel operation).
1841 + * @args == Operation-specific extra arguments (NULL if none).
1844 typedef uint32_t evtchn_port_t;
1845 -DEFINE_GUEST_HANDLE(evtchn_port_t);
1846 +DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
1849 * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
1850 @@ -20,13 +45,14 @@ DEFINE_GUEST_HANDLE(evtchn_port_t);
1851 * 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
1852 * 2. <rdom> may be DOMID_SELF, allowing loopback connections.
1854 -#define EVTCHNOP_alloc_unbound 6
1855 +#define EVTCHNOP_alloc_unbound 6
1856 struct evtchn_alloc_unbound {
1857 - /* IN parameters */
1858 - domid_t dom, remote_dom;
1859 - /* OUT parameters */
1860 - evtchn_port_t port;
1861 + /* IN parameters */
1862 + domid_t dom, remote_dom;
1863 + /* OUT parameters */
1864 + evtchn_port_t port;
1866 +typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
1869 * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
1870 @@ -39,29 +65,35 @@ struct evtchn_alloc_unbound {
1872 #define EVTCHNOP_bind_interdomain 0
1873 struct evtchn_bind_interdomain {
1874 - /* IN parameters. */
1875 - domid_t remote_dom;
1876 - evtchn_port_t remote_port;
1877 - /* OUT parameters. */
1878 - evtchn_port_t local_port;
1879 + /* IN parameters. */
1880 + domid_t remote_dom;
1881 + evtchn_port_t remote_port;
1882 + /* OUT parameters. */
1883 + evtchn_port_t local_port;
1885 +typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
1888 * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
1891 - * 1. A virtual IRQ may be bound to at most one event channel per vcpu.
1892 - * 2. The allocated event channel is bound to the specified vcpu. The binding
1893 - * may not be changed.
1894 + * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
1895 + * in xen.h for the classification of each VIRQ.
1896 + * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be
1897 + * re-bound via EVTCHNOP_bind_vcpu.
1898 + * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
1899 + * The allocated event channel is bound to the specified vcpu and the
1900 + * binding cannot be changed.
1902 -#define EVTCHNOP_bind_virq 1
1903 +#define EVTCHNOP_bind_virq 1
1904 struct evtchn_bind_virq {
1905 - /* IN parameters. */
1908 - /* OUT parameters. */
1909 - evtchn_port_t port;
1910 + /* IN parameters. */
1913 + /* OUT parameters. */
1914 + evtchn_port_t port;
1916 +typedef struct evtchn_bind_virq evtchn_bind_virq_t;
1919 * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
1920 @@ -69,15 +101,16 @@ struct evtchn_bind_virq {
1921 * 1. A physical IRQ may be bound to at most one event channel per domain.
1922 * 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
1924 -#define EVTCHNOP_bind_pirq 2
1925 +#define EVTCHNOP_bind_pirq 2
1926 struct evtchn_bind_pirq {
1927 - /* IN parameters. */
1929 + /* IN parameters. */
1931 #define BIND_PIRQ__WILL_SHARE 1
1932 - uint32_t flags; /* BIND_PIRQ__* */
1933 - /* OUT parameters. */
1934 - evtchn_port_t port;
1935 + uint32_t flags; /* BIND_PIRQ__* */
1936 + /* OUT parameters. */
1937 + evtchn_port_t port;
1939 +typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
1942 * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
1943 @@ -85,33 +118,36 @@ struct evtchn_bind_pirq {
1944 * 1. The allocated event channel is bound to the specified vcpu. The binding
1945 * may not be changed.
1947 -#define EVTCHNOP_bind_ipi 7
1948 +#define EVTCHNOP_bind_ipi 7
1949 struct evtchn_bind_ipi {
1951 - /* OUT parameters. */
1952 - evtchn_port_t port;
1954 + /* OUT parameters. */
1955 + evtchn_port_t port;
1957 +typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
1960 * EVTCHNOP_close: Close a local event channel <port>. If the channel is
1961 * interdomain then the remote end is placed in the unbound state
1962 * (EVTCHNSTAT_unbound), awaiting a new connection.
1964 -#define EVTCHNOP_close 3
1965 +#define EVTCHNOP_close 3
1966 struct evtchn_close {
1967 - /* IN parameters. */
1968 - evtchn_port_t port;
1969 + /* IN parameters. */
1970 + evtchn_port_t port;
1972 +typedef struct evtchn_close evtchn_close_t;
1975 * EVTCHNOP_send: Send an event to the remote end of the channel whose local
1976 * endpoint is <port>.
1978 -#define EVTCHNOP_send 4
1979 +#define EVTCHNOP_send 4
1980 struct evtchn_send {
1981 - /* IN parameters. */
1982 - evtchn_port_t port;
1983 + /* IN parameters. */
1984 + evtchn_port_t port;
1986 +typedef struct evtchn_send evtchn_send_t;
1989 * EVTCHNOP_status: Get the current status of the communication channel which
1990 @@ -121,75 +157,108 @@ struct evtchn_send {
1991 * 2. Only a sufficiently-privileged domain may obtain the status of an event
1992 * channel for which <dom> is not DOMID_SELF.
1994 -#define EVTCHNOP_status 5
1995 +#define EVTCHNOP_status 5
1996 struct evtchn_status {
1997 - /* IN parameters */
1999 - evtchn_port_t port;
2000 - /* OUT parameters */
2001 -#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
2002 -#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
2003 -#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
2004 -#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
2005 -#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
2006 -#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
2008 - uint32_t vcpu; /* VCPU to which this channel is bound. */
2012 - } unbound; /* EVTCHNSTAT_unbound */
2015 - evtchn_port_t port;
2016 - } interdomain; /* EVTCHNSTAT_interdomain */
2017 - uint32_t pirq; /* EVTCHNSTAT_pirq */
2018 - uint32_t virq; /* EVTCHNSTAT_virq */
2020 + /* IN parameters */
2022 + evtchn_port_t port;
2023 + /* OUT parameters */
2024 +#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
2025 +#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
2026 +#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
2027 +#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
2028 +#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
2029 +#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
2031 + uint32_t vcpu; /* VCPU to which this channel is bound. */
2035 + } unbound; /* EVTCHNSTAT_unbound */
2038 + evtchn_port_t port;
2039 + } interdomain; /* EVTCHNSTAT_interdomain */
2040 + uint32_t pirq; /* EVTCHNSTAT_pirq */
2041 + uint32_t virq; /* EVTCHNSTAT_virq */
2044 +typedef struct evtchn_status evtchn_status_t;
2047 * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
2050 - * 1. IPI- and VIRQ-bound channels always notify the vcpu that initialised
2051 - * the binding. This binding cannot be changed.
2052 - * 2. All other channels notify vcpu0 by default. This default is set when
2053 + * 1. IPI-bound channels always notify the vcpu specified at bind time.
2054 + * This binding cannot be changed.
2055 + * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
2056 + * This binding cannot be changed.
2057 + * 3. All other channels notify vcpu0 by default. This default is set when
2058 * the channel is allocated (a port that is freed and subsequently reused
2059 * has its binding reset to vcpu0).
2061 -#define EVTCHNOP_bind_vcpu 8
2062 +#define EVTCHNOP_bind_vcpu 8
2063 struct evtchn_bind_vcpu {
2064 - /* IN parameters. */
2065 - evtchn_port_t port;
2067 + /* IN parameters. */
2068 + evtchn_port_t port;
2071 +typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
2074 * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
2075 * a notification to the appropriate VCPU if an event is pending.
2077 -#define EVTCHNOP_unmask 9
2078 +#define EVTCHNOP_unmask 9
2079 struct evtchn_unmask {
2080 - /* IN parameters. */
2081 - evtchn_port_t port;
2082 + /* IN parameters. */
2083 + evtchn_port_t port;
2085 +typedef struct evtchn_unmask evtchn_unmask_t;
2088 + * EVTCHNOP_reset: Close all event channels associated with specified domain.
2090 + * 1. <dom> may be specified as DOMID_SELF.
2091 + * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
2093 +#define EVTCHNOP_reset 10
2094 +struct evtchn_reset {
2095 + /* IN parameters. */
2098 +typedef struct evtchn_reset evtchn_reset_t;
2101 + * Argument to event_channel_op_compat() hypercall. Superceded by new
2102 + * event_channel_op() hypercall since 0x00030202.
2105 - uint32_t cmd; /* EVTCHNOP_* */
2107 - struct evtchn_alloc_unbound alloc_unbound;
2108 - struct evtchn_bind_interdomain bind_interdomain;
2109 - struct evtchn_bind_virq bind_virq;
2110 - struct evtchn_bind_pirq bind_pirq;
2111 - struct evtchn_bind_ipi bind_ipi;
2112 - struct evtchn_close close;
2113 - struct evtchn_send send;
2114 - struct evtchn_status status;
2115 - struct evtchn_bind_vcpu bind_vcpu;
2116 - struct evtchn_unmask unmask;
2118 + uint32_t cmd; /* EVTCHNOP_* */
2120 + struct evtchn_alloc_unbound alloc_unbound;
2121 + struct evtchn_bind_interdomain bind_interdomain;
2122 + struct evtchn_bind_virq bind_virq;
2123 + struct evtchn_bind_pirq bind_pirq;
2124 + struct evtchn_bind_ipi bind_ipi;
2125 + struct evtchn_close close;
2126 + struct evtchn_send send;
2127 + struct evtchn_status status;
2128 + struct evtchn_bind_vcpu bind_vcpu;
2129 + struct evtchn_unmask unmask;
2132 -DEFINE_GUEST_HANDLE_STRUCT(evtchn_op);
2133 +typedef struct evtchn_op evtchn_op_t;
2134 +DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
2136 #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
2139 + * Local variables:
2141 + * c-set-style: "BSD"
2142 + * c-basic-offset: 4
2144 + * indent-tabs-mode: nil
2147 Index: head-2008-11-25/include/xen/interface/features.h
2148 ===================================================================
2149 --- head-2008-11-25.orig/include/xen/interface/features.h 2008-11-25 12:33:06.000000000 +0100
2150 +++ head-2008-11-25/include/xen/interface/features.h 2008-11-25 12:22:34.000000000 +0100
2153 * Feature flags, reported by XENVER_get_features.
2155 + * Permission is hereby granted, free of charge, to any person obtaining a copy
2156 + * of this software and associated documentation files (the "Software"), to
2157 + * deal in the Software without restriction, including without limitation the
2158 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
2159 + * sell copies of the Software, and to permit persons to whom the Software is
2160 + * furnished to do so, subject to the following conditions:
2162 + * The above copyright notice and this permission notice shall be included in
2163 + * all copies or substantial portions of the Software.
2165 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2166 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2167 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
2168 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2169 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2170 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2171 + * DEALINGS IN THE SOFTWARE.
2173 * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
2177 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
2178 #define XENFEAT_mmu_pt_update_preserve_ad 5
2180 +/* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */
2181 +#define XENFEAT_highmem_assist 6
2184 + * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
2185 + * available pte bits.
2187 +#define XENFEAT_gnttab_map_avail_bits 7
2189 #define XENFEAT_NR_SUBMAPS 1
2191 #endif /* __XEN_PUBLIC_FEATURES_H__ */
2194 + * Local variables:
2196 + * c-set-style: "BSD"
2197 + * c-basic-offset: 4
2199 + * indent-tabs-mode: nil
2202 Index: head-2008-11-25/include/xen/interface/grant_table.h
2203 ===================================================================
2204 --- head-2008-11-25.orig/include/xen/interface/grant_table.h 2008-11-25 12:33:06.000000000 +0100
2205 +++ head-2008-11-25/include/xen/interface/grant_table.h 2008-11-25 12:22:34.000000000 +0100
2206 @@ -100,6 +100,7 @@ struct grant_entry {
2210 +typedef struct grant_entry grant_entry_t;
2213 * Type of grant entry.
2214 @@ -118,6 +119,7 @@ struct grant_entry {
2215 * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
2216 * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
2217 * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
2218 + * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST]
2220 #define _GTF_readonly (2)
2221 #define GTF_readonly (1U<<_GTF_readonly)
2222 @@ -125,6 +127,12 @@ struct grant_entry {
2223 #define GTF_reading (1U<<_GTF_reading)
2224 #define _GTF_writing (4)
2225 #define GTF_writing (1U<<_GTF_writing)
2226 +#define _GTF_PWT (5)
2227 +#define GTF_PWT (1U<<_GTF_PWT)
2228 +#define _GTF_PCD (6)
2229 +#define GTF_PCD (1U<<_GTF_PCD)
2230 +#define _GTF_PAT (7)
2231 +#define GTF_PAT (1U<<_GTF_PAT)
2234 * Subflags for GTF_accept_transfer:
2235 @@ -185,7 +193,8 @@ struct gnttab_map_grant_ref {
2236 grant_handle_t handle;
2237 uint64_t dev_bus_addr;
2239 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref);
2240 +typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
2241 +DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
2244 * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
2245 @@ -207,7 +216,8 @@ struct gnttab_unmap_grant_ref {
2246 /* OUT parameters. */
2247 int16_t status; /* GNTST_* */
2249 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref);
2250 +typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
2251 +DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
2254 * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
2255 @@ -225,9 +235,10 @@ struct gnttab_setup_table {
2257 /* OUT parameters. */
2258 int16_t status; /* GNTST_* */
2259 - GUEST_HANDLE(ulong) frame_list;
2260 + XEN_GUEST_HANDLE(ulong) frame_list;
2262 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table);
2263 +typedef struct gnttab_setup_table gnttab_setup_table_t;
2264 +DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
2267 * GNTTABOP_dump_table: Dump the contents of the grant table to the
2268 @@ -240,7 +251,8 @@ struct gnttab_dump_table {
2269 /* OUT parameters. */
2270 int16_t status; /* GNTST_* */
2272 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table);
2273 +typedef struct gnttab_dump_table gnttab_dump_table_t;
2274 +DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
2277 * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
2278 @@ -253,13 +265,15 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_t
2279 #define GNTTABOP_transfer 4
2280 struct gnttab_transfer {
2281 /* IN parameters. */
2282 - unsigned long mfn;
2286 /* OUT parameters. */
2289 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer);
2290 +typedef struct gnttab_transfer gnttab_transfer_t;
2291 +DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
2295 * GNTTABOP_copy: Hypervisor based copy
2296 @@ -285,22 +299,22 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_transf
2297 #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
2299 #define GNTTABOP_copy 5
2300 -struct gnttab_copy {
2301 - /* IN parameters. */
2305 - unsigned long gmfn;
2311 - uint16_t flags; /* GNTCOPY_* */
2312 - /* OUT parameters. */
2315 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy);
2316 +typedef struct gnttab_copy {
2317 + /* IN parameters. */
2327 + uint16_t flags; /* GNTCOPY_* */
2328 + /* OUT parameters. */
2331 +DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
2334 * GNTTABOP_query_size: Query the current and maximum sizes of the shared
2335 @@ -318,10 +332,35 @@ struct gnttab_query_size {
2336 uint32_t max_nr_frames;
2337 int16_t status; /* GNTST_* */
2339 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size);
2340 +typedef struct gnttab_query_size gnttab_query_size_t;
2341 +DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
2344 - * Bitfield values for update_pin_status.flags.
2345 + * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
2346 + * tracked by <handle> but atomically replace the page table entry with one
2347 + * pointing to the machine address under <new_addr>. <new_addr> will be
2348 + * redirected to the null entry.
2350 + * 1. The call may fail in an undefined manner if either mapping is not
2351 + * tracked by <handle>.
2352 + * 2. After executing a batch of unmaps, it is guaranteed that no stale
2353 + * mappings will remain in the device or host TLBs.
2355 +#define GNTTABOP_unmap_and_replace 7
2356 +struct gnttab_unmap_and_replace {
2357 + /* IN parameters. */
2358 + uint64_t host_addr;
2359 + uint64_t new_addr;
2360 + grant_handle_t handle;
2361 + /* OUT parameters. */
2362 + int16_t status; /* GNTST_* */
2364 +typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
2365 +DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
2369 + * Bitfield values for gnttab_map_grant_ref.flags.
2371 /* Map the grant entry for access by I/O devices. */
2372 #define _GNTMAP_device_map (0)
2373 @@ -349,6 +388,13 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_
2374 #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
2377 + * Bits to be placed in guest kernel available PTE bits (architecture
2378 + * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
2380 +#define _GNTMAP_guest_avail0 (16)
2381 +#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
2384 * Values for error status returns. All errors are -ve.
2386 #define GNTST_okay (0) /* Normal return. */
2387 @@ -361,7 +407,8 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_
2388 #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
2389 #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
2390 #define GNTST_bad_page (-9) /* Specified page was invalid for op. */
2391 -#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary */
2392 +#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
2393 +#define GNTST_address_too_big (-11) /* transfer page address too large. */
2395 #define GNTTABOP_error_msgs { \
2397 @@ -374,7 +421,18 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_
2398 "no spare translation slot in the I/O MMU", \
2399 "permission denied", \
2401 - "copy arguments cross page boundary" \
2402 + "copy arguments cross page boundary", \
2403 + "page address size too large" \
2406 #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
2409 + * Local variables:
2411 + * c-set-style: "BSD"
2412 + * c-basic-offset: 4
2414 + * indent-tabs-mode: nil
2417 Index: head-2008-11-25/include/xen/interface/io/blkif.h
2418 ===================================================================
2419 --- head-2008-11-25.orig/include/xen/interface/io/blkif.h 2008-11-25 12:33:06.000000000 +0100
2420 +++ head-2008-11-25/include/xen/interface/io/blkif.h 2008-11-25 12:35:56.000000000 +0100
2423 * Unified block-device I/O interface for Xen guest OSes.
2425 + * Permission is hereby granted, free of charge, to any person obtaining a copy
2426 + * of this software and associated documentation files (the "Software"), to
2427 + * deal in the Software without restriction, including without limitation the
2428 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
2429 + * sell copies of the Software, and to permit persons to whom the Software is
2430 + * furnished to do so, subject to the following conditions:
2432 + * The above copyright notice and this permission notice shall be included in
2433 + * all copies or substantial portions of the Software.
2435 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2436 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2437 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
2438 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2439 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2440 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2441 + * DEALINGS IN THE SOFTWARE.
2443 * Copyright (c) 2003-2004, Keir Fraser
2447 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
2450 -typedef uint16_t blkif_vdev_t;
2451 -typedef uint64_t blkif_sector_t;
2452 +#ifndef blkif_vdev_t
2453 +#define blkif_vdev_t uint16_t
2455 +#define blkif_sector_t uint64_t
2459 @@ -34,7 +54,7 @@ typedef uint64_t blkif_sector_t;
2460 #define BLKIF_OP_WRITE 1
2462 * Recognised only if "feature-barrier" is present in backend xenbus info.
2463 - * The "feature_barrier" node contains a boolean indicating whether barrier
2464 + * The "feature-barrier" node contains a boolean indicating whether barrier
2465 * requests are likely to succeed or fail. Either way, a barrier request
2466 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
2467 * the underlying block-device hardware. The boolean simply indicates whether
2468 @@ -43,33 +63,50 @@ typedef uint64_t blkif_sector_t;
2469 * create the "feature-barrier" node!
2471 #define BLKIF_OP_WRITE_BARRIER 2
2473 + * Recognised if "feature-flush-cache" is present in backend xenbus
2474 + * info. A flush will ask the underlying storage hardware to flush its
2475 + * non-volatile caches as appropriate. The "feature-flush-cache" node
2476 + * contains a boolean indicating whether flush requests are likely to
2477 + * succeed or fail. Either way, a flush request may fail at any time
2478 + * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
2479 + * block-device hardware. The boolean simply indicates whether or not it
2480 + * is worthwhile for the frontend to attempt flushes. If a backend does
2481 + * not recognise BLKIF_OP_WRITE_FLUSH_CACHE, it should *not* create the
2482 + * "feature-flush-cache" node!
2484 +#define BLKIF_OP_FLUSH_DISKCACHE 3
2487 * Maximum scatter/gather segments per request.
2488 - * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
2489 + * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
2490 * NB. This could be 12 if the ring indexes weren't stored in the same page.
2492 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
2494 +struct blkif_request_segment {
2495 + grant_ref_t gref; /* reference to I/O buffer frame */
2496 + /* @first_sect: first sector in frame to transfer (inclusive). */
2497 + /* @last_sect: last sector in frame to transfer (inclusive). */
2498 + uint8_t first_sect, last_sect;
2501 struct blkif_request {
2502 - uint8_t operation; /* BLKIF_OP_??? */
2503 - uint8_t nr_segments; /* number of segments */
2504 - blkif_vdev_t handle; /* only for read/write requests */
2505 - uint64_t id; /* private guest value, echoed in resp */
2506 - blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
2507 - struct blkif_request_segment {
2508 - grant_ref_t gref; /* reference to I/O buffer frame */
2509 - /* @first_sect: first sector in frame to transfer (inclusive). */
2510 - /* @last_sect: last sector in frame to transfer (inclusive). */
2511 - uint8_t first_sect, last_sect;
2512 - } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
2513 + uint8_t operation; /* BLKIF_OP_??? */
2514 + uint8_t nr_segments; /* number of segments */
2515 + blkif_vdev_t handle; /* only for read/write requests */
2516 + uint64_t id; /* private guest value, echoed in resp */
2517 + blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
2518 + struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
2520 +typedef struct blkif_request blkif_request_t;
2522 struct blkif_response {
2523 - uint64_t id; /* copied from request */
2524 - uint8_t operation; /* copied from request */
2525 - int16_t status; /* BLKIF_RSP_??? */
2526 + uint64_t id; /* copied from request */
2527 + uint8_t operation; /* copied from request */
2528 + int16_t status; /* BLKIF_RSP_??? */
2530 +typedef struct blkif_response blkif_response_t;
2533 * STATUS RETURN CODES.
2534 @@ -92,3 +129,13 @@ DEFINE_RING_TYPES(blkif, struct blkif_re
2535 #define VDISK_READONLY 0x4
2537 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
2540 + * Local variables:
2542 + * c-set-style: "BSD"
2543 + * c-basic-offset: 4
2545 + * indent-tabs-mode: nil
2548 Index: head-2008-11-25/include/xen/interface/io/console.h
2549 ===================================================================
2550 --- head-2008-11-25.orig/include/xen/interface/io/console.h 2008-11-25 12:33:06.000000000 +0100
2551 +++ head-2008-11-25/include/xen/interface/io/console.h 2008-11-25 12:35:56.000000000 +0100
2554 * Console I/O interface for Xen guest OSes.
2556 + * Permission is hereby granted, free of charge, to any person obtaining a copy
2557 + * of this software and associated documentation files (the "Software"), to
2558 + * deal in the Software without restriction, including without limitation the
2559 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
2560 + * sell copies of the Software, and to permit persons to whom the Software is
2561 + * furnished to do so, subject to the following conditions:
2563 + * The above copyright notice and this permission notice shall be included in
2564 + * all copies or substantial portions of the Software.
2566 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2567 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2568 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
2569 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2570 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2571 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2572 + * DEALINGS IN THE SOFTWARE.
2574 * Copyright (c) 2005, Keir Fraser
2577 @@ -21,3 +39,13 @@ struct xencons_interface {
2580 #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
2583 + * Local variables:
2585 + * c-set-style: "BSD"
2586 + * c-basic-offset: 4
2588 + * indent-tabs-mode: nil
2591 Index: head-2008-11-25/include/xen/interface/io/fbif.h
2592 ===================================================================
2593 --- head-2008-11-25.orig/include/xen/interface/io/fbif.h 2008-11-25 12:33:06.000000000 +0100
2594 +++ head-2008-11-25/include/xen/interface/io/fbif.h 2008-11-25 12:35:56.000000000 +0100
2597 #define XENFB_TYPE_UPDATE 2
2599 -struct xenfb_update {
2600 - uint8_t type; /* XENFB_TYPE_UPDATE */
2601 - int32_t x; /* source x */
2602 - int32_t y; /* source y */
2603 - int32_t width; /* rect width */
2604 - int32_t height; /* rect height */
2605 +struct xenfb_update
2607 + uint8_t type; /* XENFB_TYPE_UPDATE */
2608 + int32_t x; /* source x */
2609 + int32_t y; /* source y */
2610 + int32_t width; /* rect width */
2611 + int32_t height; /* rect height */
2615 @@ -55,36 +56,58 @@ struct xenfb_update {
2617 #define XENFB_TYPE_RESIZE 3
2619 -struct xenfb_resize {
2620 - uint8_t type; /* XENFB_TYPE_RESIZE */
2621 - int32_t width; /* width in pixels */
2622 - int32_t height; /* height in pixels */
2623 - int32_t stride; /* stride in bytes */
2624 - int32_t depth; /* depth in bits */
2625 - int32_t offset; /* start offset within framebuffer */
2626 +struct xenfb_resize
2628 + uint8_t type; /* XENFB_TYPE_RESIZE */
2629 + int32_t width; /* width in pixels */
2630 + int32_t height; /* height in pixels */
2631 + int32_t stride; /* stride in bytes */
2632 + int32_t depth; /* depth in bits */
2633 + int32_t offset; /* offset of the framebuffer in bytes */
2636 #define XENFB_OUT_EVENT_SIZE 40
2638 -union xenfb_out_event {
2640 - struct xenfb_update update;
2641 - struct xenfb_resize resize;
2642 - char pad[XENFB_OUT_EVENT_SIZE];
2643 +union xenfb_out_event
2646 + struct xenfb_update update;
2647 + struct xenfb_resize resize;
2648 + char pad[XENFB_OUT_EVENT_SIZE];
2651 /* In events (backend -> frontend) */
2654 * Frontends should ignore unknown in events.
2655 - * No in events currently defined.
2659 + * Framebuffer refresh period advice
2660 + * Backend sends it to advise the frontend their preferred period of
2661 + * refresh. Frontends that keep the framebuffer constantly up-to-date
2662 + * just ignore it. Frontends that use the advice should immediately
2663 + * refresh the framebuffer (and send an update notification event if
2664 + * those have been requested), then use the update frequency to guide
2665 + * their periodic refreshes.
2667 +#define XENFB_TYPE_REFRESH_PERIOD 1
2668 +#define XENFB_NO_REFRESH 0
2670 +struct xenfb_refresh_period
2672 + uint8_t type; /* XENFB_TYPE_REFRESH_PERIOD */
2673 + uint32_t period; /* period of refresh, in ms,
2674 + * XENFB_NO_REFRESH if no refresh is needed */
2677 #define XENFB_IN_EVENT_SIZE 40
2679 -union xenfb_in_event {
2681 - char pad[XENFB_IN_EVENT_SIZE];
2682 +union xenfb_in_event
2685 + struct xenfb_refresh_period refresh_period;
2686 + char pad[XENFB_IN_EVENT_SIZE];
2690 @@ -93,41 +116,41 @@ union xenfb_in_event {
2691 #define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
2692 #define XENFB_IN_RING_OFFS 1024
2693 #define XENFB_IN_RING(page) \
2694 - ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
2695 + ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
2696 #define XENFB_IN_RING_REF(page, idx) \
2697 - (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
2698 + (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
2700 #define XENFB_OUT_RING_SIZE 2048
2701 #define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
2702 #define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
2703 #define XENFB_OUT_RING(page) \
2704 - ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
2705 + ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
2706 #define XENFB_OUT_RING_REF(page, idx) \
2707 - (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
2708 + (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
2710 -struct xenfb_page {
2711 - uint32_t in_cons, in_prod;
2712 - uint32_t out_cons, out_prod;
2714 - int32_t width; /* width of the framebuffer (in pixels) */
2715 - int32_t height; /* height of the framebuffer (in pixels) */
2716 - uint32_t line_length; /* length of a row of pixels (in bytes) */
2717 - uint32_t mem_length; /* length of the framebuffer (in bytes) */
2718 - uint8_t depth; /* depth of a pixel (in bits) */
2721 - * Framebuffer page directory
2723 - * Each directory page holds PAGE_SIZE / sizeof(*pd)
2724 - * framebuffer pages, and can thus map up to PAGE_SIZE *
2725 - * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
2726 - * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2
2727 - * Megs 64 bit. 256 directories give enough room for a 512
2728 - * Meg framebuffer with a max resolution of 12,800x10,240.
2729 - * Should be enough for a while with room leftover for
2732 - unsigned long pd[256];
2735 + uint32_t in_cons, in_prod;
2736 + uint32_t out_cons, out_prod;
2738 + int32_t width; /* the width of the framebuffer (in pixels) */
2739 + int32_t height; /* the height of the framebuffer (in pixels) */
2740 + uint32_t line_length; /* the length of a row of pixels (in bytes) */
2741 + uint32_t mem_length; /* the length of the framebuffer (in bytes) */
2742 + uint8_t depth; /* the depth of a pixel (in bits) */
2745 + * Framebuffer page directory
2747 + * Each directory page holds PAGE_SIZE / sizeof(*pd)
2748 + * framebuffer pages, and can thus map up to PAGE_SIZE *
2749 + * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
2750 + * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2 Megs
2751 + * 64 bit. 256 directories give enough room for a 512 Meg
2752 + * framebuffer with a max resolution of 12,800x10,240. Should
2753 + * be enough for a while with room leftover for expansion.
2755 + unsigned long pd[256];
2759 @@ -141,3 +164,13 @@ struct xenfb_page {
2765 + * Local variables:
2767 + * c-set-style: "BSD"
2768 + * c-basic-offset: 4
2770 + * indent-tabs-mode: nil
2773 Index: head-2008-11-25/include/xen/interface/io/kbdif.h
2774 ===================================================================
2775 --- head-2008-11-25.orig/include/xen/interface/io/kbdif.h 2008-11-25 12:33:06.000000000 +0100
2776 +++ head-2008-11-25/include/xen/interface/io/kbdif.h 2008-11-25 12:35:56.000000000 +0100
2779 #define XENKBD_TYPE_POS 4
2781 -struct xenkbd_motion {
2782 - uint8_t type; /* XENKBD_TYPE_MOTION */
2783 - int32_t rel_x; /* relative X motion */
2784 - int32_t rel_y; /* relative Y motion */
2785 - int32_t rel_z; /* relative Z motion (wheel) */
2788 -struct xenkbd_key {
2789 - uint8_t type; /* XENKBD_TYPE_KEY */
2790 - uint8_t pressed; /* 1 if pressed; 0 otherwise */
2791 - uint32_t keycode; /* KEY_* from linux/input.h */
2794 -struct xenkbd_position {
2795 - uint8_t type; /* XENKBD_TYPE_POS */
2796 - int32_t abs_x; /* absolute X position (in FB pixels) */
2797 - int32_t abs_y; /* absolute Y position (in FB pixels) */
2798 - int32_t rel_z; /* relative Z motion (wheel) */
2799 +struct xenkbd_motion
2801 + uint8_t type; /* XENKBD_TYPE_MOTION */
2802 + int32_t rel_x; /* relative X motion */
2803 + int32_t rel_y; /* relative Y motion */
2804 + int32_t rel_z; /* relative Z motion (wheel) */
2809 + uint8_t type; /* XENKBD_TYPE_KEY */
2810 + uint8_t pressed; /* 1 if pressed; 0 otherwise */
2811 + uint32_t keycode; /* KEY_* from linux/input.h */
2814 +struct xenkbd_position
2816 + uint8_t type; /* XENKBD_TYPE_POS */
2817 + int32_t abs_x; /* absolute X position (in FB pixels) */
2818 + int32_t abs_y; /* absolute Y position (in FB pixels) */
2819 + int32_t rel_z; /* relative Z motion (wheel) */
2822 #define XENKBD_IN_EVENT_SIZE 40
2824 -union xenkbd_in_event {
2826 - struct xenkbd_motion motion;
2827 - struct xenkbd_key key;
2828 - struct xenkbd_position pos;
2829 - char pad[XENKBD_IN_EVENT_SIZE];
2830 +union xenkbd_in_event
2833 + struct xenkbd_motion motion;
2834 + struct xenkbd_key key;
2835 + struct xenkbd_position pos;
2836 + char pad[XENKBD_IN_EVENT_SIZE];
2839 /* Out events (frontend -> backend) */
2840 @@ -85,9 +89,10 @@ union xenkbd_in_event {
2842 #define XENKBD_OUT_EVENT_SIZE 40
2844 -union xenkbd_out_event {
2846 - char pad[XENKBD_OUT_EVENT_SIZE];
2847 +union xenkbd_out_event
2850 + char pad[XENKBD_OUT_EVENT_SIZE];
2854 @@ -96,21 +101,32 @@ union xenkbd_out_event {
2855 #define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
2856 #define XENKBD_IN_RING_OFFS 1024
2857 #define XENKBD_IN_RING(page) \
2858 - ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
2859 + ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
2860 #define XENKBD_IN_RING_REF(page, idx) \
2861 - (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
2862 + (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
2864 #define XENKBD_OUT_RING_SIZE 1024
2865 #define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
2866 #define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
2867 #define XENKBD_OUT_RING(page) \
2868 - ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
2869 + ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
2870 #define XENKBD_OUT_RING_REF(page, idx) \
2871 - (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
2872 + (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
2874 -struct xenkbd_page {
2875 - uint32_t in_cons, in_prod;
2876 - uint32_t out_cons, out_prod;
2879 + uint32_t in_cons, in_prod;
2880 + uint32_t out_cons, out_prod;
2886 + * Local variables:
2888 + * c-set-style: "BSD"
2889 + * c-basic-offset: 4
2891 + * indent-tabs-mode: nil
2894 Index: head-2008-11-25/include/xen/interface/io/netif.h
2895 ===================================================================
2896 --- head-2008-11-25.orig/include/xen/interface/io/netif.h 2008-11-25 12:33:06.000000000 +0100
2897 +++ head-2008-11-25/include/xen/interface/io/netif.h 2008-11-25 12:35:56.000000000 +0100
2900 * Unified network-device I/O interface for Xen guest OSes.
2902 + * Permission is hereby granted, free of charge, to any person obtaining a copy
2903 + * of this software and associated documentation files (the "Software"), to
2904 + * deal in the Software without restriction, including without limitation the
2905 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
2906 + * sell copies of the Software, and to permit persons to whom the Software is
2907 + * furnished to do so, subject to the following conditions:
2909 + * The above copyright notice and this permission notice shall be included in
2910 + * all copies or substantial portions of the Software.
2912 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2913 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2914 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
2915 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2916 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2917 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2918 + * DEALINGS IN THE SOFTWARE.
2920 * Copyright (c) 2003-2004, Keir Fraser
2924 #define _NETTXF_extra_info (3)
2925 #define NETTXF_extra_info (1U<<_NETTXF_extra_info)
2927 -struct xen_netif_tx_request {
2928 +struct netif_tx_request {
2929 grant_ref_t gref; /* Reference to buffer page */
2930 uint16_t offset; /* Offset within buffer page */
2931 uint16_t flags; /* NETTXF_* */
2932 uint16_t id; /* Echoed in response message. */
2933 uint16_t size; /* Packet size in bytes. */
2935 +typedef struct netif_tx_request netif_tx_request_t;
2937 /* Types of netif_extra_info descriptors. */
2938 -#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
2939 -#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
2940 -#define XEN_NETIF_EXTRA_TYPE_MAX (2)
2941 +#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
2942 +#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
2943 +#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
2944 +#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
2945 +#define XEN_NETIF_EXTRA_TYPE_MAX (4)
2947 /* netif_extra_info flags. */
2948 #define _XEN_NETIF_EXTRA_FLAG_MORE (0)
2949 @@ -71,49 +92,68 @@ struct xen_netif_tx_request {
2950 * This structure needs to fit within both netif_tx_request and
2951 * netif_rx_response for compatibility.
2953 -struct xen_netif_extra_info {
2954 - uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
2955 - uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
2960 - * Maximum payload size of each segment. For
2961 - * example, for TCP this is just the path MSS.
2966 - * GSO type. This determines the protocol of
2967 - * the packet and any extra features required
2968 - * to segment the packet properly.
2970 - uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
2972 - /* Future expansion. */
2976 - * GSO features. This specifies any extra GSO
2977 - * features required to process this packet,
2978 - * such as ECN support for TCPv4.
2980 - uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
2982 +struct netif_extra_info {
2983 + uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
2984 + uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
2988 + * XEN_NETIF_EXTRA_TYPE_GSO:
2992 + * Maximum payload size of each segment. For example, for TCP this
2993 + * is just the path MSS.
2998 + * GSO type. This determines the protocol of the packet and any
2999 + * extra features required to segment the packet properly.
3001 + uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
3003 + /* Future expansion. */
3007 + * GSO features. This specifies any extra GSO features required
3008 + * to process this packet, such as ECN support for TCPv4.
3010 + uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
3014 + * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
3015 + * Backend advertises availability via 'feature-multicast-control'
3016 + * xenbus node containing value '1'.
3017 + * Frontend requests this feature by advertising
3018 + * 'request-multicast-control' xenbus node containing value '1'.
3019 + * If multicast control is requested then multicast flooding is
3020 + * disabled and the frontend must explicitly register its interest
3021 + * in multicast groups using dummy transmit requests containing
3022 + * MCAST_{ADD,DEL} extra-info fragments.
3025 + uint8_t addr[6]; /* Address to add/remove. */
3033 +typedef struct netif_extra_info netif_extra_info_t;
3035 -struct xen_netif_tx_response {
3037 - int16_t status; /* NETIF_RSP_* */
3038 +struct netif_tx_response {
3040 + int16_t status; /* NETIF_RSP_* */
3042 +typedef struct netif_tx_response netif_tx_response_t;
3044 -struct xen_netif_rx_request {
3045 - uint16_t id; /* Echoed in response message. */
3046 - grant_ref_t gref; /* Reference to incoming granted frame */
3047 +struct netif_rx_request {
3048 + uint16_t id; /* Echoed in response message. */
3049 + grant_ref_t gref; /* Reference to incoming granted frame */
3051 +typedef struct netif_rx_request netif_rx_request_t;
3053 /* Packet data has been validated against protocol checksum. */
3054 #define _NETRXF_data_validated (0)
3055 @@ -131,23 +171,20 @@ struct xen_netif_rx_request {
3056 #define _NETRXF_extra_info (3)
3057 #define NETRXF_extra_info (1U<<_NETRXF_extra_info)
3059 -struct xen_netif_rx_response {
3060 +struct netif_rx_response {
3062 uint16_t offset; /* Offset in page of start of received packet */
3063 uint16_t flags; /* NETRXF_* */
3064 int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
3066 +typedef struct netif_rx_response netif_rx_response_t;
3069 * Generate netif ring structures and types.
3072 -DEFINE_RING_TYPES(xen_netif_tx,
3073 - struct xen_netif_tx_request,
3074 - struct xen_netif_tx_response);
3075 -DEFINE_RING_TYPES(xen_netif_rx,
3076 - struct xen_netif_rx_request,
3077 - struct xen_netif_rx_response);
3078 +DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response);
3079 +DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
3081 #define NETIF_RSP_DROPPED -2
3082 #define NETIF_RSP_ERROR -1
3083 @@ -156,3 +193,13 @@ DEFINE_RING_TYPES(xen_netif_rx,
3084 #define NETIF_RSP_NULL 1
3089 + * Local variables:
3091 + * c-set-style: "BSD"
3092 + * c-basic-offset: 4
3094 + * indent-tabs-mode: nil
3097 Index: head-2008-11-25/include/xen/interface/io/protocols.h
3098 ===================================================================
3099 --- head-2008-11-25.orig/include/xen/interface/io/protocols.h 2008-11-25 12:33:06.000000000 +0100
3100 +++ head-2008-11-25/include/xen/interface/io/protocols.h 2008-11-25 12:35:56.000000000 +0100
3102 +/******************************************************************************
3105 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3106 + * of this software and associated documentation files (the "Software"), to
3107 + * deal in the Software without restriction, including without limitation the
3108 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3109 + * sell copies of the Software, and to permit persons to whom the Software is
3110 + * furnished to do so, subject to the following conditions:
3112 + * The above copyright notice and this permission notice shall be included in
3113 + * all copies or substantial portions of the Software.
3115 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3116 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3117 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3118 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3119 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3120 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3121 + * DEALINGS IN THE SOFTWARE.
3124 #ifndef __XEN_PROTOCOLS_H__
3125 #define __XEN_PROTOCOLS_H__
3127 #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
3128 #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
3129 #define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
3130 -#define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi"
3132 #if defined(__i386__)
3133 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
3135 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
3136 #elif defined(__ia64__)
3137 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
3138 -#elif defined(__powerpc64__)
3139 -# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
3141 # error arch fixup needed here
3143 Index: head-2008-11-25/include/xen/interface/io/ring.h
3144 ===================================================================
3145 --- head-2008-11-25.orig/include/xen/interface/io/ring.h 2008-11-25 12:33:06.000000000 +0100
3146 +++ head-2008-11-25/include/xen/interface/io/ring.h 2008-11-25 12:35:56.000000000 +0100
3149 * Shared producer-consumer ring macros.
3151 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3152 + * of this software and associated documentation files (the "Software"), to
3153 + * deal in the Software without restriction, including without limitation the
3154 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3155 + * sell copies of the Software, and to permit persons to whom the Software is
3156 + * furnished to do so, subject to the following conditions:
3158 + * The above copyright notice and this permission notice shall be included in
3159 + * all copies or substantial portions of the Software.
3161 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3162 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3163 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3164 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3165 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3166 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3167 + * DEALINGS IN THE SOFTWARE.
3169 * Tim Deegan and Andrew Warfield November 2004.
3172 #ifndef __XEN_PUBLIC_IO_RING_H__
3173 #define __XEN_PUBLIC_IO_RING_H__
3175 +#include "../xen-compat.h"
3177 +#if __XEN_INTERFACE_VERSION__ < 0x00030208
3178 +#define xen_mb() mb()
3179 +#define xen_rmb() rmb()
3180 +#define xen_wmb() wmb()
3183 typedef unsigned int RING_IDX;
3185 /* Round a 32-bit unsigned constant down to the nearest power of two. */
3186 -#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
3187 +#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
3188 #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
3189 #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
3190 #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
3191 @@ -25,73 +51,76 @@ typedef unsigned int RING_IDX;
3192 * power of two (so we can mask with (size-1) to loop around).
3194 #define __RING_SIZE(_s, _sz) \
3195 - (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
3196 + (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
3199 * Macros to make the correct C datatypes for a new kind of ring.
3201 * To make a new ring datatype, you need to have two message structures,
3202 - * let's say struct request, and struct response already defined.
3203 + * let's say request_t, and response_t already defined.
3205 * In a header where you want the ring datatype declared, you then do:
3207 - * DEFINE_RING_TYPES(mytag, struct request, struct response);
3208 + * DEFINE_RING_TYPES(mytag, request_t, response_t);
3210 * These expand out to give you a set of types, as you can see below.
3211 * The most important of these are:
3213 - * struct mytag_sring - The shared ring.
3214 - * struct mytag_front_ring - The 'front' half of the ring.
3215 - * struct mytag_back_ring - The 'back' half of the ring.
3216 + * mytag_sring_t - The shared ring.
3217 + * mytag_front_ring_t - The 'front' half of the ring.
3218 + * mytag_back_ring_t - The 'back' half of the ring.
3220 * To initialize a ring in your code you need to know the location and size
3221 * of the shared memory area (PAGE_SIZE, for instance). To initialise
3224 - * struct mytag_front_ring front_ring;
3225 - * SHARED_RING_INIT((struct mytag_sring *)shared_page);
3226 - * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
3228 + * mytag_front_ring_t front_ring;
3229 + * SHARED_RING_INIT((mytag_sring_t *)shared_page);
3230 + * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
3232 * Initializing the back follows similarly (note that only the front
3233 * initializes the shared ring):
3235 - * struct mytag_back_ring back_ring;
3236 - * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
3238 + * mytag_back_ring_t back_ring;
3239 + * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
3242 -#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
3244 -/* Shared ring entry */ \
3245 -union __name##_sring_entry { \
3250 -/* Shared ring page */ \
3251 -struct __name##_sring { \
3252 - RING_IDX req_prod, req_event; \
3253 - RING_IDX rsp_prod, rsp_event; \
3254 - uint8_t pad[48]; \
3255 - union __name##_sring_entry ring[1]; /* variable-length */ \
3258 -/* "Front" end's private variables */ \
3259 -struct __name##_front_ring { \
3260 - RING_IDX req_prod_pvt; \
3261 - RING_IDX rsp_cons; \
3262 - unsigned int nr_ents; \
3263 - struct __name##_sring *sring; \
3266 -/* "Back" end's private variables */ \
3267 -struct __name##_back_ring { \
3268 - RING_IDX rsp_prod_pvt; \
3269 - RING_IDX req_cons; \
3270 - unsigned int nr_ents; \
3271 - struct __name##_sring *sring; \
3273 +#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
3275 +/* Shared ring entry */ \
3276 +union __name##_sring_entry { \
3281 +/* Shared ring page */ \
3282 +struct __name##_sring { \
3283 + RING_IDX req_prod, req_event; \
3284 + RING_IDX rsp_prod, rsp_event; \
3285 + uint8_t pad[48]; \
3286 + union __name##_sring_entry ring[1]; /* variable-length */ \
3289 +/* "Front" end's private variables */ \
3290 +struct __name##_front_ring { \
3291 + RING_IDX req_prod_pvt; \
3292 + RING_IDX rsp_cons; \
3293 + unsigned int nr_ents; \
3294 + struct __name##_sring *sring; \
3297 +/* "Back" end's private variables */ \
3298 +struct __name##_back_ring { \
3299 + RING_IDX rsp_prod_pvt; \
3300 + RING_IDX req_cons; \
3301 + unsigned int nr_ents; \
3302 + struct __name##_sring *sring; \
3305 +/* Syntactic sugar */ \
3306 +typedef struct __name##_sring __name##_sring_t; \
3307 +typedef struct __name##_front_ring __name##_front_ring_t; \
3308 +typedef struct __name##_back_ring __name##_back_ring_t
3311 * Macros for manipulating rings.
3312 @@ -109,86 +138,94 @@ struct __name##_back_ring { \
3315 /* Initialising empty rings */
3316 -#define SHARED_RING_INIT(_s) do { \
3317 - (_s)->req_prod = (_s)->rsp_prod = 0; \
3318 - (_s)->req_event = (_s)->rsp_event = 1; \
3319 - memset((_s)->pad, 0, sizeof((_s)->pad)); \
3320 +#define SHARED_RING_INIT(_s) do { \
3321 + (_s)->req_prod = (_s)->rsp_prod = 0; \
3322 + (_s)->req_event = (_s)->rsp_event = 1; \
3323 + (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \
3326 -#define FRONT_RING_INIT(_r, _s, __size) do { \
3327 - (_r)->req_prod_pvt = 0; \
3328 - (_r)->rsp_cons = 0; \
3329 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3330 - (_r)->sring = (_s); \
3331 +#define FRONT_RING_INIT(_r, _s, __size) do { \
3332 + (_r)->req_prod_pvt = 0; \
3333 + (_r)->rsp_cons = 0; \
3334 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3335 + (_r)->sring = (_s); \
3338 -#define BACK_RING_INIT(_r, _s, __size) do { \
3339 - (_r)->rsp_prod_pvt = 0; \
3340 - (_r)->req_cons = 0; \
3341 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3342 - (_r)->sring = (_s); \
3343 +#define BACK_RING_INIT(_r, _s, __size) do { \
3344 + (_r)->rsp_prod_pvt = 0; \
3345 + (_r)->req_cons = 0; \
3346 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3347 + (_r)->sring = (_s); \
3350 /* Initialize to existing shared indexes -- for recovery */
3351 -#define FRONT_RING_ATTACH(_r, _s, __size) do { \
3352 - (_r)->sring = (_s); \
3353 - (_r)->req_prod_pvt = (_s)->req_prod; \
3354 - (_r)->rsp_cons = (_s)->rsp_prod; \
3355 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3356 +#define FRONT_RING_ATTACH(_r, _s, __size) do { \
3357 + (_r)->sring = (_s); \
3358 + (_r)->req_prod_pvt = (_s)->req_prod; \
3359 + (_r)->rsp_cons = (_s)->rsp_prod; \
3360 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3363 -#define BACK_RING_ATTACH(_r, _s, __size) do { \
3364 - (_r)->sring = (_s); \
3365 - (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
3366 - (_r)->req_cons = (_s)->req_prod; \
3367 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3368 +#define BACK_RING_ATTACH(_r, _s, __size) do { \
3369 + (_r)->sring = (_s); \
3370 + (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
3371 + (_r)->req_cons = (_s)->req_prod; \
3372 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3375 /* How big is this ring? */
3376 -#define RING_SIZE(_r) \
3377 +#define RING_SIZE(_r) \
3380 /* Number of free requests (for use on front side only). */
3381 -#define RING_FREE_REQUESTS(_r) \
3382 +#define RING_FREE_REQUESTS(_r) \
3383 (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
3385 /* Test if there is an empty slot available on the front ring.
3386 * (This is only meaningful from the front. )
3388 -#define RING_FULL(_r) \
3389 +#define RING_FULL(_r) \
3390 (RING_FREE_REQUESTS(_r) == 0)
3392 /* Test if there are outstanding messages to be processed on a ring. */
3393 -#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
3394 +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
3395 ((_r)->sring->rsp_prod - (_r)->rsp_cons)
3397 -#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
3399 - unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
3400 - unsigned int rsp = RING_SIZE(_r) - \
3401 - ((_r)->req_cons - (_r)->rsp_prod_pvt); \
3402 - req < rsp ? req : rsp; \
3405 +#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
3406 + unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
3407 + unsigned int rsp = RING_SIZE(_r) - \
3408 + ((_r)->req_cons - (_r)->rsp_prod_pvt); \
3409 + req < rsp ? req : rsp; \
3412 +/* Same as above, but without the nice GCC ({ ... }) syntax. */
3413 +#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
3414 + ((((_r)->sring->req_prod - (_r)->req_cons) < \
3415 + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
3416 + ((_r)->sring->req_prod - (_r)->req_cons) : \
3417 + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
3420 /* Direct access to individual ring elements, by index. */
3421 -#define RING_GET_REQUEST(_r, _idx) \
3422 +#define RING_GET_REQUEST(_r, _idx) \
3423 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
3425 -#define RING_GET_RESPONSE(_r, _idx) \
3426 +#define RING_GET_RESPONSE(_r, _idx) \
3427 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
3429 /* Loop termination condition: Would the specified index overflow the ring? */
3430 -#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
3431 +#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
3432 (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
3434 -#define RING_PUSH_REQUESTS(_r) do { \
3435 - wmb(); /* back sees requests /before/ updated producer index */ \
3436 - (_r)->sring->req_prod = (_r)->req_prod_pvt; \
3437 +#define RING_PUSH_REQUESTS(_r) do { \
3438 + xen_wmb(); /* back sees requests /before/ updated producer index */ \
3439 + (_r)->sring->req_prod = (_r)->req_prod_pvt; \
3442 -#define RING_PUSH_RESPONSES(_r) do { \
3443 - wmb(); /* front sees responses /before/ updated producer index */ \
3444 - (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
3445 +#define RING_PUSH_RESPONSES(_r) do { \
3446 + xen_wmb(); /* front sees resps /before/ updated producer index */ \
3447 + (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
3451 @@ -221,40 +258,50 @@ struct __name##_back_ring { \
3452 * field appropriately.
3455 -#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
3456 - RING_IDX __old = (_r)->sring->req_prod; \
3457 - RING_IDX __new = (_r)->req_prod_pvt; \
3458 - wmb(); /* back sees requests /before/ updated producer index */ \
3459 - (_r)->sring->req_prod = __new; \
3460 - mb(); /* back sees new requests /before/ we check req_event */ \
3461 - (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
3462 - (RING_IDX)(__new - __old)); \
3463 +#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
3464 + RING_IDX __old = (_r)->sring->req_prod; \
3465 + RING_IDX __new = (_r)->req_prod_pvt; \
3466 + xen_wmb(); /* back sees requests /before/ updated producer index */ \
3467 + (_r)->sring->req_prod = __new; \
3468 + xen_mb(); /* back sees new requests /before/ we check req_event */ \
3469 + (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
3470 + (RING_IDX)(__new - __old)); \
3473 -#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
3474 - RING_IDX __old = (_r)->sring->rsp_prod; \
3475 - RING_IDX __new = (_r)->rsp_prod_pvt; \
3476 - wmb(); /* front sees responses /before/ updated producer index */ \
3477 - (_r)->sring->rsp_prod = __new; \
3478 - mb(); /* front sees new responses /before/ we check rsp_event */ \
3479 - (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
3480 - (RING_IDX)(__new - __old)); \
3481 +#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
3482 + RING_IDX __old = (_r)->sring->rsp_prod; \
3483 + RING_IDX __new = (_r)->rsp_prod_pvt; \
3484 + xen_wmb(); /* front sees resps /before/ updated producer index */ \
3485 + (_r)->sring->rsp_prod = __new; \
3486 + xen_mb(); /* front sees new resps /before/ we check rsp_event */ \
3487 + (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
3488 + (RING_IDX)(__new - __old)); \
3491 -#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
3492 - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3493 - if (_work_to_do) break; \
3494 - (_r)->sring->req_event = (_r)->req_cons + 1; \
3496 - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3497 +#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
3498 + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3499 + if (_work_to_do) break; \
3500 + (_r)->sring->req_event = (_r)->req_cons + 1; \
3502 + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3505 -#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
3506 - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3507 - if (_work_to_do) break; \
3508 - (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
3510 - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3511 +#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
3512 + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3513 + if (_work_to_do) break; \
3514 + (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
3516 + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3519 #endif /* __XEN_PUBLIC_IO_RING_H__ */
3522 + * Local variables:
3524 + * c-set-style: "BSD"
3525 + * c-basic-offset: 4
3527 + * indent-tabs-mode: nil
3530 Index: head-2008-11-25/include/xen/interface/io/xenbus.h
3531 ===================================================================
3532 --- head-2008-11-25.orig/include/xen/interface/io/xenbus.h 2008-11-25 12:33:06.000000000 +0100
3533 +++ head-2008-11-25/include/xen/interface/io/xenbus.h 2008-11-25 12:35:56.000000000 +0100
3536 * Xenbus protocol details.
3538 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3539 + * of this software and associated documentation files (the "Software"), to
3540 + * deal in the Software without restriction, including without limitation the
3541 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3542 + * sell copies of the Software, and to permit persons to whom the Software is
3543 + * furnished to do so, subject to the following conditions:
3545 + * The above copyright notice and this permission notice shall be included in
3546 + * all copies or substantial portions of the Software.
3548 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3549 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3550 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3551 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3552 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3553 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3554 + * DEALINGS IN THE SOFTWARE.
3556 * Copyright (C) 2005 XenSource Ltd.
3559 #ifndef _XEN_PUBLIC_IO_XENBUS_H
3560 #define _XEN_PUBLIC_IO_XENBUS_H
3562 -/* The state of either end of the Xenbus, i.e. the current communication
3563 - status of initialisation across the bus. States here imply nothing about
3564 - the state of the connection between the driver and the kernel's device
3568 - XenbusStateUnknown = 0,
3569 - XenbusStateInitialising = 1,
3570 - XenbusStateInitWait = 2, /* Finished early
3571 - initialisation, but waiting
3572 - for information from the peer
3573 - or hotplug scripts. */
3574 - XenbusStateInitialised = 3, /* Initialised and waiting for a
3575 - connection from the peer. */
3576 - XenbusStateConnected = 4,
3577 - XenbusStateClosing = 5, /* The device is being closed
3578 - due to an error or an unplug
3580 - XenbusStateClosed = 6
3582 + * The state of either end of the Xenbus, i.e. the current communication
3583 + * status of initialisation across the bus. States here imply nothing about
3584 + * the state of the connection between the driver and the kernel's device
3587 +enum xenbus_state {
3588 + XenbusStateUnknown = 0,
3590 + XenbusStateInitialising = 1,
3593 + * InitWait: Finished early initialisation but waiting for information
3594 + * from the peer or hotplug scripts.
3596 + XenbusStateInitWait = 2,
3599 + * Initialised: Waiting for a connection from the peer.
3601 + XenbusStateInitialised = 3,
3603 + XenbusStateConnected = 4,
3606 + * Closing: The device is being closed due to an error or an unplug event.
3608 + XenbusStateClosing = 5,
3610 + XenbusStateClosed = 6,
3613 + * Reconfiguring: The device is being reconfigured.
3615 + XenbusStateReconfiguring = 7,
3617 + XenbusStateReconfigured = 8
3619 +typedef enum xenbus_state XenbusState;
3621 #endif /* _XEN_PUBLIC_IO_XENBUS_H */
3625 - * c-file-style: "linux"
3626 - * indent-tabs-mode: t
3627 - * c-indent-level: 8
3628 - * c-basic-offset: 8
3631 + * c-set-style: "BSD"
3632 + * c-basic-offset: 4
3634 + * indent-tabs-mode: nil
3637 Index: head-2008-11-25/include/xen/interface/io/xs_wire.h
3638 ===================================================================
3639 --- head-2008-11-25.orig/include/xen/interface/io/xs_wire.h 2008-11-25 12:33:06.000000000 +0100
3640 +++ head-2008-11-25/include/xen/interface/io/xs_wire.h 2008-11-25 12:35:56.000000000 +0100
3643 * Details of the "wire" protocol between Xen Store Daemon and client
3644 * library or guest kernel.
3646 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3647 + * of this software and associated documentation files (the "Software"), to
3648 + * deal in the Software without restriction, including without limitation the
3649 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3650 + * sell copies of the Software, and to permit persons to whom the Software is
3651 + * furnished to do so, subject to the following conditions:
3653 + * The above copyright notice and this permission notice shall be included in
3654 + * all copies or substantial portions of the Software.
3656 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3657 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3658 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3659 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3660 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3661 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3662 + * DEALINGS IN THE SOFTWARE.
3664 * Copyright (C) 2005 Rusty Russell IBM Corporation
3667 @@ -26,7 +45,9 @@ enum xsd_sockmsg_type
3671 - XS_IS_DOMAIN_INTRODUCED
3672 + XS_IS_DOMAIN_INTRODUCED,
3677 #define XS_WRITE_NONE "NONE"
3678 @@ -40,7 +61,12 @@ struct xsd_errors
3679 const char *errstring;
3681 #define XSD_ERROR(x) { x, #x }
3682 -static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
3683 +/* LINTED: static unused */
3684 +static struct xsd_errors xsd_errors[]
3685 +#if defined(__GNUC__)
3686 +__attribute__((unused))
3692 @@ -84,4 +110,21 @@ struct xenstore_domain_interface {
3693 XENSTORE_RING_IDX rsp_cons, rsp_prod;
3696 +/* Violating this is very bad. See docs/misc/xenstore.txt. */
3697 +#define XENSTORE_PAYLOAD_MAX 4096
3699 +/* Violating these just gets you an error back */
3700 +#define XENSTORE_ABS_PATH_MAX 3072
3701 +#define XENSTORE_REL_PATH_MAX 2048
3703 #endif /* _XS_WIRE_H */
3706 + * Local variables:
3708 + * c-set-style: "BSD"
3709 + * c-basic-offset: 4
3711 + * indent-tabs-mode: nil
3714 Index: head-2008-11-25/include/xen/interface/memory.h
3715 ===================================================================
3716 --- head-2008-11-25.orig/include/xen/interface/memory.h 2008-11-25 12:33:06.000000000 +0100
3717 +++ head-2008-11-25/include/xen/interface/memory.h 2008-11-25 12:35:56.000000000 +0100
3720 * Memory reservation and information.
3722 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3723 + * of this software and associated documentation files (the "Software"), to
3724 + * deal in the Software without restriction, including without limitation the
3725 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3726 + * sell copies of the Software, and to permit persons to whom the Software is
3727 + * furnished to do so, subject to the following conditions:
3729 + * The above copyright notice and this permission notice shall be included in
3730 + * all copies or substantial portions of the Software.
3732 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3733 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3734 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3735 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3736 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3737 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3738 + * DEALINGS IN THE SOFTWARE.
3740 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
3744 #define __XEN_PUBLIC_MEMORY_H__
3747 - * Increase or decrease the specified domain's memory reservation. Returns a
3748 - * -ve errcode on failure, or the # extents successfully allocated or freed.
3749 + * Increase or decrease the specified domain's memory reservation. Returns the
3750 + * number of extents successfully allocated or freed.
3751 * arg == addr of struct xen_memory_reservation.
3753 #define XENMEM_increase_reservation 0
3754 #define XENMEM_decrease_reservation 1
3755 #define XENMEM_populate_physmap 6
3757 +#if __XEN_INTERFACE_VERSION__ >= 0x00030209
3759 + * Maximum # bits addressable by the user of the allocated region (e.g., I/O
3760 + * devices often have a 32-bit limitation even in 64-bit systems). If zero
3761 + * then the user has no addressing restriction. This field is not used by
3762 + * XENMEM_decrease_reservation.
3764 +#define XENMEMF_address_bits(x) (x)
3765 +#define XENMEMF_get_address_bits(x) ((x) & 0xffu)
3766 +/* NUMA node to allocate from. */
3767 +#define XENMEMF_node(x) (((x) + 1) << 8)
3768 +#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
3771 struct xen_memory_reservation {
3774 @@ -29,19 +62,18 @@ struct xen_memory_reservation {
3775 * OUT: GMFN bases of extents that were allocated
3776 * (NB. This command also updates the mach_to_phys translation table)
3778 - GUEST_HANDLE(ulong) extent_start;
3779 + XEN_GUEST_HANDLE(ulong) extent_start;
3781 /* Number of extents, and size/alignment of each (2^extent_order pages). */
3782 - unsigned long nr_extents;
3783 + xen_ulong_t nr_extents;
3784 unsigned int extent_order;
3787 - * Maximum # bits addressable by the user of the allocated region (e.g.,
3788 - * I/O devices often have a 32-bit limitation even in 64-bit systems). If
3789 - * zero then the user has no addressing restriction.
3790 - * This field is not used by XENMEM_decrease_reservation.
3792 +#if __XEN_INTERFACE_VERSION__ >= 0x00030209
3793 + /* XENMEMF flags. */
3794 + unsigned int mem_flags;
3796 unsigned int address_bits;
3800 * Domain whose reservation is being changed.
3801 @@ -50,7 +82,51 @@ struct xen_memory_reservation {
3805 -DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation);
3806 +typedef struct xen_memory_reservation xen_memory_reservation_t;
3807 +DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
3810 + * An atomic exchange of memory pages. If return code is zero then
3811 + * @out.extent_list provides GMFNs of the newly-allocated memory.
3812 + * Returns zero on complete success, otherwise a negative error code.
3813 + * On complete success then always @nr_exchanged == @in.nr_extents.
3814 + * On partial success @nr_exchanged indicates how much work was done.
3816 +#define XENMEM_exchange 11
3817 +struct xen_memory_exchange {
3819 + * [IN] Details of memory extents to be exchanged (GMFN bases).
3820 + * Note that @in.address_bits is ignored and unused.
3822 + struct xen_memory_reservation in;
3825 + * [IN/OUT] Details of new memory extents.
3826 + * We require that:
3827 + * 1. @in.domid == @out.domid
3828 + * 2. @in.nr_extents << @in.extent_order ==
3829 + * @out.nr_extents << @out.extent_order
3830 + * 3. @in.extent_start and @out.extent_start lists must not overlap
3831 + * 4. @out.extent_start lists GPFN bases to be populated
3832 + * 5. @out.extent_start is overwritten with allocated GMFN bases
3834 + struct xen_memory_reservation out;
3837 + * [OUT] Number of input extents that were successfully exchanged:
3838 + * 1. The first @nr_exchanged input extents were successfully
3840 + * 2. The corresponding first entries in the output extent list correctly
3841 + * indicate the GMFNs that were successfully exchanged.
3842 + * 3. All other input and output extents are untouched.
3843 + * 4. If not all input exents are exchanged then the return code of this
3844 + * command will be non-zero.
3845 + * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
3847 + xen_ulong_t nr_exchanged;
3849 +typedef struct xen_memory_exchange xen_memory_exchange_t;
3850 +DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
3853 * Returns the maximum machine frame number of mapped RAM in this system.
3854 @@ -68,6 +144,11 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_re
3855 #define XENMEM_maximum_reservation 4
3858 + * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
3860 +#define XENMEM_maximum_gpfn 14
3863 * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
3864 * mapping table. Architectures which do not have a m2p table do not implement
3866 @@ -86,7 +167,7 @@ struct xen_machphys_mfn_list {
3867 * any large discontiguities in the machine address space, 2MB gaps in
3868 * the machphys table will be represented by an MFN base of zero.
3870 - GUEST_HANDLE(ulong) extent_start;
3871 + XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
3874 * Number of extents written to the above array. This will be smaller
3875 @@ -94,7 +175,22 @@ struct xen_machphys_mfn_list {
3877 unsigned int nr_extents;
3879 -DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list);
3880 +typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
3881 +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
3884 + * Returns the location in virtual address space of the machine_to_phys
3885 + * mapping table. Architectures which do not have a m2p table, or which do not
3886 + * map it by default into guest address space, do not implement this command.
3887 + * arg == addr of xen_machphys_mapping_t.
3889 +#define XENMEM_machphys_mapping 12
3890 +struct xen_machphys_mapping {
3891 + xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */
3892 + xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */
3894 +typedef struct xen_machphys_mapping xen_machphys_mapping_t;
3895 +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
3898 * Sets the GPFN at which a particular page appears in the specified guest's
3899 @@ -109,15 +205,33 @@ struct xen_add_to_physmap {
3900 /* Source mapping space. */
3901 #define XENMAPSPACE_shared_info 0 /* shared info page */
3902 #define XENMAPSPACE_grant_table 1 /* grant table page */
3903 +#define XENMAPSPACE_mfn 2 /* usual MFN */
3906 /* Index into source mapping space. */
3907 - unsigned long idx;
3910 /* GPFN where the source mapping page should appear. */
3911 - unsigned long gpfn;
3914 -DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
3915 +typedef struct xen_add_to_physmap xen_add_to_physmap_t;
3916 +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
3919 + * Unmaps the page appearing at a particular GPFN from the specified guest's
3920 + * pseudophysical address space.
3921 + * arg == addr of xen_remove_from_physmap_t.
3923 +#define XENMEM_remove_from_physmap 15
3924 +struct xen_remove_from_physmap {
3925 + /* Which domain to change the mapping for. */
3928 + /* GPFN of the current mapping of the page. */
3931 +typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
3932 +DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
3935 * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
3936 @@ -129,17 +243,71 @@ struct xen_translate_gpfn_list {
3939 /* Length of list. */
3940 - unsigned long nr_gpfns;
3941 + xen_ulong_t nr_gpfns;
3943 /* List of GPFNs to translate. */
3944 - GUEST_HANDLE(ulong) gpfn_list;
3945 + XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list;
3948 * Output list to contain MFN translations. May be the same as the input
3949 * list (in which case each input GPFN is overwritten with the output MFN).
3951 - GUEST_HANDLE(ulong) mfn_list;
3952 + XEN_GUEST_HANDLE(xen_pfn_t) mfn_list;
3954 +typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t;
3955 +DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t);
3958 + * Returns the pseudo-physical memory map as it was when the domain
3959 + * was started (specified by XENMEM_set_memory_map).
3960 + * arg == addr of xen_memory_map_t.
3962 +#define XENMEM_memory_map 9
3963 +struct xen_memory_map {
3965 + * On call the number of entries which can be stored in buffer. On
3966 + * return the number of entries which have been stored in
3969 + unsigned int nr_entries;
3972 + * Entries in the buffer are in the same format as returned by the
3973 + * BIOS INT 0x15 EAX=0xE820 call.
3975 + XEN_GUEST_HANDLE(void) buffer;
3977 +typedef struct xen_memory_map xen_memory_map_t;
3978 +DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
3981 + * Returns the real physical memory map. Passes the same structure as
3982 + * XENMEM_memory_map.
3983 + * arg == addr of xen_memory_map_t.
3985 +#define XENMEM_machine_memory_map 10
3988 + * Set the pseudo-physical memory map of a domain, as returned by
3989 + * XENMEM_memory_map.
3990 + * arg == addr of xen_foreign_memory_map_t.
3992 +#define XENMEM_set_memory_map 13
3993 +struct xen_foreign_memory_map {
3995 + struct xen_memory_map map;
3997 -DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
3998 +typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
3999 +DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
4001 #endif /* __XEN_PUBLIC_MEMORY_H__ */
4004 + * Local variables:
4006 + * c-set-style: "BSD"
4007 + * c-basic-offset: 4
4009 + * indent-tabs-mode: nil
4012 Index: head-2008-11-25/include/xen/interface/physdev.h
4013 ===================================================================
4014 --- head-2008-11-25.orig/include/xen/interface/physdev.h 2008-11-25 12:33:06.000000000 +0100
4015 +++ head-2008-11-25/include/xen/interface/physdev.h 2008-11-25 12:35:56.000000000 +0100
4018 * Prototype for this hypercall is:
4019 * int physdev_op(int cmd, void *args)
4020 - * @cmd == PHYSDEVOP_??? (physdev operation).
4021 + * @cmd == PHYSDEVOP_??? (physdev operation).
4022 * @args == Operation-specific extra arguments (NULL if none).
4025 @@ -32,114 +32,188 @@
4026 * Notify end-of-interrupt (EOI) for the specified IRQ.
4027 * @arg == pointer to physdev_eoi structure.
4029 -#define PHYSDEVOP_eoi 12
4030 +#define PHYSDEVOP_eoi 12
4031 struct physdev_eoi {
4037 +typedef struct physdev_eoi physdev_eoi_t;
4038 +DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
4041 * Query the status of an IRQ line.
4042 * @arg == pointer to physdev_irq_status_query structure.
4044 -#define PHYSDEVOP_irq_status_query 5
4045 +#define PHYSDEVOP_irq_status_query 5
4046 struct physdev_irq_status_query {
4050 - uint32_t flags; /* XENIRQSTAT_* */
4054 + uint32_t flags; /* XENIRQSTAT_* */
4056 +typedef struct physdev_irq_status_query physdev_irq_status_query_t;
4057 +DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t);
4059 /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
4060 -#define _XENIRQSTAT_needs_eoi (0)
4061 -#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
4062 +#define _XENIRQSTAT_needs_eoi (0)
4063 +#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
4065 /* IRQ shared by multiple guests? */
4066 -#define _XENIRQSTAT_shared (1)
4067 -#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
4068 +#define _XENIRQSTAT_shared (1)
4069 +#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
4072 * Set the current VCPU's I/O privilege level.
4073 * @arg == pointer to physdev_set_iopl structure.
4075 -#define PHYSDEVOP_set_iopl 6
4076 +#define PHYSDEVOP_set_iopl 6
4077 struct physdev_set_iopl {
4083 +typedef struct physdev_set_iopl physdev_set_iopl_t;
4084 +DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
4087 * Set the current VCPU's I/O-port permissions bitmap.
4088 * @arg == pointer to physdev_set_iobitmap structure.
4090 -#define PHYSDEVOP_set_iobitmap 7
4091 +#define PHYSDEVOP_set_iobitmap 7
4092 struct physdev_set_iobitmap {
4095 - uint32_t nr_ports;
4097 +#if __XEN_INTERFACE_VERSION__ >= 0x00030205
4098 + XEN_GUEST_HANDLE(uint8) bitmap;
4102 + uint32_t nr_ports;
4104 +typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
4105 +DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t);
4108 * Read or write an IO-APIC register.
4109 * @arg == pointer to physdev_apic structure.
4111 -#define PHYSDEVOP_apic_read 8
4112 -#define PHYSDEVOP_apic_write 9
4113 +#define PHYSDEVOP_apic_read 8
4114 +#define PHYSDEVOP_apic_write 9
4115 struct physdev_apic {
4117 - unsigned long apic_physbase;
4122 + unsigned long apic_physbase;
4127 +typedef struct physdev_apic physdev_apic_t;
4128 +DEFINE_XEN_GUEST_HANDLE(physdev_apic_t);
4131 * Allocate or free a physical upcall vector for the specified IRQ line.
4132 * @arg == pointer to physdev_irq structure.
4134 -#define PHYSDEVOP_alloc_irq_vector 10
4135 -#define PHYSDEVOP_free_irq_vector 11
4136 +#define PHYSDEVOP_alloc_irq_vector 10
4137 +#define PHYSDEVOP_free_irq_vector 11
4138 struct physdev_irq {
4148 +typedef struct physdev_irq physdev_irq_t;
4149 +DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
4151 +#define MAP_PIRQ_TYPE_MSI 0x0
4152 +#define MAP_PIRQ_TYPE_GSI 0x1
4153 +#define MAP_PIRQ_TYPE_UNKNOWN 0x2
4155 +#define PHYSDEVOP_map_pirq 13
4156 +struct physdev_map_pirq {
4171 + uint64_t table_base;
4173 +typedef struct physdev_map_pirq physdev_map_pirq_t;
4174 +DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t);
4176 +#define PHYSDEVOP_unmap_pirq 14
4177 +struct physdev_unmap_pirq {
4183 +typedef struct physdev_unmap_pirq physdev_unmap_pirq_t;
4184 +DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t);
4186 +#define PHYSDEVOP_manage_pci_add 15
4187 +#define PHYSDEVOP_manage_pci_remove 16
4188 +struct physdev_manage_pci {
4194 +typedef struct physdev_manage_pci physdev_manage_pci_t;
4195 +DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t);
4198 * Argument to physdev_op_compat() hypercall. Superceded by new physdev_op()
4199 * hypercall since 0x00030202.
4204 - struct physdev_irq_status_query irq_status_query;
4205 - struct physdev_set_iopl set_iopl;
4206 - struct physdev_set_iobitmap set_iobitmap;
4207 - struct physdev_apic apic_op;
4208 - struct physdev_irq irq_op;
4212 + struct physdev_irq_status_query irq_status_query;
4213 + struct physdev_set_iopl set_iopl;
4214 + struct physdev_set_iobitmap set_iobitmap;
4215 + struct physdev_apic apic_op;
4216 + struct physdev_irq irq_op;
4219 +typedef struct physdev_op physdev_op_t;
4220 +DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
4223 * Notify that some PIRQ-bound event channels have been unmasked.
4224 * ** This command is obsolete since interface version 0x00030202 and is **
4225 - * ** unsupported by newer versions of Xen. **
4226 + * ** unsupported by newer versions of Xen. **
4228 -#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
4229 +#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
4232 * These all-capitals physdev operation names are superceded by the new names
4233 * (defined above) since interface version 0x00030202.
4235 -#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
4236 -#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
4237 -#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
4238 -#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
4239 -#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
4240 -#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
4241 -#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
4242 +#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
4243 +#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
4244 +#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
4245 +#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
4246 +#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
4247 +#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
4248 +#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
4249 #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
4250 -#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
4251 +#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
4253 #endif /* __XEN_PUBLIC_PHYSDEV_H__ */
4256 + * Local variables:
4258 + * c-set-style: "BSD"
4259 + * c-basic-offset: 4
4261 + * indent-tabs-mode: nil
4264 Index: head-2008-11-25/include/xen/interface/sched.h
4265 ===================================================================
4266 --- head-2008-11-25.orig/include/xen/interface/sched.h 2008-11-25 12:33:06.000000000 +0100
4267 +++ head-2008-11-25/include/xen/interface/sched.h 2008-11-25 12:35:56.000000000 +0100
4270 * Scheduler state interactions
4272 + * Permission is hereby granted, free of charge, to any person obtaining a copy
4273 + * of this software and associated documentation files (the "Software"), to
4274 + * deal in the Software without restriction, including without limitation the
4275 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
4276 + * sell copies of the Software, and to permit persons to whom the Software is
4277 + * furnished to do so, subject to the following conditions:
4279 + * The above copyright notice and this permission notice shall be included in
4280 + * all copies or substantial portions of the Software.
4282 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4283 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4284 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
4285 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4286 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4287 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
4288 + * DEALINGS IN THE SOFTWARE.
4290 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
4296 * The prototype for this hypercall is:
4297 - * long sched_op_new(int cmd, void *arg)
4298 + * long sched_op(int cmd, void *arg)
4299 * @cmd == SCHEDOP_??? (scheduler operation).
4300 * @arg == Operation-specific extra argument(s), as described below.
4303 - * Versions of Xen prior to 3.0.2 provide only the following legacy version
4304 + * Versions of Xen prior to 3.0.2 provided only the following legacy version
4305 * of this hypercall, supporting only the commands yield, block and shutdown:
4306 * long sched_op(int cmd, unsigned long arg)
4307 * @cmd == SCHEDOP_??? (scheduler operation).
4308 * @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
4309 * == SHUTDOWN_* code (SCHEDOP_shutdown)
4310 + * This legacy version is available to new guests as sched_op_compat().
4315 struct sched_shutdown {
4316 unsigned int reason; /* SHUTDOWN_* */
4318 -DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown);
4319 +typedef struct sched_shutdown sched_shutdown_t;
4320 +DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
4323 * Poll a set of event-channel ports. Return when one or more are pending. An
4324 @@ -58,11 +77,26 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_shutdow
4326 #define SCHEDOP_poll 3
4328 - GUEST_HANDLE(evtchn_port_t) ports;
4329 + XEN_GUEST_HANDLE(evtchn_port_t) ports;
4330 unsigned int nr_ports;
4333 -DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
4334 +typedef struct sched_poll sched_poll_t;
4335 +DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
4338 + * Declare a shutdown for another domain. The main use of this function is
4339 + * in interpreting shutdown requests and reasons for fully-virtualized
4340 + * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
4341 + * @arg == pointer to sched_remote_shutdown structure.
4343 +#define SCHEDOP_remote_shutdown 4
4344 +struct sched_remote_shutdown {
4345 + domid_t domain_id; /* Remote domain ID */
4346 + unsigned int reason; /* SHUTDOWN_xxx reason */
4348 +typedef struct sched_remote_shutdown sched_remote_shutdown_t;
4349 +DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
4352 * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
4353 @@ -75,3 +109,13 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
4354 #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
4356 #endif /* __XEN_PUBLIC_SCHED_H__ */
4359 + * Local variables:
4361 + * c-set-style: "BSD"
4362 + * c-basic-offset: 4
4364 + * indent-tabs-mode: nil
4367 Index: head-2008-11-25/include/xen/interface/vcpu.h
4368 ===================================================================
4369 --- head-2008-11-25.orig/include/xen/interface/vcpu.h 2008-11-25 12:33:06.000000000 +0100
4370 +++ head-2008-11-25/include/xen/interface/vcpu.h 2008-11-25 12:35:56.000000000 +0100
4374 * Prototype for this hypercall is:
4375 - * int vcpu_op(int cmd, int vcpuid, void *extra_args)
4376 - * @cmd == VCPUOP_??? (VCPU operation).
4377 - * @vcpuid == VCPU to operate on.
4378 + * int vcpu_op(int cmd, int vcpuid, void *extra_args)
4379 + * @cmd == VCPUOP_??? (VCPU operation).
4380 + * @vcpuid == VCPU to operate on.
4381 * @extra_args == Operation-specific extra arguments (NULL if none).
4385 * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
4387 * @extra_arg == pointer to vcpu_guest_context structure containing initial
4388 - * state for the VCPU.
4389 + * state for the VCPU.
4391 -#define VCPUOP_initialise 0
4392 +#define VCPUOP_initialise 0
4395 * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
4396 * if the VCPU has not been initialised (VCPUOP_initialise).
4398 -#define VCPUOP_up 1
4399 +#define VCPUOP_up 1
4402 * Bring down a VCPU (i.e., make it non-runnable).
4403 * There are a few caveats that callers should observe:
4404 - * 1. This operation may return, and VCPU_is_up may return false, before the
4405 - * VCPU stops running (i.e., the command is asynchronous). It is a good
4406 - * idea to ensure that the VCPU has entered a non-critical loop before
4407 - * bringing it down. Alternatively, this operation is guaranteed
4408 - * synchronous if invoked by the VCPU itself.
4409 - * 2. After a VCPU is initialised, there is currently no way to drop all its
4410 - * references to domain memory. Even a VCPU that is down still holds
4411 - * memory references via its pagetable base pointer and GDT. It is good
4412 - * practise to move a VCPU onto an 'idle' or default page table, LDT and
4413 - * GDT before bringing it down.
4414 + * 1. This operation may return, and VCPU_is_up may return false, before the
4415 + * VCPU stops running (i.e., the command is asynchronous). It is a good
4416 + * idea to ensure that the VCPU has entered a non-critical loop before
4417 + * bringing it down. Alternatively, this operation is guaranteed
4418 + * synchronous if invoked by the VCPU itself.
4419 + * 2. After a VCPU is initialised, there is currently no way to drop all its
4420 + * references to domain memory. Even a VCPU that is down still holds
4421 + * memory references via its pagetable base pointer and GDT. It is good
4422 + * practise to move a VCPU onto an 'idle' or default page table, LDT and
4423 + * GDT before bringing it down.
4425 -#define VCPUOP_down 2
4426 +#define VCPUOP_down 2
4428 /* Returns 1 if the given VCPU is up. */
4429 -#define VCPUOP_is_up 3
4430 +#define VCPUOP_is_up 3
4433 * Return information about the state and running time of a VCPU.
4434 * @extra_arg == pointer to vcpu_runstate_info structure.
4436 -#define VCPUOP_get_runstate_info 4
4437 +#define VCPUOP_get_runstate_info 4
4438 struct vcpu_runstate_info {
4439 - /* VCPU's current state (RUNSTATE_*). */
4441 - /* When was current state entered (system time, ns)? */
4442 - uint64_t state_entry_time;
4444 - * Time spent in each RUNSTATE_* (ns). The sum of these times is
4445 - * guaranteed not to drift from system time.
4448 + /* VCPU's current state (RUNSTATE_*). */
4450 + /* When was current state entered (system time, ns)? */
4451 + uint64_t state_entry_time;
4453 + * Time spent in each RUNSTATE_* (ns). The sum of these times is
4454 + * guaranteed not to drift from system time.
4458 -DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate_info);
4459 +typedef struct vcpu_runstate_info vcpu_runstate_info_t;
4460 +DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);
4462 /* VCPU is currently running on a physical CPU. */
4463 #define RUNSTATE_running 0
4464 @@ -108,47 +109,52 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate
4465 * Register a shared memory area from which the guest may obtain its own
4466 * runstate information without needing to execute a hypercall.
4468 - * 1. The registered address may be virtual or physical, depending on the
4469 - * platform. The virtual address should be registered on x86 systems.
4470 - * 2. Only one shared area may be registered per VCPU. The shared area is
4471 - * updated by the hypervisor each time the VCPU is scheduled. Thus
4472 - * runstate.state will always be RUNSTATE_running and
4473 - * runstate.state_entry_time will indicate the system time at which the
4474 - * VCPU was last scheduled to run.
4475 + * 1. The registered address may be virtual or physical or guest handle,
4476 + * depending on the platform. Virtual address or guest handle should be
4477 + * registered on x86 systems.
4478 + * 2. Only one shared area may be registered per VCPU. The shared area is
4479 + * updated by the hypervisor each time the VCPU is scheduled. Thus
4480 + * runstate.state will always be RUNSTATE_running and
4481 + * runstate.state_entry_time will indicate the system time at which the
4482 + * VCPU was last scheduled to run.
4483 * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
4485 #define VCPUOP_register_runstate_memory_area 5
4486 struct vcpu_register_runstate_memory_area {
4488 - GUEST_HANDLE(vcpu_runstate_info) h;
4489 - struct vcpu_runstate_info *v;
4493 + XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
4494 + struct vcpu_runstate_info *v;
4498 +typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
4499 +DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);
4502 * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
4503 * which can be set via these commands. Periods smaller than one millisecond
4504 * may not be supported.
4506 -#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
4507 -#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
4508 +#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
4509 +#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
4510 struct vcpu_set_periodic_timer {
4511 - uint64_t period_ns;
4512 + uint64_t period_ns;
4514 -DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_periodic_timer);
4515 +typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
4516 +DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);
4519 * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
4520 * timer which can be set via these commands.
4522 -#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
4523 +#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
4524 #define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
4525 struct vcpu_set_singleshot_timer {
4526 - uint64_t timeout_abs_ns;
4527 - uint32_t flags; /* VCPU_SSHOTTMR_??? */
4528 + uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */
4529 + uint32_t flags; /* VCPU_SSHOTTMR_??? */
4531 -DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_singleshot_timer);
4532 +typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
4533 +DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
4535 /* Flags to VCPUOP_set_singleshot_timer. */
4536 /* Require the timeout to be in the future (return -ETIME if it's passed). */
4537 @@ -161,13 +167,47 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_sing
4538 * structure in a convenient place, such as in a per-cpu data area.
4539 * The pointer need not be page aligned, but the structure must not
4540 * cross a page boundary.
4542 + * This may be called only once per vcpu.
4544 -#define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */
4545 +#define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */
4546 struct vcpu_register_vcpu_info {
4547 uint64_t mfn; /* mfn of page to place vcpu_info */
4548 uint32_t offset; /* offset within page */
4549 uint32_t rsvd; /* unused */
4551 -DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info);
4552 +typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
4553 +DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
4555 +/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
4556 +#define VCPUOP_send_nmi 11
4559 + * Get the physical ID information for a pinned vcpu's underlying physical
4560 + * processor. The physical ID informmation is architecture-specific.
4561 + * On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and
4562 + * greater are reserved.
4563 + * This command returns -EINVAL if it is not a valid operation for this VCPU.
4565 +#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */
4566 +struct vcpu_get_physid {
4569 +typedef struct vcpu_get_physid vcpu_get_physid_t;
4570 +DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
4571 +#define xen_vcpu_physid_to_x86_apicid(physid) \
4572 + ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid)))
4573 +#define xen_vcpu_physid_to_x86_acpiid(physid) \
4574 + ((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32)))
4576 #endif /* __XEN_PUBLIC_VCPU_H__ */
4579 + * Local variables:
4581 + * c-set-style: "BSD"
4582 + * c-basic-offset: 4
4584 + * indent-tabs-mode: nil
4587 Index: head-2008-11-25/include/xen/interface/version.h
4588 ===================================================================
4589 --- head-2008-11-25.orig/include/xen/interface/version.h 2008-11-25 12:33:06.000000000 +0100
4590 +++ head-2008-11-25/include/xen/interface/version.h 2008-11-25 12:35:56.000000000 +0100
4593 * Xen version, type, and compile information.
4595 + * Permission is hereby granted, free of charge, to any person obtaining a copy
4596 + * of this software and associated documentation files (the "Software"), to
4597 + * deal in the Software without restriction, including without limitation the
4598 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
4599 + * sell copies of the Software, and to permit persons to whom the Software is
4600 + * furnished to do so, subject to the following conditions:
4602 + * The above copyright notice and this permission notice shall be included in
4603 + * all copies or substantial portions of the Software.
4605 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4606 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4607 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
4608 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4609 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4610 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
4611 + * DEALINGS IN THE SOFTWARE.
4613 * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com>
4614 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
4617 #ifndef __XEN_PUBLIC_VERSION_H__
4618 #define __XEN_PUBLIC_VERSION_H__
4620 -/* NB. All ops return zero on success, except XENVER_version. */
4621 +/* NB. All ops return zero on success, except XENVER_{version,pagesize} */
4623 /* arg == NULL; returns major:minor (16:16). */
4624 #define XENVER_version 0
4626 /* arg == xen_extraversion_t. */
4627 #define XENVER_extraversion 1
4628 -struct xen_extraversion {
4629 - char extraversion[16];
4631 -#define XEN_EXTRAVERSION_LEN (sizeof(struct xen_extraversion))
4632 +typedef char xen_extraversion_t[16];
4633 +#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
4635 /* arg == xen_compile_info_t. */
4636 #define XENVER_compile_info 2
4637 @@ -30,31 +46,46 @@ struct xen_compile_info {
4638 char compile_domain[32];
4639 char compile_date[32];
4641 +typedef struct xen_compile_info xen_compile_info_t;
4643 #define XENVER_capabilities 3
4644 -struct xen_capabilities_info {
4647 -#define XEN_CAPABILITIES_INFO_LEN (sizeof(struct xen_capabilities_info))
4648 +typedef char xen_capabilities_info_t[1024];
4649 +#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
4651 #define XENVER_changeset 4
4652 -struct xen_changeset_info {
4655 -#define XEN_CHANGESET_INFO_LEN (sizeof(struct xen_changeset_info))
4656 +typedef char xen_changeset_info_t[64];
4657 +#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
4659 #define XENVER_platform_parameters 5
4660 struct xen_platform_parameters {
4661 unsigned long virt_start;
4663 +typedef struct xen_platform_parameters xen_platform_parameters_t;
4665 #define XENVER_get_features 6
4666 struct xen_feature_info {
4667 unsigned int submap_idx; /* IN: which 32-bit submap to return */
4668 uint32_t submap; /* OUT: 32-bit submap */
4670 +typedef struct xen_feature_info xen_feature_info_t;
4672 /* Declares the features reported by XENVER_get_features. */
4673 #include "features.h"
4675 +/* arg == NULL; returns host memory page size. */
4676 +#define XENVER_pagesize 7
4678 +/* arg == xen_domain_handle_t. */
4679 +#define XENVER_guest_handle 8
4681 #endif /* __XEN_PUBLIC_VERSION_H__ */
4684 + * Local variables:
4686 + * c-set-style: "BSD"
4687 + * c-basic-offset: 4
4689 + * indent-tabs-mode: nil
4692 Index: head-2008-11-25/include/xen/interface/xen.h
4693 ===================================================================
4694 --- head-2008-11-25.orig/include/xen/interface/xen.h 2008-11-25 12:33:06.000000000 +0100
4695 +++ head-2008-11-25/include/xen/interface/xen.h 2008-11-25 12:35:56.000000000 +0100
4698 * Guest OS interface to Xen.
4700 + * Permission is hereby granted, free of charge, to any person obtaining a copy
4701 + * of this software and associated documentation files (the "Software"), to
4702 + * deal in the Software without restriction, including without limitation the
4703 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
4704 + * sell copies of the Software, and to permit persons to whom the Software is
4705 + * furnished to do so, subject to the following conditions:
4707 + * The above copyright notice and this permission notice shall be included in
4708 + * all copies or substantial portions of the Software.
4710 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4711 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4712 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
4713 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4714 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4715 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
4716 + * DEALINGS IN THE SOFTWARE.
4718 * Copyright (c) 2004, K A Fraser
4721 #ifndef __XEN_PUBLIC_XEN_H__
4722 #define __XEN_PUBLIC_XEN_H__
4724 -#include <asm/xen/interface.h>
4725 +#include "xen-compat.h"
4726 +#ifdef CONFIG_PARAVIRT_XEN
4727 #include <asm/pvclock-abi.h>
4731 - * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
4733 +#if defined(__i386__) || defined(__x86_64__)
4734 +#include "arch-x86/xen.h"
4735 +#elif defined(__ia64__)
4736 +#include "arch-ia64.h"
4738 +#error "Unsupported architecture"
4741 +#ifndef __ASSEMBLY__
4742 +/* Guest handles for primitive C types. */
4743 +DEFINE_XEN_GUEST_HANDLE(char);
4744 +__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
4745 +DEFINE_XEN_GUEST_HANDLE(int);
4746 +__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
4747 +DEFINE_XEN_GUEST_HANDLE(long);
4748 +__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
4749 +DEFINE_XEN_GUEST_HANDLE(void);
4751 +DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
4755 - * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5.
4756 - * EAX = return value
4757 - * (argument registers may be clobbered on return)
4758 - * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6.
4759 - * RAX = return value
4760 - * (argument registers not clobbered on return; RCX, R11 are)
4764 #define __HYPERVISOR_set_trap_table 0
4765 #define __HYPERVISOR_mmu_update 1
4766 #define __HYPERVISOR_set_gdt 2
4767 #define __HYPERVISOR_stack_switch 3
4768 #define __HYPERVISOR_set_callbacks 4
4769 #define __HYPERVISOR_fpu_taskswitch 5
4770 -#define __HYPERVISOR_sched_op 6
4771 -#define __HYPERVISOR_dom0_op 7
4772 +#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
4773 +#define __HYPERVISOR_platform_op 7
4774 #define __HYPERVISOR_set_debugreg 8
4775 #define __HYPERVISOR_get_debugreg 9
4776 #define __HYPERVISOR_update_descriptor 10
4778 #define __HYPERVISOR_multicall 13
4779 #define __HYPERVISOR_update_va_mapping 14
4780 #define __HYPERVISOR_set_timer_op 15
4781 -#define __HYPERVISOR_event_channel_op_compat 16
4782 +#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
4783 #define __HYPERVISOR_xen_version 17
4784 #define __HYPERVISOR_console_io 18
4785 -#define __HYPERVISOR_physdev_op_compat 19
4786 +#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
4787 #define __HYPERVISOR_grant_table_op 20
4788 #define __HYPERVISOR_vm_assist 21
4789 #define __HYPERVISOR_update_va_mapping_otherdomain 22
4791 #define __HYPERVISOR_vcpu_op 24
4792 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
4793 #define __HYPERVISOR_mmuext_op 26
4794 -#define __HYPERVISOR_acm_op 27
4795 +#define __HYPERVISOR_xsm_op 27
4796 #define __HYPERVISOR_nmi_op 28
4797 -#define __HYPERVISOR_sched_op_new 29
4798 +#define __HYPERVISOR_sched_op 29
4799 #define __HYPERVISOR_callback_op 30
4800 #define __HYPERVISOR_xenoprof_op 31
4801 #define __HYPERVISOR_event_channel_op 32
4802 #define __HYPERVISOR_physdev_op 33
4803 #define __HYPERVISOR_hvm_op 34
4804 +#define __HYPERVISOR_sysctl 35
4805 +#define __HYPERVISOR_domctl 36
4806 +#define __HYPERVISOR_kexec_op 37
4808 /* Architecture-specific hypercall definitions. */
4809 #define __HYPERVISOR_arch_0 48
4810 @@ -70,15 +106,46 @@
4811 #define __HYPERVISOR_arch_7 55
4814 + * HYPERCALL COMPATIBILITY.
4817 +/* New sched_op hypercall introduced in 0x00030101. */
4818 +#if __XEN_INTERFACE_VERSION__ < 0x00030101
4819 +#undef __HYPERVISOR_sched_op
4820 +#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
4823 +/* New event-channel and physdev hypercalls introduced in 0x00030202. */
4824 +#if __XEN_INTERFACE_VERSION__ < 0x00030202
4825 +#undef __HYPERVISOR_event_channel_op
4826 +#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
4827 +#undef __HYPERVISOR_physdev_op
4828 +#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
4831 +/* New platform_op hypercall introduced in 0x00030204. */
4832 +#if __XEN_INTERFACE_VERSION__ < 0x00030204
4833 +#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
4837 * VIRTUAL INTERRUPTS
4839 * Virtual interrupts that a guest OS may receive from Xen.
4841 -#define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */
4842 -#define VIRQ_DEBUG 1 /* Request guest to dump debug info. */
4843 -#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */
4844 -#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */
4845 -#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */
4847 + * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
4848 + * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
4849 + * The latter can be allocated only once per guest: they must initially be
4850 + * allocated to VCPU0 but can subsequently be re-bound.
4852 +#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
4853 +#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
4854 +#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
4855 +#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
4856 +#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
4857 +#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
4858 +#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
4859 +#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
4861 /* Architecture-specific VIRQ definitions. */
4862 #define VIRQ_ARCH_0 16
4864 #define VIRQ_ARCH_7 23
4869 * MMU-UPDATE REQUESTS
4871 @@ -166,6 +234,13 @@
4872 * cmd: MMUEXT_SET_LDT
4873 * linear_addr: Linear address of LDT base (NB. must be page-aligned).
4874 * nr_ents: Number of entries in LDT.
4876 + * cmd: MMUEXT_CLEAR_PAGE
4877 + * mfn: Machine frame number to be cleared.
4879 + * cmd: MMUEXT_COPY_PAGE
4880 + * mfn: Machine frame number of the destination page.
4881 + * src_mfn: Machine frame number of the source page.
4883 #define MMUEXT_PIN_L1_TABLE 0
4884 #define MMUEXT_PIN_L2_TABLE 1
4885 @@ -182,24 +257,34 @@
4886 #define MMUEXT_FLUSH_CACHE 12
4887 #define MMUEXT_SET_LDT 13
4888 #define MMUEXT_NEW_USER_BASEPTR 15
4889 +#define MMUEXT_CLEAR_PAGE 16
4890 +#define MMUEXT_COPY_PAGE 17
4892 #ifndef __ASSEMBLY__
4896 - /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
4897 - unsigned long mfn;
4898 - /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
4899 - unsigned long linear_addr;
4903 - unsigned int nr_ents;
4904 - /* TLB_FLUSH_MULTI, INVLPG_MULTI */
4909 + /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
4910 + * CLEAR_PAGE, COPY_PAGE */
4912 + /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
4913 + unsigned long linear_addr;
4917 + unsigned int nr_ents;
4918 + /* TLB_FLUSH_MULTI, INVLPG_MULTI */
4919 +#if __XEN_INTERFACE_VERSION__ >= 0x00030205
4920 + XEN_GUEST_HANDLE(void) vcpumask;
4925 + xen_pfn_t src_mfn;
4928 -DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
4929 +typedef struct mmuext_op mmuext_op_t;
4930 +DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
4933 /* These are passed as 'flags' to update_va_mapping. They can be ORed. */
4934 @@ -224,11 +309,24 @@ DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
4936 #define VMASST_CMD_enable 0
4937 #define VMASST_CMD_disable 1
4939 +/* x86/32 guests: simulate full 4GB segment limits. */
4940 #define VMASST_TYPE_4gb_segments 0
4942 +/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
4943 #define VMASST_TYPE_4gb_segments_notify 1
4946 + * x86 guests: support writes to bottom-level PTEs.
4947 + * NB1. Page-directory entries cannot be written.
4948 + * NB2. Guest must continue to remove all writable mappings of PTEs.
4950 #define VMASST_TYPE_writable_pagetables 2
4952 +/* x86/PAE guests: support PDPTs above 4GB. */
4953 #define VMASST_TYPE_pae_extended_cr3 3
4954 -#define MAX_VMASST_TYPE 3
4956 +#define MAX_VMASST_TYPE 3
4958 #ifndef __ASSEMBLY__
4960 @@ -267,18 +365,19 @@ struct mmu_update {
4961 uint64_t ptr; /* Machine address of PTE. */
4962 uint64_t val; /* New contents of PTE. */
4964 -DEFINE_GUEST_HANDLE_STRUCT(mmu_update);
4965 +typedef struct mmu_update mmu_update_t;
4966 +DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
4969 * Send an array of these to HYPERVISOR_multicall().
4970 * NB. The fields are natural register size for this architecture.
4972 struct multicall_entry {
4975 + unsigned long op, result;
4976 unsigned long args[6];
4978 -DEFINE_GUEST_HANDLE_STRUCT(multicall_entry);
4979 +typedef struct multicall_entry multicall_entry_t;
4980 +DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
4983 * Event channel endpoints per domain:
4984 @@ -287,173 +386,240 @@ DEFINE_GUEST_HANDLE_STRUCT(multicall_ent
4985 #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
4987 struct vcpu_time_info {
4989 - * Updates to the following values are preceded and followed
4990 - * by an increment of 'version'. The guest can therefore
4991 - * detect updates by looking for changes to 'version'. If the
4992 - * least-significant bit of the version number is set then an
4993 - * update is in progress and the guest must wait to read a
4994 - * consistent set of values. The correct way to interact with
4995 - * the version number is similar to Linux's seqlock: see the
4996 - * implementations of read_seqbegin/read_seqretry.
5000 - uint64_t tsc_timestamp; /* TSC at last update of time vals. */
5001 - uint64_t system_time; /* Time, in nanosecs, since boot. */
5003 - * Current system time:
5004 - * system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul
5005 - * CPU frequency (Hz):
5006 - * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
5008 - uint32_t tsc_to_system_mul;
5012 + * Updates to the following values are preceded and followed by an
5013 + * increment of 'version'. The guest can therefore detect updates by
5014 + * looking for changes to 'version'. If the least-significant bit of
5015 + * the version number is set then an update is in progress and the guest
5016 + * must wait to read a consistent set of values.
5017 + * The correct way to interact with the version number is similar to
5018 + * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
5022 + uint64_t tsc_timestamp; /* TSC at last update of time vals. */
5023 + uint64_t system_time; /* Time, in nanosecs, since boot. */
5025 + * Current system time:
5027 + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
5028 + * CPU frequency (Hz):
5029 + * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
5031 + uint32_t tsc_to_system_mul;
5035 +typedef struct vcpu_time_info vcpu_time_info_t;
5039 - * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
5040 - * a pending notification for a particular VCPU. It is then cleared
5041 - * by the guest OS /before/ checking for pending work, thus avoiding
5042 - * a set-and-check race. Note that the mask is only accessed by Xen
5043 - * on the CPU that is currently hosting the VCPU. This means that the
5044 - * pending and mask flags can be updated by the guest without special
5045 - * synchronisation (i.e., no need for the x86 LOCK prefix).
5046 - * This may seem suboptimal because if the pending flag is set by
5047 - * a different CPU then an IPI may be scheduled even when the mask
5048 - * is set. However, note:
5049 - * 1. The task of 'interrupt holdoff' is covered by the per-event-
5050 - * channel mask bits. A 'noisy' event that is continually being
5051 - * triggered can be masked at source at this very precise
5053 - * 2. The main purpose of the per-VCPU mask is therefore to restrict
5054 - * reentrant execution: whether for concurrency control, or to
5055 - * prevent unbounded stack usage. Whatever the purpose, we expect
5056 - * that the mask will be asserted only for short periods at a time,
5057 - * and so the likelihood of a 'spurious' IPI is suitably small.
5058 - * The mask is read before making an event upcall to the guest: a
5059 - * non-zero mask therefore guarantees that the VCPU will not receive
5060 - * an upcall activation. The mask is cleared when the VCPU requests
5061 - * to block: this avoids wakeup-waiting races.
5063 - uint8_t evtchn_upcall_pending;
5064 - uint8_t evtchn_upcall_mask;
5065 - unsigned long evtchn_pending_sel;
5066 - struct arch_vcpu_info arch;
5067 - struct pvclock_vcpu_time_info time;
5069 + * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
5070 + * a pending notification for a particular VCPU. It is then cleared
5071 + * by the guest OS /before/ checking for pending work, thus avoiding
5072 + * a set-and-check race. Note that the mask is only accessed by Xen
5073 + * on the CPU that is currently hosting the VCPU. This means that the
5074 + * pending and mask flags can be updated by the guest without special
5075 + * synchronisation (i.e., no need for the x86 LOCK prefix).
5076 + * This may seem suboptimal because if the pending flag is set by
5077 + * a different CPU then an IPI may be scheduled even when the mask
5078 + * is set. However, note:
5079 + * 1. The task of 'interrupt holdoff' is covered by the per-event-
5080 + * channel mask bits. A 'noisy' event that is continually being
5081 + * triggered can be masked at source at this very precise
5083 + * 2. The main purpose of the per-VCPU mask is therefore to restrict
5084 + * reentrant execution: whether for concurrency control, or to
5085 + * prevent unbounded stack usage. Whatever the purpose, we expect
5086 + * that the mask will be asserted only for short periods at a time,
5087 + * and so the likelihood of a 'spurious' IPI is suitably small.
5088 + * The mask is read before making an event upcall to the guest: a
5089 + * non-zero mask therefore guarantees that the VCPU will not receive
5090 + * an upcall activation. The mask is cleared when the VCPU requests
5091 + * to block: this avoids wakeup-waiting races.
5093 + uint8_t evtchn_upcall_pending;
5094 + uint8_t evtchn_upcall_mask;
5095 + unsigned long evtchn_pending_sel;
5096 + struct arch_vcpu_info arch;
5097 +#ifdef CONFIG_PARAVIRT_XEN
5098 + struct pvclock_vcpu_time_info time;
5100 + struct vcpu_time_info time;
5102 }; /* 64 bytes (x86) */
5104 +typedef struct vcpu_info vcpu_info_t;
5108 * Xen/kernel shared data -- pointer provided in start_info.
5109 - * NB. We expect that this struct is smaller than a page.
5111 + * This structure is defined to be both smaller than a page, and the
5112 + * only data on the shared page, but may vary in actual size even within
5113 + * compatible Xen versions; guests should not rely on the size
5114 + * of this structure remaining constant.
5116 struct shared_info {
5117 - struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
5118 + struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
5121 - * A domain can create "event channels" on which it can send and receive
5122 - * asynchronous event notifications. There are three classes of event that
5123 - * are delivered by this mechanism:
5124 - * 1. Bi-directional inter- and intra-domain connections. Domains must
5125 - * arrange out-of-band to set up a connection (usually by allocating
5126 - * an unbound 'listener' port and avertising that via a storage service
5127 - * such as xenstore).
5128 - * 2. Physical interrupts. A domain with suitable hardware-access
5129 - * privileges can bind an event-channel port to a physical interrupt
5131 - * 3. Virtual interrupts ('events'). A domain can bind an event-channel
5132 - * port to a virtual interrupt source, such as the virtual-timer
5133 - * device or the emergency console.
5135 - * Event channels are addressed by a "port index". Each channel is
5136 - * associated with two bits of information:
5137 - * 1. PENDING -- notifies the domain that there is a pending notification
5138 - * to be processed. This bit is cleared by the guest.
5139 - * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
5140 - * will cause an asynchronous upcall to be scheduled. This bit is only
5141 - * updated by the guest. It is read-only within Xen. If a channel
5142 - * becomes pending while the channel is masked then the 'edge' is lost
5143 - * (i.e., when the channel is unmasked, the guest must manually handle
5144 - * pending notifications as no upcall will be scheduled by Xen).
5146 - * To expedite scanning of pending notifications, any 0->1 pending
5147 - * transition on an unmasked channel causes a corresponding bit in a
5148 - * per-vcpu selector word to be set. Each bit in the selector covers a
5149 - * 'C long' in the PENDING bitfield array.
5151 - unsigned long evtchn_pending[sizeof(unsigned long) * 8];
5152 - unsigned long evtchn_mask[sizeof(unsigned long) * 8];
5155 - * Wallclock time: updated only by control software. Guests should base
5156 - * their gettimeofday() syscall on this wallclock-base value.
5158 - struct pvclock_wall_clock wc;
5160 + * A domain can create "event channels" on which it can send and receive
5161 + * asynchronous event notifications. There are three classes of event that
5162 + * are delivered by this mechanism:
5163 + * 1. Bi-directional inter- and intra-domain connections. Domains must
5164 + * arrange out-of-band to set up a connection (usually by allocating
5165 + * an unbound 'listener' port and avertising that via a storage service
5166 + * such as xenstore).
5167 + * 2. Physical interrupts. A domain with suitable hardware-access
5168 + * privileges can bind an event-channel port to a physical interrupt
5170 + * 3. Virtual interrupts ('events'). A domain can bind an event-channel
5171 + * port to a virtual interrupt source, such as the virtual-timer
5172 + * device or the emergency console.
5174 + * Event channels are addressed by a "port index". Each channel is
5175 + * associated with two bits of information:
5176 + * 1. PENDING -- notifies the domain that there is a pending notification
5177 + * to be processed. This bit is cleared by the guest.
5178 + * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
5179 + * will cause an asynchronous upcall to be scheduled. This bit is only
5180 + * updated by the guest. It is read-only within Xen. If a channel
5181 + * becomes pending while the channel is masked then the 'edge' is lost
5182 + * (i.e., when the channel is unmasked, the guest must manually handle
5183 + * pending notifications as no upcall will be scheduled by Xen).
5185 + * To expedite scanning of pending notifications, any 0->1 pending
5186 + * transition on an unmasked channel causes a corresponding bit in a
5187 + * per-vcpu selector word to be set. Each bit in the selector covers a
5188 + * 'C long' in the PENDING bitfield array.
5190 + unsigned long evtchn_pending[sizeof(unsigned long) * 8];
5191 + unsigned long evtchn_mask[sizeof(unsigned long) * 8];
5194 + * Wallclock time: updated only by control software. Guests should base
5195 + * their gettimeofday() syscall on this wallclock-base value.
5197 +#ifdef CONFIG_PARAVIRT_XEN
5198 + struct pvclock_wall_clock wc;
5200 + uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
5201 + uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
5202 + uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
5205 - struct arch_shared_info arch;
5206 + struct arch_shared_info arch;
5210 +typedef struct shared_info shared_info_t;
5214 - * Start-of-day memory layout for the initial domain (DOM0):
5215 + * Start-of-day memory layout:
5216 * 1. The domain is started within contiguous virtual-memory region.
5217 - * 2. The contiguous region begins and ends on an aligned 4MB boundary.
5218 - * 3. The region start corresponds to the load address of the OS image.
5219 - * If the load address is not 4MB aligned then the address is rounded down.
5220 - * 4. This the order of bootstrap elements in the initial virtual region:
5221 + * 2. The contiguous region ends on an aligned 4MB boundary.
5222 + * 3. This the order of bootstrap elements in the initial virtual region:
5223 * a. relocated kernel image
5224 * b. initial ram disk [mod_start, mod_len]
5225 * c. list of allocated page frames [mfn_list, nr_pages]
5226 * d. start_info_t structure [register ESI (x86)]
5227 * e. bootstrap page tables [pt_base, CR3 (x86)]
5228 * f. bootstrap stack [register ESP (x86)]
5229 - * 5. Bootstrap elements are packed together, but each is 4kB-aligned.
5230 - * 6. The initial ram disk may be omitted.
5231 - * 7. The list of page frames forms a contiguous 'pseudo-physical' memory
5232 + * 4. Bootstrap elements are packed together, but each is 4kB-aligned.
5233 + * 5. The initial ram disk may be omitted.
5234 + * 6. The list of page frames forms a contiguous 'pseudo-physical' memory
5235 * layout for the domain. In particular, the bootstrap virtual-memory
5236 * region is a 1:1 mapping to the first section of the pseudo-physical map.
5237 - * 8. All bootstrap elements are mapped read-writable for the guest OS. The
5238 + * 7. All bootstrap elements are mapped read-writable for the guest OS. The
5239 * only exception is the bootstrap page table, which is mapped read-only.
5240 - * 9. There is guaranteed to be at least 512kB padding after the final
5241 + * 8. There is guaranteed to be at least 512kB padding after the final
5242 * bootstrap element. If necessary, the bootstrap virtual region is
5243 * extended by an extra 4MB to ensure this.
5246 #define MAX_GUEST_CMDLINE 1024
5248 - /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
5249 - char magic[32]; /* "xen-<version>-<platform>". */
5250 - unsigned long nr_pages; /* Total pages allocated to this domain. */
5251 - unsigned long shared_info; /* MACHINE address of shared info struct. */
5252 - uint32_t flags; /* SIF_xxx flags. */
5253 - unsigned long store_mfn; /* MACHINE page number of shared page. */
5254 - uint32_t store_evtchn; /* Event channel for store communication. */
5257 - unsigned long mfn; /* MACHINE page number of console page. */
5258 - uint32_t evtchn; /* Event channel for console page. */
5261 - uint32_t info_off; /* Offset of console_info struct. */
5262 - uint32_t info_size; /* Size of console_info struct from start.*/
5265 - /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
5266 - unsigned long pt_base; /* VIRTUAL address of page directory. */
5267 - unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
5268 - unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
5269 - unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
5270 - unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
5271 - int8_t cmd_line[MAX_GUEST_CMDLINE];
5272 + /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
5273 + char magic[32]; /* "xen-<version>-<platform>". */
5274 + unsigned long nr_pages; /* Total pages allocated to this domain. */
5275 + unsigned long shared_info; /* MACHINE address of shared info struct. */
5276 + uint32_t flags; /* SIF_xxx flags. */
5277 + xen_pfn_t store_mfn; /* MACHINE page number of shared page. */
5278 + uint32_t store_evtchn; /* Event channel for store communication. */
5281 + xen_pfn_t mfn; /* MACHINE page number of console page. */
5282 + uint32_t evtchn; /* Event channel for console page. */
5285 + uint32_t info_off; /* Offset of console_info struct. */
5286 + uint32_t info_size; /* Size of console_info struct from start.*/
5289 + /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
5290 + unsigned long pt_base; /* VIRTUAL address of page directory. */
5291 + unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
5292 + unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
5293 + unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
5294 + unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
5295 + int8_t cmd_line[MAX_GUEST_CMDLINE];
5297 +typedef struct start_info start_info_t;
5299 +/* New console union for dom0 introduced in 0x00030203. */
5300 +#if __XEN_INTERFACE_VERSION__ < 0x00030203
5301 +#define console_mfn console.domU.mfn
5302 +#define console_evtchn console.domU.evtchn
5305 /* These flags are passed in the 'flags' field of start_info_t. */
5306 #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
5307 #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
5308 +#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
5310 -typedef uint64_t cpumap_t;
5311 +typedef struct dom0_vga_console_info {
5312 + uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
5313 +#define XEN_VGATYPE_TEXT_MODE_3 0x03
5314 +#define XEN_VGATYPE_VESA_LFB 0x23
5318 + /* Font height, in pixels. */
5319 + uint16_t font_height;
5320 + /* Cursor location (column, row). */
5321 + uint16_t cursor_x, cursor_y;
5322 + /* Number of rows and columns (dimensions in characters). */
5323 + uint16_t rows, columns;
5327 + /* Width and height, in pixels. */
5328 + uint16_t width, height;
5329 + /* Bytes per scan line. */
5330 + uint16_t bytes_per_line;
5331 + /* Bits per pixel. */
5332 + uint16_t bits_per_pixel;
5333 + /* LFB physical address, and size (in units of 64kB). */
5334 + uint32_t lfb_base;
5335 + uint32_t lfb_size;
5336 + /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
5337 + uint8_t red_pos, red_size;
5338 + uint8_t green_pos, green_size;
5339 + uint8_t blue_pos, blue_size;
5340 + uint8_t rsvd_pos, rsvd_size;
5341 +#if __XEN_INTERFACE_VERSION__ >= 0x00030206
5342 + /* VESA capabilities (offset 0xa, VESA command 0x4f00). */
5343 + uint32_t gbl_caps;
5344 + /* Mode attributes (offset 0x0, VESA command 0x4f01). */
5345 + uint16_t mode_attrs;
5349 +} dom0_vga_console_info_t;
5350 +#define xen_vga_console_info dom0_vga_console_info
5351 +#define xen_vga_console_info_t dom0_vga_console_info_t
5353 typedef uint8_t xen_domain_handle_t[16];
5355 @@ -461,6 +627,11 @@ typedef uint8_t xen_domain_handle_t[16];
5356 #define __mk_unsigned_long(x) x ## UL
5357 #define mk_unsigned_long(x) __mk_unsigned_long(x)
5359 +__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t);
5360 +__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t);
5361 +__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t);
5362 +__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
5364 #else /* __ASSEMBLY__ */
5366 /* In assembly code we cannot use C numeric constant suffixes. */
5367 @@ -468,4 +639,24 @@ typedef uint8_t xen_domain_handle_t[16];
5369 #endif /* !__ASSEMBLY__ */
5371 +/* Default definitions for macros used by domctl/sysctl. */
5372 +#if defined(__XEN__) || defined(__XEN_TOOLS__)
5373 +#ifndef uint64_aligned_t
5374 +#define uint64_aligned_t uint64_t
5376 +#ifndef XEN_GUEST_HANDLE_64
5377 +#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
5381 #endif /* __XEN_PUBLIC_XEN_H__ */
5384 + * Local variables:
5386 + * c-set-style: "BSD"
5387 + * c-basic-offset: 4
5389 + * indent-tabs-mode: nil
5392 Index: head-2008-11-25/include/xen/xenbus.h
5393 ===================================================================
5394 --- head-2008-11-25.orig/include/xen/xenbus.h 2008-11-25 12:33:06.000000000 +0100
5395 +++ head-2008-11-25/include/xen/xenbus.h 2008-11-25 12:35:56.000000000 +0100
5397 #include <linux/mutex.h>
5398 #include <linux/completion.h>
5399 #include <linux/init.h>
5400 +#include <linux/err.h>
5401 #include <xen/interface/xen.h>
5402 #include <xen/interface/grant_table.h>
5403 #include <xen/interface/io/xenbus.h>
5404 @@ -55,8 +56,17 @@ struct xenbus_watch
5405 /* Callback (executed in a process context with no locks held). */
5406 void (*callback)(struct xenbus_watch *,
5407 const char **vec, unsigned int len);
5409 + /* See XBWF_ definitions below. */
5410 + unsigned long flags;
5414 + * Execute callback in its own kthread. Useful if the callback is long
5415 + * running or heavily serialised, to avoid taking out the main xenwatch thread
5416 + * for a long period of time (or even unwittingly causing a deadlock).
5418 +#define XBWF_new_thread 1
5420 /* A xenbus device. */
5421 struct xenbus_device {
5422 @@ -105,27 +115,8 @@ static inline struct xenbus_driver *to_x
5423 return container_of(drv, struct xenbus_driver, driver);
5426 -int __must_check __xenbus_register_frontend(struct xenbus_driver *drv,
5427 - struct module *owner,
5428 - const char *mod_name);
5430 -static inline int __must_check
5431 -xenbus_register_frontend(struct xenbus_driver *drv)
5433 - WARN_ON(drv->owner != THIS_MODULE);
5434 - return __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
5437 -int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
5438 - struct module *owner,
5439 - const char *mod_name);
5440 -static inline int __must_check
5441 -xenbus_register_backend(struct xenbus_driver *drv)
5443 - WARN_ON(drv->owner != THIS_MODULE);
5444 - return __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
5447 +int xenbus_register_frontend(struct xenbus_driver *drv);
5448 +int xenbus_register_backend(struct xenbus_driver *drv);
5449 void xenbus_unregister_driver(struct xenbus_driver *drv);
5451 struct xenbus_transaction
5452 @@ -136,8 +127,6 @@ struct xenbus_transaction
5453 /* Nil transaction ID. */
5454 #define XBT_NIL ((struct xenbus_transaction) { 0 })
5456 -int __init xenbus_dev_init(void);
5458 char **xenbus_directory(struct xenbus_transaction t,
5459 const char *dir, const char *node, unsigned int *num);
5460 void *xenbus_read(struct xenbus_transaction t,
5461 @@ -167,7 +156,6 @@ int xenbus_printf(struct xenbus_transact
5462 int xenbus_gather(struct xenbus_transaction t, const char *dir, ...);
5464 /* notifer routines for when the xenstore comes up */
5465 -extern int xenstored_ready;
5466 int register_xenstore_notifier(struct notifier_block *nb);
5467 void unregister_xenstore_notifier(struct notifier_block *nb);
5469 @@ -180,12 +168,9 @@ void xs_suspend_cancel(void);
5470 /* Used by xenbus_dev to borrow kernel's store connection. */
5471 void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
5473 -struct work_struct;
5475 /* Prepare for domain suspend: then resume or cancel the suspend. */
5476 void xenbus_suspend(void);
5477 void xenbus_resume(void);
5478 -void xenbus_probe(struct work_struct *);
5479 void xenbus_suspend_cancel(void);
5481 #define XENBUS_IS_ERR_READ(str) ({ \
5482 @@ -198,38 +183,125 @@ void xenbus_suspend_cancel(void);
5484 #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
5488 + * Register a watch on the given path, using the given xenbus_watch structure
5489 + * for storage, and the given callback function as the callback. Return 0 on
5490 + * success, or -errno on error. On success, the given path will be saved as
5491 + * watch->node, and remains the caller's to free. On error, watch->node will
5492 + * be NULL, the device will switch to XenbusStateClosing, and the error will
5493 + * be saved in the store.
5495 int xenbus_watch_path(struct xenbus_device *dev, const char *path,
5496 struct xenbus_watch *watch,
5497 void (*callback)(struct xenbus_watch *,
5498 const char **, unsigned int));
5499 -int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
5500 - void (*callback)(struct xenbus_watch *,
5501 - const char **, unsigned int),
5502 - const char *pathfmt, ...)
5503 - __attribute__ ((format (printf, 4, 5)));
5507 + * Register a watch on the given path/path2, using the given xenbus_watch
5508 + * structure for storage, and the given callback function as the callback.
5509 + * Return 0 on success, or -errno on error. On success, the watched path
5510 + * (path/path2) will be saved as watch->node, and becomes the caller's to
5511 + * kfree(). On error, watch->node will be NULL, so the caller has nothing to
5512 + * free, the device will switch to XenbusStateClosing, and the error will be
5513 + * saved in the store.
5515 +int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
5516 + const char *path2, struct xenbus_watch *watch,
5517 + void (*callback)(struct xenbus_watch *,
5518 + const char **, unsigned int));
5522 + * Advertise in the store a change of the given driver to the given new_state.
5523 + * Return 0 on success, or -errno on error. On error, the device will switch
5524 + * to XenbusStateClosing, and the error will be saved in the store.
5526 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
5530 + * Grant access to the given ring_mfn to the peer of the given device. Return
5531 + * 0 on success, or -errno on error. On error, the device will switch to
5532 + * XenbusStateClosing, and the error will be saved in the store.
5534 int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
5535 -int xenbus_map_ring_valloc(struct xenbus_device *dev,
5536 - int gnt_ref, void **vaddr);
5540 + * Map a page of memory into this domain from another domain's grant table.
5541 + * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
5542 + * page to that address, and sets *vaddr to that address.
5543 + * xenbus_map_ring does not allocate the virtual address space (you must do
5544 + * this yourself!). It only maps in the page to the specified address.
5545 + * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
5546 + * or -ENOMEM on error. If an error is returned, device will switch to
5547 + * XenbusStateClosing and the error message will be saved in XenStore.
5549 +struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev,
5551 int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
5552 grant_handle_t *handle, void *vaddr);
5554 -int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
5557 + * Unmap a page of memory in this domain that was imported from another domain.
5558 + * Use xenbus_unmap_ring_vfree if you mapped in your memory with
5559 + * xenbus_map_ring_valloc (it will free the virtual address space).
5560 + * Returns 0 on success and returns GNTST_* on error
5561 + * (see xen/include/interface/grant_table.h).
5563 +int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *);
5564 int xenbus_unmap_ring(struct xenbus_device *dev,
5565 grant_handle_t handle, void *vaddr);
5569 + * Allocate an event channel for the given xenbus_device, assigning the newly
5570 + * created local port to *port. Return 0 on success, or -errno on error. On
5571 + * error, the device will switch to XenbusStateClosing, and the error will be
5572 + * saved in the store.
5574 int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
5575 -int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port);
5579 + * Free an existing event channel. Returns 0 on success or -errno on error.
5581 int xenbus_free_evtchn(struct xenbus_device *dev, int port);
5585 + * Return the state of the driver rooted at the given store path, or
5586 + * XenbusStateUnknown if no state can be read.
5588 enum xenbus_state xenbus_read_driver_state(const char *path);
5590 -void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...);
5591 -void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...);
5594 + * Report the given negative errno into the store, along with the given
5595 + * formatted message.
5597 +void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
5602 + * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
5603 + * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly
5604 + * closedown of this driver and its peer.
5606 +void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
5609 +int xenbus_dev_init(void);
5611 const char *xenbus_strstate(enum xenbus_state state);
5612 int xenbus_dev_is_online(struct xenbus_device *dev);
5613 int xenbus_frontend_closed(struct xenbus_device *dev);
5615 +int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *));
5616 +int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *));
5618 #endif /* _XEN_XENBUS_H */