Subject: Fix Xen build wrt. Xen files coming from mainline.
From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 728:832aac894efd)
Patch-mainline: obsolete

Acked-by: jbeulich@novell.com

--- sle11-2009-09-18.orig/drivers/xen/Makefile 2009-09-18 10:11:48.000000000 +0200
+++ sle11-2009-09-18/drivers/xen/Makefile 2008-11-25 12:35:56.000000000 +0100
@@ -1,4 +1,25 @@
-obj-y += grant-table.o features.o events.o manage.o
+obj-y += core/
+obj-y += console/
+obj-y += evtchn/
obj-y += xenbus/
-obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
-obj-$(CONFIG_XEN_BALLOON) += balloon.o
+obj-y += char/
+
+obj-y += util.o
+obj-$(CONFIG_XEN_BALLOON) += balloon/
+obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
+obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/
+obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
+obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmback/
+obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/
+obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/
+obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback/
+obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront/
+obj-$(CONFIG_XEN_FRAMEBUFFER) += fbfront/
+obj-$(CONFIG_XEN_KEYBOARD) += fbfront/
+obj-$(CONFIG_XEN_SCSI_BACKEND) += scsiback/
+obj-$(CONFIG_XEN_SCSI_FRONTEND) += scsifront/
+obj-$(CONFIG_XEN_PRIVCMD) += privcmd/
+obj-$(CONFIG_XEN_GRANT_DEV) += gntdev/
+obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL) += sfc_netutil/
+obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND) += sfc_netfront/
+obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND) += sfc_netback/
--- sle11-2009-09-18.orig/drivers/xen/xenbus/Makefile 2009-09-18 10:11:48.000000000 +0200
+++ sle11-2009-09-18/drivers/xen/xenbus/Makefile 2008-11-25 12:35:56.000000000 +0100
@@ -1,7 +1,9 @@
-obj-y += xenbus.o
+obj-y += xenbus_client.o xenbus_comms.o xenbus_xs.o xenbus_probe.o
+obj-$(CONFIG_XEN_BACKEND) += xenbus_be.o

-xenbus-objs =
-xenbus-objs += xenbus_client.o
-xenbus-objs += xenbus_comms.o
-xenbus-objs += xenbus_xs.o
-xenbus-objs += xenbus_probe.o
+xenbus_be-objs =
+xenbus_be-objs += xenbus_backend_client.o
+
+xenbus-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
+obj-y += $(xenbus-y) $(xenbus-m)
+obj-$(CONFIG_XEN_XENBUS_DEV) += xenbus_dev.o
--- sle11-2009-09-18.orig/drivers/xen/xenbus/xenbus_client.c 2009-09-18 10:11:48.000000000 +0200
+++ sle11-2009-09-18/drivers/xen/xenbus/xenbus_client.c 2008-11-25 12:35:56.000000000 +0100
@@ -30,14 +30,18 @@
* IN THE SOFTWARE.
*/

-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <asm/xen/hypervisor.h>
-#include <xen/interface/xen.h>
-#include <xen/interface/event_channel.h>
-#include <xen/events.h>
-#include <xen/grant_table.h>
+#include <linux/slab.h>
+#include <xen/evtchn.h>
+#include <xen/gnttab.h>
#include <xen/xenbus.h>
+#include <xen/driver_util.h>
+
+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
+#include <xen/platform-compat.h>
+#endif
+
+#define DPRINTK(fmt, args...) \
+ pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)

const char *xenbus_strstate(enum xenbus_state state)
{
@@ -54,20 +58,6 @@ const char *xenbus_strstate(enum xenbus_
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

-/**
- * xenbus_watch_path - register a watch
- * @dev: xenbus device
- * @path: path to watch
- * @watch: watch to register
- * @callback: callback to register
- *
- * Register a @watch on the given path, using the given xenbus_watch structure
- * for storage, and the given @callback function as the callback. Return 0 on
- * success, or -errno on error. On success, the given @path will be saved as
- * @watch->node, and remains the caller's to free. On error, @watch->node will
- * be NULL, the device will switch to %XenbusStateClosing, and the error will
- * be saved in the store.
- */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
struct xenbus_watch *watch,
void (*callback)(struct xenbus_watch *,
@@ -91,58 +81,26 @@ int xenbus_watch_path(struct xenbus_devi
EXPORT_SYMBOL_GPL(xenbus_watch_path);


-/**
- * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
- * @dev: xenbus device
- * @watch: watch to register
- * @callback: callback to register
- * @pathfmt: format of path to watch
- *
- * Register a watch on the given @path, using the given xenbus_watch
- * structure for storage, and the given @callback function as the callback.
- * Return 0 on success, or -errno on error. On success, the watched path
- * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
- * kfree(). On error, watch->node will be NULL, so the caller has nothing to
- * free, the device will switch to %XenbusStateClosing, and the error will be
- * saved in the store.
- */
-int xenbus_watch_pathfmt(struct xenbus_device *dev,
- struct xenbus_watch *watch,
- void (*callback)(struct xenbus_watch *,
- const char **, unsigned int),
- const char *pathfmt, ...)
+int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
+ const char *path2, struct xenbus_watch *watch,
+ void (*callback)(struct xenbus_watch *,
+ const char **, unsigned int))
{
int err;
- va_list ap;
- char *path;
-
- va_start(ap, pathfmt);
- path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
- va_end(ap);
-
- if (!path) {
+ char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2);
+ if (!state) {
xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
return -ENOMEM;
}
- err = xenbus_watch_path(dev, path, watch, callback);
+ err = xenbus_watch_path(dev, state, watch, callback);

if (err)
- kfree(path);
+ kfree(state);
return err;
}
-EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
+EXPORT_SYMBOL_GPL(xenbus_watch_path2);


-/**
- * xenbus_switch_state
- * @dev: xenbus device
- * @xbt: transaction handle
- * @state: new state
- *
- * Advertise in the store a change of the given driver to the given new_state.
- * Return 0 on success, or -errno on error. On error, the device will switch
- * to XenbusStateClosing, and the error will be saved in the store.
- */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
/* We check whether the state is currently set to the given value, and
@@ -201,13 +159,12 @@ static char *error_path(struct xenbus_de
}


-static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
- const char *fmt, va_list ap)
+void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
+ va_list ap)
{
int ret;
unsigned int len;
- char *printf_buffer = NULL;
- char *path_buffer = NULL;
+ char *printf_buffer = NULL, *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
@@ -224,74 +181,51 @@ static void xenbus_va_dev_error(struct x
path_buffer = error_path(dev);

if (path_buffer == NULL) {
- dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
+ printk("xenbus: failed to write error node for %s (%s)\n",
dev->nodename, printf_buffer);
goto fail;
}

if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
- dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
+ printk("xenbus: failed to write error node for %s (%s)\n",
dev->nodename, printf_buffer);
goto fail;
}

fail:
- kfree(printf_buffer);
- kfree(path_buffer);
+ if (printf_buffer)
+ kfree(printf_buffer);
+ if (path_buffer)
+ kfree(path_buffer);
}


-/**
- * xenbus_dev_error
- * @dev: xenbus device
- * @err: error to report
- * @fmt: error message format
- *
- * Report the given negative errno into the store, along with the given
- * formatted message.
- */
-void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
+ ...)
{
va_list ap;

va_start(ap, fmt);
- xenbus_va_dev_error(dev, err, fmt, ap);
+ _dev_error(dev, err, fmt, ap);
va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

-/**
- * xenbus_dev_fatal
- * @dev: xenbus device
- * @err: error to report
- * @fmt: error message format
- *
- * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
- * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly
- * closedown of this driver and its peer.
- */

-void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
+ ...)
{
va_list ap;

va_start(ap, fmt);
- xenbus_va_dev_error(dev, err, fmt, ap);
+ _dev_error(dev, err, fmt, ap);
va_end(ap);

xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

-/**
- * xenbus_grant_ring
- * @dev: xenbus device
- * @ring_mfn: mfn of ring to grant
-
- * Grant access to the given @ring_mfn to the peer of the given device. Return
- * 0 on success, or -errno on error. On error, the device will switch to
- * XenbusStateClosing, and the error will be saved in the store.
- */
+
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
@@ -302,18 +236,12 @@ int xenbus_grant_ring(struct xenbus_devi
EXPORT_SYMBOL_GPL(xenbus_grant_ring);


-/**
- * Allocate an event channel for the given xenbus_device, assigning the newly
- * created local port to *port. Return 0 on success, or -errno on error. On
- * error, the device will switch to XenbusStateClosing, and the error will be
- * saved in the store.
- */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
struct evtchn_alloc_unbound alloc_unbound;
int err;

- alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.dom = DOMID_SELF;
alloc_unbound.remote_dom = dev->otherend_id;

err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
@@ -328,36 +256,6 @@ int xenbus_alloc_evtchn(struct xenbus_de
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);


-/**
- * Bind to an existing interdomain event channel in another domain. Returns 0
- * on success and stores the local port in *port. On error, returns -errno,
- * switches the device to XenbusStateClosing, and saves the error in XenStore.
- */
-int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
-{
- struct evtchn_bind_interdomain bind_interdomain;
- int err;
-
- bind_interdomain.remote_dom = dev->otherend_id;
- bind_interdomain.remote_port = remote_port;
-
- err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
- &bind_interdomain);
- if (err)
- xenbus_dev_fatal(dev, err,
- "binding to event channel %d from domain %d",
- remote_port, dev->otherend_id);
- else
- *port = bind_interdomain.local_port;
-
- return err;
-}
-EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
-
-
-/**
- * Free an existing event channel. Returns 0 on success or -errno on error.
- */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
struct evtchn_close close;
@@ -374,189 +272,6 @@ int xenbus_free_evtchn(struct xenbus_dev
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);


-/**
- * xenbus_map_ring_valloc
- * @dev: xenbus device
- * @gnt_ref: grant reference
- * @vaddr: pointer to address to be filled out by mapping
- *
- * Based on Rusty Russell's skeleton driver's map_page.
- * Map a page of memory into this domain from another domain's grant table.
- * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
- * page to that address, and sets *vaddr to that address.
- * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM on error. If an error is returned, device will switch to
- * XenbusStateClosing and the error message will be saved in XenStore.
- */
-int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
-{
- struct gnttab_map_grant_ref op = {
- .flags = GNTMAP_host_map,
- .ref = gnt_ref,
- .dom = dev->otherend_id,
- };
- struct vm_struct *area;
-
- *vaddr = NULL;
-
- area = xen_alloc_vm_area(PAGE_SIZE);
- if (!area)
- return -ENOMEM;
-
- op.host_addr = (unsigned long)area->addr;
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
- BUG();
-
- if (op.status != GNTST_okay) {
- xen_free_vm_area(area);
- xenbus_dev_fatal(dev, op.status,
- "mapping in shared page %d from domain %d",
- gnt_ref, dev->otherend_id);
- return op.status;
- }
-
- /* Stuff the handle in an unused field */
- area->phys_addr = (unsigned long)op.handle;
-
- *vaddr = area->addr;
- return 0;
-}
-EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
-
-
-/**
- * xenbus_map_ring
- * @dev: xenbus device
- * @gnt_ref: grant reference
- * @handle: pointer to grant handle to be filled
- * @vaddr: address to be mapped to
- *
- * Map a page of memory into this domain from another domain's grant table.
- * xenbus_map_ring does not allocate the virtual address space (you must do
- * this yourself!). It only maps in the page to the specified address.
- * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM on error. If an error is returned, device will switch to
- * XenbusStateClosing and the error message will be saved in XenStore.
- */
-int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
- grant_handle_t *handle, void *vaddr)
-{
- struct gnttab_map_grant_ref op = {
- .host_addr = (unsigned long)vaddr,
- .flags = GNTMAP_host_map,
- .ref = gnt_ref,
- .dom = dev->otherend_id,
- };
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
- BUG();
-
- if (op.status != GNTST_okay) {
- xenbus_dev_fatal(dev, op.status,
- "mapping in shared page %d from domain %d",
- gnt_ref, dev->otherend_id);
- } else
- *handle = op.handle;
-
- return op.status;
-}
-EXPORT_SYMBOL_GPL(xenbus_map_ring);
-
-
-/**
- * xenbus_unmap_ring_vfree
- * @dev: xenbus device
- * @vaddr: addr to unmap
- *
- * Based on Rusty Russell's skeleton driver's unmap_page.
- * Unmap a page of memory in this domain that was imported from another domain.
- * Use xenbus_unmap_ring_vfree if you mapped in your memory with
- * xenbus_map_ring_valloc (it will free the virtual address space).
- * Returns 0 on success and returns GNTST_* on error
- * (see xen/include/interface/grant_table.h).
- */
-int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
-{
- struct vm_struct *area;
- struct gnttab_unmap_grant_ref op = {
- .host_addr = (unsigned long)vaddr,
- };
-
- /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
- * method so that we don't have to muck with vmalloc internals here.
- * We could force the user to hang on to their struct vm_struct from
- * xenbus_map_ring_valloc, but these 6 lines considerably simplify
- * this API.
- */
- read_lock(&vmlist_lock);
- for (area = vmlist; area != NULL; area = area->next) {
- if (area->addr == vaddr)
- break;
- }
- read_unlock(&vmlist_lock);
-
- if (!area) {
- xenbus_dev_error(dev, -ENOENT,
- "can't find mapped virtual address %p", vaddr);
- return GNTST_bad_virt_addr;
- }
-
- op.handle = (grant_handle_t)area->phys_addr;
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
- BUG();
-
- if (op.status == GNTST_okay)
- xen_free_vm_area(area);
- else
- xenbus_dev_error(dev, op.status,
- "unmapping page at handle %d error %d",
- (int16_t)area->phys_addr, op.status);
-
- return op.status;
-}
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
-
-
-/**
- * xenbus_unmap_ring
- * @dev: xenbus device
- * @handle: grant handle
- * @vaddr: addr to unmap
- *
- * Unmap a page of memory in this domain that was imported from another domain.
- * Returns 0 on success and returns GNTST_* on error
- * (see xen/include/interface/grant_table.h).
- */
-int xenbus_unmap_ring(struct xenbus_device *dev,
- grant_handle_t handle, void *vaddr)
-{
- struct gnttab_unmap_grant_ref op = {
- .host_addr = (unsigned long)vaddr,
- .handle = handle,
- };
-
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
- BUG();
-
- if (op.status != GNTST_okay)
- xenbus_dev_error(dev, op.status,
- "unmapping page at handle %d error %d",
- handle, op.status);
-
- return op.status;
-}
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
-
-
-/**
- * xenbus_read_driver_state
- * @path: path for driver
- *
- * Return the state of the driver rooted at the given store path, or
- * XenbusStateUnknown if no state can be read.
- */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
enum xenbus_state result;
--- sle11-2009-09-18.orig/drivers/xen/xenbus/xenbus_comms.c 2009-09-18 10:11:48.000000000 +0200
+++ sle11-2009-09-18/drivers/xen/xenbus/xenbus_comms.c 2008-11-25 12:35:56.000000000 +0100
@@ -34,19 +34,28 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/err.h>
+#include <linux/ptrace.h>
+#include <linux/workqueue.h>
+#include <xen/evtchn.h>
#include <xen/xenbus.h>
-#include <asm/xen/hypervisor.h>
-#include <xen/events.h>
-#include <xen/page.h>
+
+#include <asm/hypervisor.h>
+
#include "xenbus_comms.h"

+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
+#include <xen/platform-compat.h>
+#endif
+
static int xenbus_irq;

-static DECLARE_WORK(probe_work, xenbus_probe);
+extern void xenbus_probe(void *);
+extern int xenstored_ready;
+static DECLARE_WORK(probe_work, xenbus_probe, NULL);

static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);

-static irqreturn_t wake_waiting(int irq, void *unused)
+static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
{
if (unlikely(xenstored_ready == 0)) {
xenstored_ready = 1;
@@ -82,13 +91,6 @@ static const void *get_input_chunk(XENST
return buf + MASK_XENSTORE_IDX(cons);
}

-/**
- * xb_write - low level write
- * @data: buffer to send
- * @len: length of buffer
- *
- * Returns 0 on success, error otherwise.
- */
int xb_write(const void *data, unsigned len)
{
struct xenstore_domain_interface *intf = xen_store_interface;
@@ -197,12 +199,11 @@ int xb_read(void *data, unsigned len)
return 0;
}

-/**
- * xb_init_comms - Set up interrupt handler off store event channel.
- */
+/* Set up interrupt handler off store event channel. */
int xb_init_comms(void)
{
struct xenstore_domain_interface *intf = xen_store_interface;
+ int err;

if (intf->req_prod != intf->req_cons)
printk(KERN_ERR "XENBUS request ring is not quiescent "
@@ -215,20 +216,18 @@ int xb_init_comms(void)
intf->rsp_cons = intf->rsp_prod;
}

- if (xenbus_irq) {
- /* Already have an irq; assume we're resuming */
- rebind_evtchn_irq(xen_store_evtchn, xenbus_irq);
- } else {
- int err;
- err = bind_evtchn_to_irqhandler(xen_store_evtchn, wake_waiting,
- 0, "xenbus", &xb_waitq);
- if (err <= 0) {
- printk(KERN_ERR "XENBUS request irq failed %i\n", err);
- return err;
- }
+ if (xenbus_irq)
+ unbind_from_irqhandler(xenbus_irq, &xb_waitq);

- xenbus_irq = err;
+ err = bind_caller_port_to_irqhandler(
+ xen_store_evtchn, wake_waiting,
+ 0, "xenbus", &xb_waitq);
+ if (err <= 0) {
+ printk(KERN_ERR "XENBUS request irq failed %i\n", err);
+ return err;
}

+ xenbus_irq = err;
+
return 0;
}
--- sle11-2009-09-18.orig/drivers/xen/xenbus/xenbus_probe.c 2009-09-18 10:11:48.000000000 +0200
+++ sle11-2009-09-18/drivers/xen/xenbus/xenbus_probe.c 2008-11-25 12:35:56.000000000 +0100
@@ -4,6 +4,7 @@
* Copyright (C) 2005 Rusty Russell, IBM Corporation
* Copyright (C) 2005 Mike Wray, Hewlett-Packard
* Copyright (C) 2005, 2006 XenSource Ltd
+ * Copyright (C) 2007 Solarflare Communications, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
@@ -32,7 +33,7 @@

#define DPRINTK(fmt, args...) \
pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
- __func__, __LINE__, ##args)
+ __FUNCTION__, __LINE__, ##args)

#include <linux/kernel.h>
#include <linux/err.h>
@@ -41,24 +42,35 @@
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/notifier.h>
-#include <linux/kthread.h>
#include <linux/mutex.h>
-#include <linux/io.h>
+#include <linux/module.h>

+#include <asm/io.h>
#include <asm/page.h>
+#include <asm/maddr.h>
#include <asm/pgtable.h>
-#include <asm/xen/hypervisor.h>
+#include <asm/hypervisor.h>
#include <xen/xenbus.h>
-#include <xen/events.h>
-#include <xen/page.h>
+#include <xen/xen_proc.h>
+#include <xen/evtchn.h>
+#include <xen/features.h>
+#ifdef MODULE
+#include <xen/hvm.h>
+#endif

#include "xenbus_comms.h"
#include "xenbus_probe.h"

+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
+#include <xen/platform-compat.h>
+#endif
+
int xen_store_evtchn;
struct xenstore_domain_interface *xen_store_interface;
static unsigned long xen_store_mfn;

+extern struct mutex xenwatch_mutex;
+
static BLOCKING_NOTIFIER_HEAD(xenstore_chain);

static void wait_for_devices(struct xenbus_driver *xendrv);
@@ -88,16 +100,6 @@ int xenbus_match(struct device *_dev, st
return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
}

-static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env)
-{
- struct xenbus_device *dev = to_xenbus_device(_dev);
-
- if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
- return -ENOMEM;
-
- return 0;
-}
-
/* device/<type>/<id> => <type>-<id> */
static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
{
@@ -166,6 +168,30 @@ static int read_backend_details(struct x
return read_otherend_details(xendev, "backend-id", "backend");
}

+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+static int xenbus_uevent_frontend(struct device *dev, char **envp,
+ int num_envp, char *buffer, int buffer_size)
+{
+ struct xenbus_device *xdev;
+ int length = 0, i = 0;
+
+ if (dev == NULL)
+ return -ENODEV;
+ xdev = to_xenbus_device(dev);
+ if (xdev == NULL)
+ return -ENODEV;
+
+ /* stuff we want to pass to /sbin/hotplug */
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
+ "XENBUS_TYPE=%s", xdev->devicetype);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
+ "XENBUS_PATH=%s", xdev->nodename);
+ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
+ "MODALIAS=xen:%s", xdev->devicetype);
+
+ return 0;
+}
+#endif

/* Bus type for frontend drivers. */
static struct xen_bus_type xenbus_frontend = {
@@ -173,13 +199,19 @@ static struct xen_bus_type xenbus_fronte
.levels = 2, /* device/type/<id> */
.get_bus_id = frontend_bus_id,
.probe = xenbus_probe_frontend,
+ .error = -ENODEV,
.bus = {
.name = "xen",
.match = xenbus_match,
- .uevent = xenbus_uevent,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
.probe = xenbus_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
+ .uevent = xenbus_uevent_frontend,
+#endif
+ },
+ .dev = {
+ .bus_id = "xen",
},
};

@@ -196,17 +228,16 @@ static void otherend_changed(struct xenb
if (!dev->otherend ||
strncmp(dev->otherend, vec[XS_WATCH_PATH],
strlen(dev->otherend))) {
- dev_dbg(&dev->dev, "Ignoring watch at %s\n",
- vec[XS_WATCH_PATH]);
+ DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]);
return;
}

state = xenbus_read_driver_state(dev->otherend);

- dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n",
- state, xenbus_strstate(state), dev->otherend_watch.node,
- vec[XS_WATCH_PATH]);
+ DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state),
+ dev->otherend_watch.node, vec[XS_WATCH_PATH]);

+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
/*
* Ignore xenbus transitions during shutdown. This prevents us doing
* work that can fail e.g., when the rootfs is gone.
@@ -220,6 +251,7 @@ static void otherend_changed(struct xenb
xenbus_frontend_closed(dev);
return;
}
+#endif

if (drv->otherend_changed)
drv->otherend_changed(dev, state);
@@ -239,8 +271,8 @@ static int talk_to_otherend(struct xenbu

static int watch_otherend(struct xenbus_device *dev)
{
- return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
- "%s/%s", dev->otherend, "state");
+ return xenbus_watch_path2(dev, dev->otherend, "state",
+ &dev->otherend_watch, otherend_changed);
}


@@ -266,8 +298,9 @@ int xenbus_dev_probe(struct device *_dev

err = talk_to_otherend(dev);
if (err) {
- dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
- dev->nodename);
+ printk(KERN_WARNING
+ "xenbus_probe: talk_to_otherend on %s failed.\n",
+ dev->nodename);
return err;
}

@@ -277,7 +310,8 @@ int xenbus_dev_probe(struct device *_dev

err = watch_otherend(dev);
if (err) {
- dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
+ printk(KERN_WARNING
+ "xenbus_probe: watch_otherend on %s failed.\n",
dev->nodename);
return err;
}
@@ -313,43 +347,55 @@ static void xenbus_dev_shutdown(struct d

DPRINTK("%s", dev->nodename);

+ if (is_initial_xendomain())
+ return;
+
get_device(&dev->dev);
if (dev->state != XenbusStateConnected) {
- printk(KERN_INFO "%s: %s: %s != Connected, skipping\n", __func__,
+ printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__,
dev->nodename, xenbus_strstate(dev->state));
goto out;
}
xenbus_switch_state(dev, XenbusStateClosing);
timeout = wait_for_completion_timeout(&dev->down, timeout);
if (!timeout)
- printk(KERN_INFO "%s: %s timeout closing device\n",
- __func__, dev->nodename);
+ printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename);
out:
put_device(&dev->dev);
}

int xenbus_register_driver_common(struct xenbus_driver *drv,
- struct xen_bus_type *bus,
- struct module *owner,
- const char *mod_name)
+ struct xen_bus_type *bus)
{
+ int ret;
+
+ if (bus->error)
+ return bus->error;
+
drv->driver.name = drv->name;
drv->driver.bus = &bus->bus;
- drv->driver.owner = owner;
- drv->driver.mod_name = mod_name;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
+ drv->driver.owner = drv->owner;
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
+ drv->driver.probe = xenbus_dev_probe;
+ drv->driver.remove = xenbus_dev_remove;
+ drv->driver.shutdown = xenbus_dev_shutdown;
+#endif

- return driver_register(&drv->driver);
+ mutex_lock(&xenwatch_mutex);
+ ret = driver_register(&drv->driver);
+ mutex_unlock(&xenwatch_mutex);
+ return ret;
}

-int __xenbus_register_frontend(struct xenbus_driver *drv,
- struct module *owner, const char *mod_name)
+int xenbus_register_frontend(struct xenbus_driver *drv)
{
int ret;

drv->read_otherend_details = read_backend_details;

- ret = xenbus_register_driver_common(drv, &xenbus_frontend,
- owner, mod_name);
+ ret = xenbus_register_driver_common(drv, &xenbus_frontend);
if (ret)
return ret;

@@ -358,7 +404,7 @@ int __xenbus_register_frontend(struct xe

return 0;
}
-EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+EXPORT_SYMBOL_GPL(xenbus_register_frontend);

void xenbus_unregister_driver(struct xenbus_driver *drv)
{
@@ -436,25 +482,25 @@ static void xenbus_dev_release(struct de
}

static ssize_t xendev_show_nodename(struct device *dev,
- struct device_attribute *attr, char *buf)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
+ struct device_attribute *attr,
+#endif
+ char *buf)
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);

static ssize_t xendev_show_devtype(struct device *dev,
- struct device_attribute *attr, char *buf)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
+ struct device_attribute *attr,
+#endif
+ char *buf)
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);

-static ssize_t xendev_show_modalias(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype);
-}
-DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL);

int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
@@ -467,6 +513,9 @@ int xenbus_probe_node(struct xen_bus_typ

enum xenbus_state state = xenbus_read_driver_state(nodename);

+ if (bus->error)
+ return bus->error;
+
if (state != XenbusStateInitialising) {
/* Device is not new, so ignore it. This can happen if a
device is going away after switching to Closed. */
@@ -491,6 +540,7 @@ int xenbus_probe_node(struct xen_bus_typ
xendev->devicetype = tmpstring;
init_completion(&xendev->down);

+ xendev->dev.parent = &bus->dev;
xendev->dev.bus = &bus->bus;
xendev->dev.release = xenbus_dev_release;

@@ -505,22 +555,15 @@ int xenbus_probe_node(struct xen_bus_typ

err = device_create_file(&xendev->dev, &dev_attr_nodename);
if (err)
- goto fail_unregister;
-
+ goto unregister;
err = device_create_file(&xendev->dev, &dev_attr_devtype);
if (err)
- goto fail_remove_nodename;
-
- err = device_create_file(&xendev->dev, &dev_attr_modalias);
- if (err)
- goto fail_remove_devtype;
+ goto unregister;

return 0;
-fail_remove_devtype:
- device_remove_file(&xendev->dev, &dev_attr_devtype);
-fail_remove_nodename:
+unregister:
device_remove_file(&xendev->dev, &dev_attr_nodename);
-fail_unregister:
+ device_remove_file(&xendev->dev, &dev_attr_devtype);
device_unregister(&xendev->dev);
fail:
kfree(xendev);
@@ -533,8 +576,7 @@ static int xenbus_probe_frontend(const c
char *nodename;
int err;

- nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
- xenbus_frontend.root, type, name);
+ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name);
if (!nodename)
return -ENOMEM;

@@ -571,6 +613,9 @@ int xenbus_probe_devices(struct xen_bus_
char **dir;
unsigned int i, dir_n;

+ if (bus->error)
+ return bus->error;
+
dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
if (IS_ERR(dir))
return PTR_ERR(dir);
@@ -607,15 +652,15 @@ static int strsep_len(const char *str, c
return (len == 0) ? i : -ERANGE;
}

-void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
+void dev_changed(const char *node, struct xen_bus_type *bus)
{
int exists, rootlen;
struct xenbus_device *dev;
char type[BUS_ID_SIZE];
const char *p, *root;

- if (char_count(node, '/') < 2)
- return;
+ if (bus->error || char_count(node, '/') < 2)
+ return;

exists = xenbus_exists(XBT_NIL, node, "");
if (!exists) {
@@ -649,7 +694,7 @@ static void frontend_changed(struct xenb
{
DPRINTK("");

- xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
+ dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
}

/* We watch for devices appearing and vanishing. */
@@ -748,7 +793,8 @@ void xenbus_suspend(void)
{
DPRINTK("");

- bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
+ if (!xenbus_frontend.error)
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
xenbus_backend_suspend(suspend_dev);
xs_suspend();
}
@@ -758,7 +804,8 @@ void xenbus_resume(void)
{
xb_init_comms();
xs_resume();
- bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
+ if (!xenbus_frontend.error)
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
xenbus_backend_resume(resume_dev);
}
EXPORT_SYMBOL_GPL(xenbus_resume);
@@ -766,7 +813,8 @@ EXPORT_SYMBOL_GPL(xenbus_resume);
void xenbus_suspend_cancel(void)
{
xs_suspend_cancel();
- bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
+ if (!xenbus_frontend.error)
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
xenbus_backend_resume(suspend_cancel_dev);
}
EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
@@ -794,7 +842,8 @@ void unregister_xenstore_notifier(struct
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);

-void xenbus_probe(struct work_struct *unused)
+
+void xenbus_probe(void *unused)
{
BUG_ON((xenstored_ready <= 0));

@@ -807,63 +856,171 @@ void xenbus_probe(struct work_struct *un
blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}

-static int __init xenbus_probe_init(void)
+
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
+static struct file_operations xsd_kva_fops;
+static struct proc_dir_entry *xsd_kva_intf;
+static struct proc_dir_entry *xsd_port_intf;
+
+static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ size_t size = vma->vm_end - vma->vm_start;
+
+ if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
+ return -EINVAL;
+
+ if (remap_pfn_range(vma, vma->vm_start, mfn_to_pfn(xen_store_mfn),
+ size, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int xsd_kva_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+
+ len = sprintf(page, "0x%p", xen_store_interface);
+ *eof = 1;
+ return len;
+}
+
+static int xsd_port_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+
+ len = sprintf(page, "%d", xen_store_evtchn);
+ *eof = 1;
+ return len;
+}
+#endif
+
+static int xenbus_probe_init(void)
{
int err = 0;
+ unsigned long page = 0;

DPRINTK("");

- err = -ENODEV;
if (!is_running_on_xen())
- goto out_error;
+ return -ENODEV;

/* Register ourselves with the kernel bus subsystem */
- err = bus_register(&xenbus_frontend.bus);
- if (err)
- goto out_error;
-
- err = xenbus_backend_bus_register();
- if (err)
- goto out_unreg_front;
+ xenbus_frontend.error = bus_register(&xenbus_frontend.bus);
+ if (xenbus_frontend.error)
+ printk(KERN_WARNING
+ "XENBUS: Error registering frontend bus: %i\n",
+ xenbus_frontend.error);
+ xenbus_backend_bus_register();

/*
* Domain0 doesn't have a store_evtchn or store_mfn yet.
*/
if (is_initial_xendomain()) {
- /* dom0 not yet supported */
+ struct evtchn_alloc_unbound alloc_unbound;
+
+ /* Allocate page. */
+ page = get_zeroed_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ xen_store_mfn = xen_start_info->store_mfn =
+ pfn_to_mfn(virt_to_phys((void *)page) >>
+ PAGE_SHIFT);
+
+ /* Next allocate a local port which xenstored can bind to */
+ alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.remote_dom = 0;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+ &alloc_unbound);
+ if (err == -ENOSYS)
+ goto err;
+ BUG_ON(err);
+ xen_store_evtchn = xen_start_info->store_evtchn =
+ alloc_unbound.port;
+
+#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
+ /* And finally publish the above info in /proc/xen */
+ xsd_kva_intf = create_xen_proc_entry("xsd_kva", 0600);
+ if (xsd_kva_intf) {
+ memcpy(&xsd_kva_fops, xsd_kva_intf->proc_fops,
+ sizeof(xsd_kva_fops));
+ xsd_kva_fops.mmap = xsd_kva_mmap;
+ xsd_kva_intf->proc_fops = &xsd_kva_fops;
+ xsd_kva_intf->read_proc = xsd_kva_read;
+ }
+ xsd_port_intf = create_xen_proc_entry("xsd_port", 0400);
+ if (xsd_port_intf)
+ xsd_port_intf->read_proc = xsd_port_read;
+#endif
+ xen_store_interface = mfn_to_virt(xen_store_mfn);
} else {
xenstored_ready = 1;
+#ifdef CONFIG_XEN
xen_store_evtchn = xen_start_info->store_evtchn;
xen_store_mfn = xen_start_info->store_mfn;
+ xen_store_interface = mfn_to_virt(xen_store_mfn);
+#else
+ xen_store_evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
+ xen_store_mfn = hvm_get_parameter(HVM_PARAM_STORE_PFN);
+ xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT,
+ PAGE_SIZE);
+#endif
}
- xen_store_interface = mfn_to_virt(xen_store_mfn);
+
+
+ xenbus_dev_init();

/* Initialize the interface to xenstore. */
err = xs_init();
if (err) {
printk(KERN_WARNING
"XENBUS: Error initializing xenstore comms: %i\n", err);
- goto out_unreg_back;
+ goto err;
}

+ /* Register ourselves with the kernel device subsystem */
+ if (!xenbus_frontend.error) {
+ xenbus_frontend.error = device_register(&xenbus_frontend.dev);
+ if (xenbus_frontend.error) {
+ bus_unregister(&xenbus_frontend.bus);
+ printk(KERN_WARNING
+ "XENBUS: Error registering frontend device: %i\n",
+ xenbus_frontend.error);
+ }
+ }
+ xenbus_backend_device_register();
+
if (!is_initial_xendomain())
xenbus_probe(NULL);

return 0;

- out_unreg_back:
- xenbus_backend_bus_unregister();
+ err:
+ if (page)
+ free_page(page);

- out_unreg_front:
- bus_unregister(&xenbus_frontend.bus);
+ /*
+ * Do not unregister the xenbus front/backend buses here. The buses
+ * must exist because front/backend drivers will use them when they are
+ * registered.
+ */

- out_error:
return err;
}

+#ifdef CONFIG_XEN
postcore_initcall(xenbus_probe_init);
-
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
+#else
+int xenbus_init(void)
+{
+ return xenbus_probe_init();
+}
+#endif

static int is_disconnected_device(struct device *dev, void *data)
{
@@ -883,12 +1040,14 @@ static int is_disconnected_device(struct
return 0;

xendrv = to_xenbus_driver(dev->driver);
- return (xendev->state != XenbusStateConnected ||
+ return (xendev->state < XenbusStateConnected ||
(xendrv->is_ready && !xendrv->is_ready(xendev)));
}

static int exists_disconnected_device(struct device_driver *drv)
{
+ if (xenbus_frontend.error)
+ return xenbus_frontend.error;
return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
is_disconnected_device);
}
@@ -897,6 +1056,7 @@ static int print_device_status(struct de
{
struct xenbus_device *xendev = to_xenbus_device(dev);
struct device_driver *drv = data;
+ struct xenbus_driver *xendrv;

/* Is this operation limited to a particular driver? */
if (drv && (dev->driver != drv))
@@ -906,12 +1066,23 @@ static int print_device_status(struct de
/* Information only: is this too noisy? */
printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
xendev->nodename);
- } else if (xendev->state != XenbusStateConnected) {
+ return 0;
+ }
+
+ if (xendev->state < XenbusStateConnected) {
+ enum xenbus_state rstate = XenbusStateUnknown;
+ if (xendev->otherend)
+ rstate = xenbus_read_driver_state(xendev->otherend);
printk(KERN_WARNING "XENBUS: Timeout connecting "
- "to device: %s (state %d)\n",
- xendev->nodename, xendev->state);
+ "to device: %s (local state %d, remote state %d)\n",
+ xendev->nodename, xendev->state, rstate);
}

+ xendrv = to_xenbus_driver(dev->driver);
+ if (xendrv->is_ready && !xendrv->is_ready(xendev))
+ printk(KERN_WARNING "XENBUS: Device not ready: %s\n",
+ xendev->nodename);
+
return 0;
}

@@ -919,7 +1090,7 @@ static int print_device_status(struct de
static int ready_to_wait_for_devices;

/*
- * On a 10 second timeout, wait for all devices currently configured. We need
+ * On a 5-minute timeout, wait for all devices currently configured. We need
* to do this to guarantee that the filesystems and / or network devices
* needed for boot are available, before we can allow the boot to proceed.
*
@@ -934,18 +1105,30 @@ static int ready_to_wait_for_devices;
*/
static void wait_for_devices(struct xenbus_driver *xendrv)
{
- unsigned long timeout = jiffies + 10*HZ;
+ unsigned long start = jiffies;
struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
+ unsigned int seconds_waited = 0;

if (!ready_to_wait_for_devices || !is_running_on_xen())
return;

while (exists_disconnected_device(drv)) {
- if (time_after(jiffies, timeout))
- break;
+ if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
+ if (!seconds_waited)
+ printk(KERN_WARNING "XENBUS: Waiting for "
+ "devices to initialise: ");
+ seconds_waited += 5;
+ printk("%us...", 300 - seconds_waited);
+ if (seconds_waited == 300)
+ break;
+ }
+
schedule_timeout_interruptible(HZ/10);
}

+ if (seconds_waited)
+ printk("\n");
+
bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
print_device_status);
}
@@ -953,10 +1136,18 @@ static void wait_for_devices(struct xenb
#ifndef MODULE
static int __init boot_wait_for_devices(void)
{
- ready_to_wait_for_devices = 1;
- wait_for_devices(NULL);
+ if (!xenbus_frontend.error) {
+ ready_to_wait_for_devices = 1;
+ wait_for_devices(NULL);
+ }
return 0;
}

late_initcall(boot_wait_for_devices);
#endif

+int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *))
+{
+ return bus_for_each_dev(&xenbus_frontend.bus, NULL, arg, fn);
+}
+EXPORT_SYMBOL_GPL(xenbus_for_each_frontend);
--- sle11-2009-09-18.orig/drivers/xen/xenbus/xenbus_probe.h 2009-09-18 10:11:48.000000000 +0200
+++ sle11-2009-09-18/drivers/xen/xenbus/xenbus_probe.h 2008-11-25 12:35:56.000000000 +0100
@@ -34,41 +34,42 @@
#ifndef _XENBUS_PROBE_H
#define _XENBUS_PROBE_H

-#ifdef CONFIG_XEN_BACKEND
+#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
extern void xenbus_backend_probe_and_watch(void);
-extern int xenbus_backend_bus_register(void);
-extern void xenbus_backend_bus_unregister(void);
+extern void xenbus_backend_bus_register(void);
+extern void xenbus_backend_device_register(void);
#else
static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
static inline void xenbus_backend_probe_and_watch(void) {}
-static inline int xenbus_backend_bus_register(void) { return 0; }
-static inline void xenbus_backend_bus_unregister(void) {}
+static inline void xenbus_backend_bus_register(void) {}
+static inline void xenbus_backend_device_register(void) {}
#endif

struct xen_bus_type
{
char *root;
+ int error;
unsigned int levels;
int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
int (*probe)(const char *type, const char *dir);
struct bus_type bus;
+ struct device dev;
};

extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
extern int xenbus_dev_probe(struct device *_dev);
extern int xenbus_dev_remove(struct device *_dev);
extern int xenbus_register_driver_common(struct xenbus_driver *drv,
- struct xen_bus_type *bus,
- struct module *owner,
- const char *mod_name);
+ struct xen_bus_type *bus);
extern int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
const char *nodename);
extern int xenbus_probe_devices(struct xen_bus_type *bus);

-extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
+extern void dev_changed(const char *node, struct xen_bus_type *bus);

#endif
+
--- sle11-2009-09-18.orig/drivers/xen/xenbus/xenbus_xs.c 2009-09-18 10:11:48.000000000 +0200
+++ sle11-2009-09-18/drivers/xen/xenbus/xenbus_xs.c 2008-11-25 12:35:56.000000000 +0100
@@ -47,6 +47,14 @@
#include <xen/xenbus.h>
#include "xenbus_comms.h"

+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
+#include <xen/platform-compat.h>
+#endif
+
+#ifndef PF_NOFREEZE /* Old kernel (pre-2.6.6). */
+#define PF_NOFREEZE 0
+#endif
+
struct xs_stored_msg {
struct list_head list;

@@ -108,7 +116,7 @@ static DEFINE_SPINLOCK(watch_events_lock
* carrying out work.
*/
static pid_t xenwatch_pid;
-static DEFINE_MUTEX(xenwatch_mutex);
+/* static */ DEFINE_MUTEX(xenwatch_mutex);
static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);

static int get_error(const char *errorstring)
@@ -177,7 +185,7 @@ void *xenbus_dev_request_and_reply(struc

mutex_unlock(&xs_state.request_mutex);

- if ((msg->type == XS_TRANSACTION_END) ||
+ if ((req_msg.type == XS_TRANSACTION_END) ||
((req_msg.type == XS_TRANSACTION_START) &&
(msg->type == XS_ERROR)))
up_read(&xs_state.transaction_mutex);
@@ -213,7 +221,7 @@ static void *xs_talkv(struct xenbus_tran
}

for (i = 0; i < num_vecs; i++) {
- err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
+ err = xb_write(iovec[i].iov_base, iovec[i].iov_len);;
if (err) {
mutex_unlock(&xs_state.request_mutex);
return ERR_PTR(err);
@@ -294,7 +302,7 @@ static char **split(char *strings, unsig
char *p, **ret;

/* Count the strings. */
- *num = count_strings(strings, len);
+ *num = count_strings(strings, len) + 1;

/* Transfer to one big alloc for easy freeing. */
ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH);
@@ -308,6 +316,7 @@ static char **split(char *strings, unsig
strings = (char *)&ret[*num];
for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
ret[(*num)++] = p;
+ ret[*num] = strings + len;

return ret;
}
@@ -498,7 +507,7 @@ int xenbus_printf(struct xenbus_transact
#define PRINTF_BUFFER_SIZE 4096
char *printf_buffer;

- printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
+ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
if (printf_buffer == NULL)
return -ENOMEM;

@@ -621,6 +630,8 @@ void unregister_xenbus_watch(struct xenb
char token[sizeof(watch) * 2 + 1];
int err;

+ BUG_ON(watch->flags & XBWF_new_thread);
+
sprintf(token, "%lX", (long)watch);

down_read(&xs_state.watch_mutex);
@@ -638,11 +649,6 @@ void unregister_xenbus_watch(struct xenb

up_read(&xs_state.watch_mutex);

- /* Make sure there are no callbacks running currently (unless
- its us) */
- if (current->pid != xenwatch_pid)
- mutex_lock(&xenwatch_mutex);
-
/* Cancel pending watch events. */
spin_lock(&watch_events_lock);
list_for_each_entry_safe(msg, tmp, &watch_events, list) {
@@ -654,8 +660,11 @@ void unregister_xenbus_watch(struct xenb
}
spin_unlock(&watch_events_lock);

- if (current->pid != xenwatch_pid)
+ /* Flush any currently-executing callback, unless we are it. :-) */
+ if (current->pid != xenwatch_pid) {
+ mutex_lock(&xenwatch_mutex);
mutex_unlock(&xenwatch_mutex);
+ }
}
EXPORT_SYMBOL_GPL(unregister_xenbus_watch);

@@ -693,11 +702,30 @@ void xs_suspend_cancel(void)
up_write(&xs_state.transaction_mutex);
}

+static int xenwatch_handle_callback(void *data)
+{
+ struct xs_stored_msg *msg = data;
+
+ msg->u.watch.handle->callback(msg->u.watch.handle,
+ (const char **)msg->u.watch.vec,
+ msg->u.watch.vec_size);
+
+ kfree(msg->u.watch.vec);
+ kfree(msg);
+
+ /* Kill this kthread if we were spawned just for this callback. */
+ if (current->pid != xenwatch_pid)
+ do_exit(0);
+
+ return 0;
+}
+
static int xenwatch_thread(void *unused)
{
struct list_head *ent;
struct xs_stored_msg *msg;

+ current->flags |= PF_NOFREEZE;
for (;;) {
wait_event_interruptible(watch_events_waitq,
!list_empty(&watch_events));
@@ -713,17 +741,29 @@ static int xenwatch_thread(void *unused)
list_del(ent);
spin_unlock(&watch_events_lock);

- if (ent != &watch_events) {
- msg = list_entry(ent, struct xs_stored_msg, list);
- msg->u.watch.handle->callback(
- msg->u.watch.handle,
- (const char **)msg->u.watch.vec,
- msg->u.watch.vec_size);
- kfree(msg->u.watch.vec);
- kfree(msg);
+ if (ent == &watch_events) {
+ mutex_unlock(&xenwatch_mutex);
+ continue;
}

- mutex_unlock(&xenwatch_mutex);
+ msg = list_entry(ent, struct xs_stored_msg, list);
+
+ /*
+ * Unlock the mutex before running an XBWF_new_thread
+ * handler. kthread_run can block which can deadlock
+ * against unregister_xenbus_watch() if we need to
+ * unregister other watches in order to make
+ * progress. This can occur on resume before the swap
+ * device is attached.
+ */
+ if (msg->u.watch.handle->flags & XBWF_new_thread) {
+ mutex_unlock(&xenwatch_mutex);
+ kthread_run(xenwatch_handle_callback,
+ msg, "xenwatch_cb");
+ } else {
+ xenwatch_handle_callback(msg);
+ mutex_unlock(&xenwatch_mutex);
+ }
}

return 0;
@@ -817,6 +857,7 @@ static int xenbus_thread(void *unused)
{
int err;

+ current->flags |= PF_NOFREEZE;
for (;;) {
err = process_msg();
if (err)
--- sle11-2009-09-18.orig/include/xen/balloon.h 2009-09-18 10:11:48.000000000 +0200
+++ sle11-2009-09-18/include/xen/balloon.h 2008-11-25 12:35:56.000000000 +0100
@@ -31,12 +31,9 @@
* IN THE SOFTWARE.
*/

-#ifndef __XEN_BALLOON_H__
-#define __XEN_BALLOON_H__
+#ifndef __ASM_BALLOON_H__
+#define __ASM_BALLOON_H__

-#include <linux/spinlock.h>
-
-#if 0
/*
* Inform the balloon driver that it should allow some slop for device-driver
* memory activities.
@@ -56,6 +53,5 @@ void balloon_release_driver_page(struct
extern spinlock_t balloon_lock;
#define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags)
#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
-#endif

-#endif /* __XEN_BALLOON_H__ */
+#endif /* __ASM_BALLOON_H__ */
--- sle11-2009-09-18.orig/include/xen/interface/callback.h 2009-09-18 10:11:48.000000000 +0200
+++ sle11-2009-09-18/include/xen/interface/callback.h 2008-11-25 12:35:56.000000000 +0100
@@ -86,6 +86,8 @@ struct callback_register {
uint16_t flags;
xen_callback_t address;
};
+typedef struct callback_register callback_register_t;
+DEFINE_XEN_GUEST_HANDLE(callback_register_t);

/*
* Unregister a callback.
@@ -98,5 +100,22 @@ struct callback_unregister {
uint16_t type;
uint16_t _unused;
};
+typedef struct callback_unregister callback_unregister_t;
+DEFINE_XEN_GUEST_HANDLE(callback_unregister_t);
+
+#if __XEN_INTERFACE_VERSION__ < 0x00030207
+#undef CALLBACKTYPE_sysenter
+#define CALLBACKTYPE_sysenter CALLBACKTYPE_sysenter_deprecated
+#endif

#endif /* __XEN_PUBLIC_CALLBACK_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
1660 --- sle11-2009-09-18.orig/include/xen/interface/elfnote.h 2009-09-18 10:11:48.000000000 +0200
1661 +++ sle11-2009-09-18/include/xen/interface/elfnote.h 2008-11-25 12:35:56.000000000 +0100
1662 @@ -3,6 +3,24 @@
1663 *
1664 * Definitions used for the Xen ELF notes.
1665 *
1666 + * Permission is hereby granted, free of charge, to any person obtaining a copy
1667 + * of this software and associated documentation files (the "Software"), to
1668 + * deal in the Software without restriction, including without limitation the
1669 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
1670 + * sell copies of the Software, and to permit persons to whom the Software is
1671 + * furnished to do so, subject to the following conditions:
1672 + *
1673 + * The above copyright notice and this permission notice shall be included in
1674 + * all copies or substantial portions of the Software.
1675 + *
1676 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1677 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1678 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1679 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1680 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1681 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
1682 + * DEALINGS IN THE SOFTWARE.
1683 + *
1684 * Copyright (c) 2006, Ian Campbell, XenSource Ltd.
1685 */
1686
1687 @@ -10,7 +28,7 @@
1688 #define __XEN_PUBLIC_ELFNOTE_H__
1689
1690 /*
1691 - * The notes should live in a SHT_NOTE segment and have "Xen" in the
1692 + * The notes should live in a PT_NOTE segment and have "Xen" in the
1693 * name field.
1694 *
1695 * Numeric types are either 4 or 8 bytes depending on the content of
1696 @@ -22,8 +40,6 @@
1697
1698 /*
1699 * NAME=VALUE pair (string).
1700 - *
1701 - * LEGACY: FEATURES and PAE
1702 */
1703 #define XEN_ELFNOTE_INFO 0
1704
1705 @@ -90,7 +106,12 @@
1706 #define XEN_ELFNOTE_LOADER 8
1707
1708 /*
1709 - * The kernel supports PAE (x86/32 only, string = "yes" or "no").
1710 + * The kernel supports PAE (x86/32 only, string = "yes", "no" or
1711 + * "bimodal").
1712 + *
1713 + * For compatibility with Xen 3.0.3 and earlier the "bimodal" setting
1714 + * may be given as "yes,bimodal" which will cause older Xen to treat
1715 + * this kernel as PAE.
1716 *
1717 * LEGACY: PAE (n.b. The legacy interface included a provision to
1718 * indicate 'extended-cr3' support allowing L3 page tables to be
1719 @@ -140,6 +161,65 @@
1720 */
1721 #define XEN_ELFNOTE_SUSPEND_CANCEL 14
1722
1723 +/*
1724 + * The number of the highest elfnote defined.
1725 + */
1726 +#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUSPEND_CANCEL
1727 +
1728 +/*
1729 + * System information exported through crash notes.
1730 + *
1731 + * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
1732 + * note in case of a system crash. This note will contain various
1733 + * information about the system, see xen/include/xen/elfcore.h.
1734 + */
1735 +#define XEN_ELFNOTE_CRASH_INFO 0x1000001
1736 +
1737 +/*
1738 + * System registers exported through crash notes.
1739 + *
1740 + * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
1741 + * note per cpu in case of a system crash. This note is architecture
1742 + * specific and will contain registers not saved in the "CORE" note.
1743 + * See xen/include/xen/elfcore.h for more information.
1744 + */
1745 +#define XEN_ELFNOTE_CRASH_REGS 0x1000002
1746 +
1747 +
1748 +/*
1749 + * xen dump-core none note.
1750 + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_NONE
1751 + * in its dump file to indicate that the file is a xen dump-core
1752 + * file. This note doesn't have any other information.
1753 + * See tools/libxc/xc_core.h for more information.
1754 + */
1755 +#define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000
1756 +
1757 +/*
1758 + * xen dump-core header note.
1759 + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_HEADER
1760 + * in its dump file.
1761 + * See tools/libxc/xc_core.h for more information.
1762 + */
1763 +#define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001
1764 +
1765 +/*
1766 + * xen dump-core xen version note.
1767 + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_XEN_VERSION
1768 + * in its dump file. It contains the xen version obtained via the
1769 + * XENVER hypercall.
1770 + * See tools/libxc/xc_core.h for more information.
1771 + */
1772 +#define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002
1773 +
1774 +/*
1775 + * xen dump-core format version note.
1776 + * xm dump-core code will create one XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION
1777 + * in its dump file. It contains a format version identifier.
1778 + * See tools/libxc/xc_core.h for more information.
1779 + */
1780 +#define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003
1781 +
1782 #endif /* __XEN_PUBLIC_ELFNOTE_H__ */
1783
1784 /*
1785 --- sle11-2009-09-18.orig/include/xen/interface/event_channel.h 2009-09-18 10:11:48.000000000 +0200
1786 +++ sle11-2009-09-18/include/xen/interface/event_channel.h 2008-11-25 12:35:56.000000000 +0100
1787 @@ -3,14 +3,39 @@
1788 *
1789 * Event channels between domains.
1790 *
1791 + * Permission is hereby granted, free of charge, to any person obtaining a copy
1792 + * of this software and associated documentation files (the "Software"), to
1793 + * deal in the Software without restriction, including without limitation the
1794 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
1795 + * sell copies of the Software, and to permit persons to whom the Software is
1796 + * furnished to do so, subject to the following conditions:
1797 + *
1798 + * The above copyright notice and this permission notice shall be included in
1799 + * all copies or substantial portions of the Software.
1800 + *
1801 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1802 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1803 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1804 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1805 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1806 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
1807 + * DEALINGS IN THE SOFTWARE.
1808 + *
1809 * Copyright (c) 2003-2004, K A Fraser.
1810 */
1811
1812 #ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
1813 #define __XEN_PUBLIC_EVENT_CHANNEL_H__
1814
1815 +/*
1816 + * Prototype for this hypercall is:
1817 + * int event_channel_op(int cmd, void *args)
1818 + * @cmd == EVTCHNOP_??? (event-channel operation).
1819 + * @args == Operation-specific extra arguments (NULL if none).
1820 + */
1821 +
1822 typedef uint32_t evtchn_port_t;
1823 -DEFINE_GUEST_HANDLE(evtchn_port_t);
1824 +DEFINE_XEN_GUEST_HANDLE(evtchn_port_t);
1825
1826 /*
1827 * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
1828 @@ -20,13 +45,14 @@ DEFINE_GUEST_HANDLE(evtchn_port_t);
1829 * 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
1830 * 2. <rdom> may be DOMID_SELF, allowing loopback connections.
1831 */
1832 -#define EVTCHNOP_alloc_unbound 6
1833 +#define EVTCHNOP_alloc_unbound 6
1834 struct evtchn_alloc_unbound {
1835 - /* IN parameters */
1836 - domid_t dom, remote_dom;
1837 - /* OUT parameters */
1838 - evtchn_port_t port;
1839 + /* IN parameters */
1840 + domid_t dom, remote_dom;
1841 + /* OUT parameters */
1842 + evtchn_port_t port;
1843 };
1844 +typedef struct evtchn_alloc_unbound evtchn_alloc_unbound_t;
1845
1846 /*
1847 * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
1848 @@ -39,29 +65,35 @@ struct evtchn_alloc_unbound {
1849 */
1850 #define EVTCHNOP_bind_interdomain 0
1851 struct evtchn_bind_interdomain {
1852 - /* IN parameters. */
1853 - domid_t remote_dom;
1854 - evtchn_port_t remote_port;
1855 - /* OUT parameters. */
1856 - evtchn_port_t local_port;
1857 + /* IN parameters. */
1858 + domid_t remote_dom;
1859 + evtchn_port_t remote_port;
1860 + /* OUT parameters. */
1861 + evtchn_port_t local_port;
1862 };
1863 +typedef struct evtchn_bind_interdomain evtchn_bind_interdomain_t;
1864
1865 /*
1866 * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
1867 * vcpu.
1868 * NOTES:
1869 - * 1. A virtual IRQ may be bound to at most one event channel per vcpu.
1870 - * 2. The allocated event channel is bound to the specified vcpu. The binding
1871 - * may not be changed.
1872 + * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list
1873 + * in xen.h for the classification of each VIRQ.
1874 + * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be
1875 + * re-bound via EVTCHNOP_bind_vcpu.
1876 + * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu.
1877 + * The allocated event channel is bound to the specified vcpu and the
1878 + * binding cannot be changed.
1879 */
1880 -#define EVTCHNOP_bind_virq 1
1881 +#define EVTCHNOP_bind_virq 1
1882 struct evtchn_bind_virq {
1883 - /* IN parameters. */
1884 - uint32_t virq;
1885 - uint32_t vcpu;
1886 - /* OUT parameters. */
1887 - evtchn_port_t port;
1888 + /* IN parameters. */
1889 + uint32_t virq;
1890 + uint32_t vcpu;
1891 + /* OUT parameters. */
1892 + evtchn_port_t port;
1893 };
1894 +typedef struct evtchn_bind_virq evtchn_bind_virq_t;
1895
1896 /*
1897 * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
1898 @@ -69,15 +101,16 @@ struct evtchn_bind_virq {
1899 * 1. A physical IRQ may be bound to at most one event channel per domain.
1900 * 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
1901 */
1902 -#define EVTCHNOP_bind_pirq 2
1903 +#define EVTCHNOP_bind_pirq 2
1904 struct evtchn_bind_pirq {
1905 - /* IN parameters. */
1906 - uint32_t pirq;
1907 + /* IN parameters. */
1908 + uint32_t pirq;
1909 #define BIND_PIRQ__WILL_SHARE 1
1910 - uint32_t flags; /* BIND_PIRQ__* */
1911 - /* OUT parameters. */
1912 - evtchn_port_t port;
1913 + uint32_t flags; /* BIND_PIRQ__* */
1914 + /* OUT parameters. */
1915 + evtchn_port_t port;
1916 };
1917 +typedef struct evtchn_bind_pirq evtchn_bind_pirq_t;
1918
1919 /*
1920 * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
1921 @@ -85,33 +118,36 @@ struct evtchn_bind_pirq {
1922 * 1. The allocated event channel is bound to the specified vcpu. The binding
1923 * may not be changed.
1924 */
1925 -#define EVTCHNOP_bind_ipi 7
1926 +#define EVTCHNOP_bind_ipi 7
1927 struct evtchn_bind_ipi {
1928 - uint32_t vcpu;
1929 - /* OUT parameters. */
1930 - evtchn_port_t port;
1931 + uint32_t vcpu;
1932 + /* OUT parameters. */
1933 + evtchn_port_t port;
1934 };
1935 +typedef struct evtchn_bind_ipi evtchn_bind_ipi_t;
1936
1937 /*
1938 * EVTCHNOP_close: Close a local event channel <port>. If the channel is
1939 * interdomain then the remote end is placed in the unbound state
1940 * (EVTCHNSTAT_unbound), awaiting a new connection.
1941 */
1942 -#define EVTCHNOP_close 3
1943 +#define EVTCHNOP_close 3
1944 struct evtchn_close {
1945 - /* IN parameters. */
1946 - evtchn_port_t port;
1947 + /* IN parameters. */
1948 + evtchn_port_t port;
1949 };
1950 +typedef struct evtchn_close evtchn_close_t;
1951
1952 /*
1953 * EVTCHNOP_send: Send an event to the remote end of the channel whose local
1954 * endpoint is <port>.
1955 */
1956 -#define EVTCHNOP_send 4
1957 +#define EVTCHNOP_send 4
1958 struct evtchn_send {
1959 - /* IN parameters. */
1960 - evtchn_port_t port;
1961 + /* IN parameters. */
1962 + evtchn_port_t port;
1963 };
1964 +typedef struct evtchn_send evtchn_send_t;
1965
1966 /*
1967 * EVTCHNOP_status: Get the current status of the communication channel which
1968 @@ -121,75 +157,108 @@ struct evtchn_send {
1969 * 2. Only a sufficiently-privileged domain may obtain the status of an event
1970 * channel for which <dom> is not DOMID_SELF.
1971 */
1972 -#define EVTCHNOP_status 5
1973 +#define EVTCHNOP_status 5
1974 struct evtchn_status {
1975 - /* IN parameters */
1976 - domid_t dom;
1977 - evtchn_port_t port;
1978 - /* OUT parameters */
1979 -#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
1980 -#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
1981 -#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
1982 -#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
1983 -#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
1984 -#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
1985 - uint32_t status;
1986 - uint32_t vcpu; /* VCPU to which this channel is bound. */
1987 - union {
1988 - struct {
1989 - domid_t dom;
1990 - } unbound; /* EVTCHNSTAT_unbound */
1991 - struct {
1992 - domid_t dom;
1993 - evtchn_port_t port;
1994 - } interdomain; /* EVTCHNSTAT_interdomain */
1995 - uint32_t pirq; /* EVTCHNSTAT_pirq */
1996 - uint32_t virq; /* EVTCHNSTAT_virq */
1997 - } u;
1998 + /* IN parameters */
1999 + domid_t dom;
2000 + evtchn_port_t port;
2001 + /* OUT parameters */
2002 +#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
2003 +#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
2004 +#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
2005 +#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
2006 +#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
2007 +#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
2008 + uint32_t status;
2009 + uint32_t vcpu; /* VCPU to which this channel is bound. */
2010 + union {
2011 + struct {
2012 + domid_t dom;
2013 + } unbound; /* EVTCHNSTAT_unbound */
2014 + struct {
2015 + domid_t dom;
2016 + evtchn_port_t port;
2017 + } interdomain; /* EVTCHNSTAT_interdomain */
2018 + uint32_t pirq; /* EVTCHNSTAT_pirq */
2019 + uint32_t virq; /* EVTCHNSTAT_virq */
2020 + } u;
2021 };
2022 +typedef struct evtchn_status evtchn_status_t;
2023
2024 /*
2025 * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
2026 * event is pending.
2027 * NOTES:
2028 - * 1. IPI- and VIRQ-bound channels always notify the vcpu that initialised
2029 - * the binding. This binding cannot be changed.
2030 - * 2. All other channels notify vcpu0 by default. This default is set when
2031 + * 1. IPI-bound channels always notify the vcpu specified at bind time.
2032 + * This binding cannot be changed.
2033 + * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
2034 + * This binding cannot be changed.
2035 + * 3. All other channels notify vcpu0 by default. This default is set when
2036 * the channel is allocated (a port that is freed and subsequently reused
2037 * has its binding reset to vcpu0).
2038 */
2039 -#define EVTCHNOP_bind_vcpu 8
2040 +#define EVTCHNOP_bind_vcpu 8
2041 struct evtchn_bind_vcpu {
2042 - /* IN parameters. */
2043 - evtchn_port_t port;
2044 - uint32_t vcpu;
2045 + /* IN parameters. */
2046 + evtchn_port_t port;
2047 + uint32_t vcpu;
2048 };
2049 +typedef struct evtchn_bind_vcpu evtchn_bind_vcpu_t;
2050
2051 /*
2052 * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
2053 * a notification to the appropriate VCPU if an event is pending.
2054 */
2055 -#define EVTCHNOP_unmask 9
2056 +#define EVTCHNOP_unmask 9
2057 struct evtchn_unmask {
2058 - /* IN parameters. */
2059 - evtchn_port_t port;
2060 + /* IN parameters. */
2061 + evtchn_port_t port;
2062 +};
2063 +typedef struct evtchn_unmask evtchn_unmask_t;
2064 +
2065 +/*
2066 + * EVTCHNOP_reset: Close all event channels associated with the specified domain.
2067 + * NOTES:
2068 + * 1. <dom> may be specified as DOMID_SELF.
2069 + * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
2070 + */
2071 +#define EVTCHNOP_reset 10
2072 +struct evtchn_reset {
2073 + /* IN parameters. */
2074 + domid_t dom;
2075 };
2076 +typedef struct evtchn_reset evtchn_reset_t;
2077
2078 +/*
2079 + * Argument to event_channel_op_compat() hypercall. Superseded by the new
2080 + * event_channel_op() hypercall since interface version 0x00030202.
2081 + */
2082 struct evtchn_op {
2083 - uint32_t cmd; /* EVTCHNOP_* */
2084 - union {
2085 - struct evtchn_alloc_unbound alloc_unbound;
2086 - struct evtchn_bind_interdomain bind_interdomain;
2087 - struct evtchn_bind_virq bind_virq;
2088 - struct evtchn_bind_pirq bind_pirq;
2089 - struct evtchn_bind_ipi bind_ipi;
2090 - struct evtchn_close close;
2091 - struct evtchn_send send;
2092 - struct evtchn_status status;
2093 - struct evtchn_bind_vcpu bind_vcpu;
2094 - struct evtchn_unmask unmask;
2095 - } u;
2096 + uint32_t cmd; /* EVTCHNOP_* */
2097 + union {
2098 + struct evtchn_alloc_unbound alloc_unbound;
2099 + struct evtchn_bind_interdomain bind_interdomain;
2100 + struct evtchn_bind_virq bind_virq;
2101 + struct evtchn_bind_pirq bind_pirq;
2102 + struct evtchn_bind_ipi bind_ipi;
2103 + struct evtchn_close close;
2104 + struct evtchn_send send;
2105 + struct evtchn_status status;
2106 + struct evtchn_bind_vcpu bind_vcpu;
2107 + struct evtchn_unmask unmask;
2108 + } u;
2109 };
2110 -DEFINE_GUEST_HANDLE_STRUCT(evtchn_op);
2111 +typedef struct evtchn_op evtchn_op_t;
2112 +DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
2113
2114 #endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
2115 +
2116 +/*
2117 + * Local variables:
2118 + * mode: C
2119 + * c-set-style: "BSD"
2120 + * c-basic-offset: 4
2121 + * tab-width: 4
2122 + * indent-tabs-mode: nil
2123 + * End:
2124 + */
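[Editorial note: tying the operations together, an unprivileged guest would allocate an unbound port for a peer and later signal it roughly as below. A minimal sketch, assuming the HYPERVISOR_event_channel_op() wrapper that implements the prototype documented at the top of this header.]

    #include <xen/interface/xen.h>           /* domid_t, DOMID_SELF */
    #include <xen/interface/event_channel.h>

    /* Sketch: offer an unbound port to <remote>, then notify it. */
    static int offer_and_notify(domid_t remote, evtchn_port_t *port_out)
    {
        struct evtchn_alloc_unbound alloc = {
            .dom        = DOMID_SELF, /* unprivileged callers must use DOMID_SELF */
            .remote_dom = remote,
        };
        struct evtchn_send send;
        int rc;

        rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);
        if (rc)
            return rc;
        *port_out = alloc.port;       /* OUT parameter filled in by Xen */

        send.port = alloc.port;
        return HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
    }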
2125 --- sle11-2009-09-18.orig/include/xen/interface/features.h 2009-09-18 10:11:48.000000000 +0200
2126 +++ sle11-2009-09-18/include/xen/interface/features.h 2008-11-25 12:22:34.000000000 +0100
2127 @@ -3,6 +3,24 @@
2128 *
2129 * Feature flags, reported by XENVER_get_features.
2130 *
2131 + * Permission is hereby granted, free of charge, to any person obtaining a copy
2132 + * of this software and associated documentation files (the "Software"), to
2133 + * deal in the Software without restriction, including without limitation the
2134 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
2135 + * sell copies of the Software, and to permit persons to whom the Software is
2136 + * furnished to do so, subject to the following conditions:
2137 + *
2138 + * The above copyright notice and this permission notice shall be included in
2139 + * all copies or substantial portions of the Software.
2140 + *
2141 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2142 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2143 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
2144 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2145 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2146 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2147 + * DEALINGS IN THE SOFTWARE.
2148 + *
2149 * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
2150 */
2151
2152 @@ -41,6 +59,25 @@
2153 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
2154 #define XENFEAT_mmu_pt_update_preserve_ad 5
2155
2156 +/* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */
2157 +#define XENFEAT_highmem_assist 6
2158 +
2159 +/*
2160 + * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
2161 + * available pte bits.
2162 + */
2163 +#define XENFEAT_gnttab_map_avail_bits 7
2164 +
2165 #define XENFEAT_NR_SUBMAPS 1
2166
2167 #endif /* __XEN_PUBLIC_FEATURES_H__ */
2168 +
2169 +/*
2170 + * Local variables:
2171 + * mode: C
2172 + * c-set-style: "BSD"
2173 + * c-basic-offset: 4
2174 + * tab-width: 4
2175 + * indent-tabs-mode: nil
2176 + * End:
2177 + */
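[Editorial note: these flags are what the guest-side xen_feature() helper ultimately tests. A minimal sketch of fetching submap 0 directly, assuming XENVER_get_features and struct xen_feature_info from interface/version.h and the HYPERVISOR_xen_version() wrapper.]

    #include <xen/interface/version.h>
    #include <xen/interface/features.h>

    /* Sketch: does the host honour guest-available PTE bits in grant maps? */
    static int host_has_gnttab_map_avail_bits(void)
    {
        struct xen_feature_info info = { .submap_idx = 0 };

        if (HYPERVISOR_xen_version(XENVER_get_features, &info))
            return 0;
        return !!(info.submap & (1U << XENFEAT_gnttab_map_avail_bits));
    }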
2178 --- sle11-2009-09-18.orig/include/xen/interface/grant_table.h 2009-09-18 10:11:48.000000000 +0200
2179 +++ sle11-2009-09-18/include/xen/interface/grant_table.h 2008-11-25 12:22:34.000000000 +0100
2180 @@ -100,6 +100,7 @@ struct grant_entry {
2181 */
2182 uint32_t frame;
2183 };
2184 +typedef struct grant_entry grant_entry_t;
2185
2186 /*
2187 * Type of grant entry.
2188 @@ -118,6 +119,7 @@ struct grant_entry {
2189 * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
2190 * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
2191 * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
2192 + * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST]
2193 */
2194 #define _GTF_readonly (2)
2195 #define GTF_readonly (1U<<_GTF_readonly)
2196 @@ -125,6 +127,12 @@ struct grant_entry {
2197 #define GTF_reading (1U<<_GTF_reading)
2198 #define _GTF_writing (4)
2199 #define GTF_writing (1U<<_GTF_writing)
2200 +#define _GTF_PWT (5)
2201 +#define GTF_PWT (1U<<_GTF_PWT)
2202 +#define _GTF_PCD (6)
2203 +#define GTF_PCD (1U<<_GTF_PCD)
2204 +#define _GTF_PAT (7)
2205 +#define GTF_PAT (1U<<_GTF_PAT)
2206
2207 /*
2208 * Subflags for GTF_accept_transfer:
2209 @@ -185,7 +193,8 @@ struct gnttab_map_grant_ref {
2210 grant_handle_t handle;
2211 uint64_t dev_bus_addr;
2212 };
2213 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref);
2214 +typedef struct gnttab_map_grant_ref gnttab_map_grant_ref_t;
2215 +DEFINE_XEN_GUEST_HANDLE(gnttab_map_grant_ref_t);
2216
2217 /*
2218 * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
2219 @@ -207,7 +216,8 @@ struct gnttab_unmap_grant_ref {
2220 /* OUT parameters. */
2221 int16_t status; /* GNTST_* */
2222 };
2223 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref);
2224 +typedef struct gnttab_unmap_grant_ref gnttab_unmap_grant_ref_t;
2225 +DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_grant_ref_t);
2226
2227 /*
2228 * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
2229 @@ -225,9 +235,10 @@ struct gnttab_setup_table {
2230 uint32_t nr_frames;
2231 /* OUT parameters. */
2232 int16_t status; /* GNTST_* */
2233 - GUEST_HANDLE(ulong) frame_list;
2234 + XEN_GUEST_HANDLE(ulong) frame_list;
2235 };
2236 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table);
2237 +typedef struct gnttab_setup_table gnttab_setup_table_t;
2238 +DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
2239
2240 /*
2241 * GNTTABOP_dump_table: Dump the contents of the grant table to the
2242 @@ -240,7 +251,8 @@ struct gnttab_dump_table {
2243 /* OUT parameters. */
2244 int16_t status; /* GNTST_* */
2245 };
2246 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table);
2247 +typedef struct gnttab_dump_table gnttab_dump_table_t;
2248 +DEFINE_XEN_GUEST_HANDLE(gnttab_dump_table_t);
2249
2250 /*
2251 * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
2252 @@ -253,13 +265,15 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_t
2253 #define GNTTABOP_transfer 4
2254 struct gnttab_transfer {
2255 /* IN parameters. */
2256 - unsigned long mfn;
2257 + xen_pfn_t mfn;
2258 domid_t domid;
2259 grant_ref_t ref;
2260 /* OUT parameters. */
2261 int16_t status;
2262 };
2263 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer);
2264 +typedef struct gnttab_transfer gnttab_transfer_t;
2265 +DEFINE_XEN_GUEST_HANDLE(gnttab_transfer_t);
2266 +
2267
2268 /*
2269 * GNTTABOP_copy: Hypervisor based copy
2270 @@ -285,22 +299,22 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_transf
2271 #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
2272
2273 #define GNTTABOP_copy 5
2274 -struct gnttab_copy {
2275 - /* IN parameters. */
2276 - struct {
2277 - union {
2278 - grant_ref_t ref;
2279 - unsigned long gmfn;
2280 - } u;
2281 - domid_t domid;
2282 - uint16_t offset;
2283 - } source, dest;
2284 - uint16_t len;
2285 - uint16_t flags; /* GNTCOPY_* */
2286 - /* OUT parameters. */
2287 - int16_t status;
2288 -};
2289 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy);
2290 +typedef struct gnttab_copy {
2291 + /* IN parameters. */
2292 + struct {
2293 + union {
2294 + grant_ref_t ref;
2295 + xen_pfn_t gmfn;
2296 + } u;
2297 + domid_t domid;
2298 + uint16_t offset;
2299 + } source, dest;
2300 + uint16_t len;
2301 + uint16_t flags; /* GNTCOPY_* */
2302 + /* OUT parameters. */
2303 + int16_t status;
2304 +} gnttab_copy_t;
2305 +DEFINE_XEN_GUEST_HANDLE(gnttab_copy_t);
2306
2307 /*
2308 * GNTTABOP_query_size: Query the current and maximum sizes of the shared
2309 @@ -318,10 +332,35 @@ struct gnttab_query_size {
2310 uint32_t max_nr_frames;
2311 int16_t status; /* GNTST_* */
2312 };
2313 -DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size);
2314 +typedef struct gnttab_query_size gnttab_query_size_t;
2315 +DEFINE_XEN_GUEST_HANDLE(gnttab_query_size_t);
2316
2317 /*
2318 - * Bitfield values for update_pin_status.flags.
2319 + * GNTTABOP_unmap_and_replace: Destroy one or more grant-reference mappings
2320 + * tracked by <handle> but atomically replace the page table entry with one
2321 + * pointing to the machine address under <new_addr>. <new_addr> will be
2322 + * redirected to the null entry.
2323 + * NOTES:
2324 + * 1. The call may fail in an undefined manner if either mapping is not
2325 + * tracked by <handle>.
2326 + * 2. After executing a batch of unmaps, it is guaranteed that no stale
2327 + * mappings will remain in the device or host TLBs.
2328 + */
2329 +#define GNTTABOP_unmap_and_replace 7
2330 +struct gnttab_unmap_and_replace {
2331 + /* IN parameters. */
2332 + uint64_t host_addr;
2333 + uint64_t new_addr;
2334 + grant_handle_t handle;
2335 + /* OUT parameters. */
2336 + int16_t status; /* GNTST_* */
2337 +};
2338 +typedef struct gnttab_unmap_and_replace gnttab_unmap_and_replace_t;
2339 +DEFINE_XEN_GUEST_HANDLE(gnttab_unmap_and_replace_t);
2340 +
2341 +
2342 +/*
2343 + * Bitfield values for gnttab_map_grant_ref.flags.
2344 */
2345 /* Map the grant entry for access by I/O devices. */
2346 #define _GNTMAP_device_map (0)
2347 @@ -349,6 +388,13 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_
2348 #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
2349
2350 /*
2351 + * Bits to be placed in guest kernel available PTE bits (architecture
2352 + * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
2353 + */
2354 +#define _GNTMAP_guest_avail0 (16)
2355 +#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
2356 +
2357 +/*
2358 * Values for error status returns. All errors are -ve.
2359 */
2360 #define GNTST_okay (0) /* Normal return. */
2361 @@ -361,7 +407,8 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_
2362 #define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
2363 #define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
2364 #define GNTST_bad_page (-9) /* Specified page was invalid for op. */
2365 -#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary */
2366 +#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */
2367 +#define GNTST_address_too_big (-11) /* transfer page address too large. */
2368
2369 #define GNTTABOP_error_msgs { \
2370 "okay", \
2371 @@ -374,7 +421,18 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_
2372 "no spare translation slot in the I/O MMU", \
2373 "permission denied", \
2374 "bad page", \
2375 - "copy arguments cross page boundary" \
2376 + "copy arguments cross page boundary", \
2377 + "page address size too large" \
2378 }
2379
2380 #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
2381 +
2382 +/*
2383 + * Local variables:
2384 + * mode: C
2385 + * c-set-style: "BSD"
2386 + * c-basic-offset: 4
2387 + * tab-width: 4
2388 + * indent-tabs-mode: nil
2389 + * End:
2390 + */
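[Editorial note: as a usage illustration, a hypervisor-mediated copy from a local frame into a foreign grant reference would be issued roughly as below. A minimal sketch, assuming the HYPERVISOR_grant_table_op() wrapper and a caller-supplied source gmfn.]

    #include <xen/interface/grant_table.h>

    /* Sketch: copy len bytes (within one page) into a foreign grant ref. */
    static int copy_to_foreign(xen_pfn_t local_gmfn, domid_t remote,
                               grant_ref_t remote_ref, uint16_t len)
    {
        gnttab_copy_t op = {
            .source.u.gmfn = local_gmfn,
            .source.domid  = DOMID_SELF,
            .dest.u.ref    = remote_ref,
            .dest.domid    = remote,
            .len           = len,               /* must not cross a page */
            .flags         = GNTCOPY_dest_gref, /* dest is a grant reference */
        };
        int rc = HYPERVISOR_grant_table_op(GNTTABOP_copy, &op, 1);

        return rc ? rc : op.status;             /* status is a GNTST_* code */
    }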
2391 --- sle11-2009-09-18.orig/include/xen/interface/io/blkif.h 2009-09-18 10:11:48.000000000 +0200
2392 +++ sle11-2009-09-18/include/xen/interface/io/blkif.h 2008-11-25 12:35:56.000000000 +0100
2393 @@ -3,6 +3,24 @@
2394 *
2395 * Unified block-device I/O interface for Xen guest OSes.
2396 *
2397 + * Permission is hereby granted, free of charge, to any person obtaining a copy
2398 + * of this software and associated documentation files (the "Software"), to
2399 + * deal in the Software without restriction, including without limitation the
2400 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
2401 + * sell copies of the Software, and to permit persons to whom the Software is
2402 + * furnished to do so, subject to the following conditions:
2403 + *
2404 + * The above copyright notice and this permission notice shall be included in
2405 + * all copies or substantial portions of the Software.
2406 + *
2407 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2408 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2409 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
2410 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2411 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2412 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2413 + * DEALINGS IN THE SOFTWARE.
2414 + *
2415 * Copyright (c) 2003-2004, Keir Fraser
2416 */
2417
2418 @@ -24,8 +42,10 @@
2419 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
2420 */
2421
2422 -typedef uint16_t blkif_vdev_t;
2423 -typedef uint64_t blkif_sector_t;
2424 +#ifndef blkif_vdev_t
2425 +#define blkif_vdev_t uint16_t
2426 +#endif
2427 +#define blkif_sector_t uint64_t
2428
2429 /*
2430 * REQUEST CODES.
2431 @@ -34,7 +54,7 @@ typedef uint64_t blkif_sector_t;
2432 #define BLKIF_OP_WRITE 1
2433 /*
2434 * Recognised only if "feature-barrier" is present in backend xenbus info.
2435 - * The "feature_barrier" node contains a boolean indicating whether barrier
2436 + * The "feature-barrier" node contains a boolean indicating whether barrier
2437 * requests are likely to succeed or fail. Either way, a barrier request
2438 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
2439 * the underlying block-device hardware. The boolean simply indicates whether
2440 @@ -43,33 +63,50 @@ typedef uint64_t blkif_sector_t;
2441 * create the "feature-barrier" node!
2442 */
2443 #define BLKIF_OP_WRITE_BARRIER 2
2444 +/*
2445 + * Recognised if "feature-flush-cache" is present in backend xenbus
2446 + * info. A flush will ask the underlying storage hardware to flush its
2447 + * non-volatile caches as appropriate. The "feature-flush-cache" node
2448 + * contains a boolean indicating whether flush requests are likely to
2449 + * succeed or fail. Either way, a flush request may fail at any time
2450 + * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
2451 + * block-device hardware. The boolean simply indicates whether or not it
2452 + * is worthwhile for the frontend to attempt flushes. If a backend does
2453 + * not recognise BLKIF_OP_FLUSH_DISKCACHE, it should *not* create the
2454 + * "feature-flush-cache" node!
2455 + */
2456 +#define BLKIF_OP_FLUSH_DISKCACHE 3
2457
2458 /*
2459 * Maximum scatter/gather segments per request.
2460 - * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
2461 + * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
2462 * NB. This could be 12 if the ring indexes weren't stored in the same page.
2463 */
2464 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
2465
2466 +struct blkif_request_segment {
2467 + grant_ref_t gref; /* reference to I/O buffer frame */
2468 + /* @first_sect: first sector in frame to transfer (inclusive). */
2469 + /* @last_sect: last sector in frame to transfer (inclusive). */
2470 + uint8_t first_sect, last_sect;
2471 +};
2472 +
2473 struct blkif_request {
2474 - uint8_t operation; /* BLKIF_OP_??? */
2475 - uint8_t nr_segments; /* number of segments */
2476 - blkif_vdev_t handle; /* only for read/write requests */
2477 - uint64_t id; /* private guest value, echoed in resp */
2478 - blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
2479 - struct blkif_request_segment {
2480 - grant_ref_t gref; /* reference to I/O buffer frame */
2481 - /* @first_sect: first sector in frame to transfer (inclusive). */
2482 - /* @last_sect: last sector in frame to transfer (inclusive). */
2483 - uint8_t first_sect, last_sect;
2484 - } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
2485 + uint8_t operation; /* BLKIF_OP_??? */
2486 + uint8_t nr_segments; /* number of segments */
2487 + blkif_vdev_t handle; /* only for read/write requests */
2488 + uint64_t id; /* private guest value, echoed in resp */
2489 + blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
2490 + struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
2491 };
2492 +typedef struct blkif_request blkif_request_t;
2493
2494 struct blkif_response {
2495 - uint64_t id; /* copied from request */
2496 - uint8_t operation; /* copied from request */
2497 - int16_t status; /* BLKIF_RSP_??? */
2498 + uint64_t id; /* copied from request */
2499 + uint8_t operation; /* copied from request */
2500 + int16_t status; /* BLKIF_RSP_??? */
2501 };
2502 +typedef struct blkif_response blkif_response_t;
2503
2504 /*
2505 * STATUS RETURN CODES.
2506 @@ -92,3 +129,13 @@ DEFINE_RING_TYPES(blkif, struct blkif_re
2507 #define VDISK_READONLY 0x4
2508
2509 #endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
2510 +
2511 +/*
2512 + * Local variables:
2513 + * mode: C
2514 + * c-set-style: "BSD"
2515 + * c-basic-offset: 4
2516 + * tab-width: 4
2517 + * indent-tabs-mode: nil
2518 + * End:
2519 + */
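[Editorial note: to show how a frontend would drive the new flush op, here is a minimal sketch of queuing BLKIF_OP_FLUSH_DISKCACHE on an already connected front ring; the ring helpers come from io/ring.h and the notification plumbing is left to the caller.]

    /* Sketch: queue a cache flush; flushes carry no data segments. */
    static void queue_flush(blkif_front_ring_t *ring,
                            blkif_vdev_t handle, uint64_t id)
    {
        blkif_request_t *req = RING_GET_REQUEST(ring, ring->req_prod_pvt);

        req->operation     = BLKIF_OP_FLUSH_DISKCACHE;
        req->nr_segments   = 0;
        req->handle        = handle;
        req->id            = id;   /* echoed back in blkif_response.id */
        req->sector_number = 0;

        ring->req_prod_pvt++;
        /* caller: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() + event channel kick */
    }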
2520 --- sle11-2009-09-18.orig/include/xen/interface/io/console.h 2009-09-18 10:11:48.000000000 +0200
2521 +++ sle11-2009-09-18/include/xen/interface/io/console.h 2008-11-25 12:35:56.000000000 +0100
2522 @@ -3,6 +3,24 @@
2523 *
2524 * Console I/O interface for Xen guest OSes.
2525 *
2526 + * Permission is hereby granted, free of charge, to any person obtaining a copy
2527 + * of this software and associated documentation files (the "Software"), to
2528 + * deal in the Software without restriction, including without limitation the
2529 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
2530 + * sell copies of the Software, and to permit persons to whom the Software is
2531 + * furnished to do so, subject to the following conditions:
2532 + *
2533 + * The above copyright notice and this permission notice shall be included in
2534 + * all copies or substantial portions of the Software.
2535 + *
2536 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2537 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2538 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
2539 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2540 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2541 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2542 + * DEALINGS IN THE SOFTWARE.
2543 + *
2544 * Copyright (c) 2005, Keir Fraser
2545 */
2546
2547 @@ -21,3 +39,13 @@ struct xencons_interface {
2548 };
2549
2550 #endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
2551 +
2552 +/*
2553 + * Local variables:
2554 + * mode: C
2555 + * c-set-style: "BSD"
2556 + * c-basic-offset: 4
2557 + * tab-width: 4
2558 + * indent-tabs-mode: nil
2559 + * End:
2560 + */
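[Editorial note: for orientation, the shared console page is just a pair of byte rings. A producer-side sketch, assuming the in[]/out[] buffers, XENCONS_RING_IDX indices and MASK_XENCONS_IDX() helper that xencons_interface provides, plus the usual kernel memory barriers.]

    #include <xen/interface/io/console.h>

    /* Sketch: push bytes into the guest->backend console ring. */
    static int console_write(struct xencons_interface *intf,
                             const char *data, int len)
    {
        XENCONS_RING_IDX cons = intf->out_cons, prod = intf->out_prod;
        int sent = 0;

        mb();   /* read indices before touching the buffer */
        while (sent < len && (prod - cons) < sizeof(intf->out))
            intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
        wmb();  /* data must be visible before the producer index moves */
        intf->out_prod = prod;

        return sent;  /* caller notifies the console event channel */
    }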
2561 --- sle11-2009-09-18.orig/include/xen/interface/io/fbif.h 2009-09-18 10:11:48.000000000 +0200
2562 +++ sle11-2009-09-18/include/xen/interface/io/fbif.h 2008-11-25 12:35:56.000000000 +0100
2563 @@ -41,12 +41,13 @@
2564 */
2565 #define XENFB_TYPE_UPDATE 2
2566
2567 -struct xenfb_update {
2568 - uint8_t type; /* XENFB_TYPE_UPDATE */
2569 - int32_t x; /* source x */
2570 - int32_t y; /* source y */
2571 - int32_t width; /* rect width */
2572 - int32_t height; /* rect height */
2573 +struct xenfb_update
2574 +{
2575 + uint8_t type; /* XENFB_TYPE_UPDATE */
2576 + int32_t x; /* source x */
2577 + int32_t y; /* source y */
2578 + int32_t width; /* rect width */
2579 + int32_t height; /* rect height */
2580 };
2581
2582 /*
2583 @@ -55,36 +56,58 @@ struct xenfb_update {
2584 */
2585 #define XENFB_TYPE_RESIZE 3
2586
2587 -struct xenfb_resize {
2588 - uint8_t type; /* XENFB_TYPE_RESIZE */
2589 - int32_t width; /* width in pixels */
2590 - int32_t height; /* height in pixels */
2591 - int32_t stride; /* stride in bytes */
2592 - int32_t depth; /* depth in bits */
2593 - int32_t offset; /* start offset within framebuffer */
2594 +struct xenfb_resize
2595 +{
2596 + uint8_t type; /* XENFB_TYPE_RESIZE */
2597 + int32_t width; /* width in pixels */
2598 + int32_t height; /* height in pixels */
2599 + int32_t stride; /* stride in bytes */
2600 + int32_t depth; /* depth in bits */
2601 + int32_t offset; /* offset of the framebuffer in bytes */
2602 };
2603
2604 #define XENFB_OUT_EVENT_SIZE 40
2605
2606 -union xenfb_out_event {
2607 - uint8_t type;
2608 - struct xenfb_update update;
2609 - struct xenfb_resize resize;
2610 - char pad[XENFB_OUT_EVENT_SIZE];
2611 +union xenfb_out_event
2612 +{
2613 + uint8_t type;
2614 + struct xenfb_update update;
2615 + struct xenfb_resize resize;
2616 + char pad[XENFB_OUT_EVENT_SIZE];
2617 };
2618
2619 /* In events (backend -> frontend) */
2620
2621 /*
2622 * Frontends should ignore unknown in events.
2623 - * No in events currently defined.
2624 */
2625
2626 +/*
2627 + * Framebuffer refresh period advice
2628 + * The backend sends it to advise the frontend of its preferred refresh
2629 + * period. Frontends that keep the framebuffer constantly up-to-date
2630 + * just ignore it. Frontends that use the advice should immediately
2631 + * refresh the framebuffer (and send an update notification event if
2632 + * those have been requested), then use the update frequency to guide
2633 + * their periodic refreshes.
2634 + */
2635 +#define XENFB_TYPE_REFRESH_PERIOD 1
2636 +#define XENFB_NO_REFRESH 0
2637 +
2638 +struct xenfb_refresh_period
2639 +{
2640 + uint8_t type; /* XENFB_TYPE_REFRESH_PERIOD */
2641 + uint32_t period; /* period of refresh, in ms,
2642 + * XENFB_NO_REFRESH if no refresh is needed */
2643 +};
2644 +
2645 #define XENFB_IN_EVENT_SIZE 40
2646
2647 -union xenfb_in_event {
2648 - uint8_t type;
2649 - char pad[XENFB_IN_EVENT_SIZE];
2650 +union xenfb_in_event
2651 +{
2652 + uint8_t type;
2653 + struct xenfb_refresh_period refresh_period;
2654 + char pad[XENFB_IN_EVENT_SIZE];
2655 };
2656
2657 /* shared page */
2658 @@ -93,41 +116,41 @@ union xenfb_in_event {
2659 #define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
2660 #define XENFB_IN_RING_OFFS 1024
2661 #define XENFB_IN_RING(page) \
2662 - ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
2663 + ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
2664 #define XENFB_IN_RING_REF(page, idx) \
2665 - (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
2666 + (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
2667
2668 #define XENFB_OUT_RING_SIZE 2048
2669 #define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
2670 #define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
2671 #define XENFB_OUT_RING(page) \
2672 - ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
2673 + ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
2674 #define XENFB_OUT_RING_REF(page, idx) \
2675 - (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
2676 + (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
2677
2678 -struct xenfb_page {
2679 - uint32_t in_cons, in_prod;
2680 - uint32_t out_cons, out_prod;
2681 -
2682 - int32_t width; /* width of the framebuffer (in pixels) */
2683 - int32_t height; /* height of the framebuffer (in pixels) */
2684 - uint32_t line_length; /* length of a row of pixels (in bytes) */
2685 - uint32_t mem_length; /* length of the framebuffer (in bytes) */
2686 - uint8_t depth; /* depth of a pixel (in bits) */
2687 -
2688 - /*
2689 - * Framebuffer page directory
2690 - *
2691 - * Each directory page holds PAGE_SIZE / sizeof(*pd)
2692 - * framebuffer pages, and can thus map up to PAGE_SIZE *
2693 - * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
2694 - * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2
2695 - * Megs 64 bit. 256 directories give enough room for a 512
2696 - * Meg framebuffer with a max resolution of 12,800x10,240.
2697 - * Should be enough for a while with room leftover for
2698 - * expansion.
2699 - */
2700 - unsigned long pd[256];
2701 +struct xenfb_page
2702 +{
2703 + uint32_t in_cons, in_prod;
2704 + uint32_t out_cons, out_prod;
2705 +
2706 + int32_t width; /* the width of the framebuffer (in pixels) */
2707 + int32_t height; /* the height of the framebuffer (in pixels) */
2708 + uint32_t line_length; /* the length of a row of pixels (in bytes) */
2709 + uint32_t mem_length; /* the length of the framebuffer (in bytes) */
2710 + uint8_t depth; /* the depth of a pixel (in bits) */
2711 +
2712 + /*
2713 + * Framebuffer page directory
2714 + *
2715 + * Each directory page holds PAGE_SIZE / sizeof(*pd)
2716 + * framebuffer pages, and can thus map up to PAGE_SIZE *
2717 + * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
2718 + * sizeof(unsigned long) == 4/8, that's 4 Megs 32 bit and 2 Megs
2719 + * 64 bit. 256 directories give enough room for a 512 Meg
2720 + * framebuffer with a max resolution of 12,800x10,240. Should
2721 + * be enough for a while with room leftover for expansion.
2722 + */
2723 + unsigned long pd[256];
2724 };
2725
2726 /*
2727 @@ -141,3 +164,13 @@ struct xenfb_page {
2728 #endif
2729
2730 #endif
2731 +
2732 +/*
2733 + * Local variables:
2734 + * mode: C
2735 + * c-set-style: "BSD"
2736 + * c-basic-offset: 4
2737 + * tab-width: 4
2738 + * indent-tabs-mode: nil
2739 + * End:
2740 + */
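[Editorial note: a frontend consuming the new advice event could look roughly as below. A minimal sketch using the ring macros above, with memory barriers and event-channel handling omitted for brevity.]

    /* Sketch: drain backend->frontend events, honouring refresh advice. */
    static void handle_fb_in_events(struct xenfb_page *page, uint32_t *period_ms)
    {
        uint32_t cons = page->in_cons;

        while (cons != page->in_prod) {
            union xenfb_in_event *ev = &XENFB_IN_RING_REF(page, cons);

            if (ev->type == XENFB_TYPE_REFRESH_PERIOD)
                *period_ms = ev->refresh_period.period; /* XENFB_NO_REFRESH == off */
            /* unknown in events are ignored, as the comment above requires */
            cons++;
        }
        page->in_cons = cons;
    }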
2741 --- sle11-2009-09-18.orig/include/xen/interface/io/kbdif.h 2009-09-18 10:11:48.000000000 +0200
2742 +++ sle11-2009-09-18/include/xen/interface/io/kbdif.h 2008-11-25 12:35:56.000000000 +0100
2743 @@ -45,34 +45,38 @@
2744 */
2745 #define XENKBD_TYPE_POS 4
2746
2747 -struct xenkbd_motion {
2748 - uint8_t type; /* XENKBD_TYPE_MOTION */
2749 - int32_t rel_x; /* relative X motion */
2750 - int32_t rel_y; /* relative Y motion */
2751 - int32_t rel_z; /* relative Z motion (wheel) */
2752 -};
2753 -
2754 -struct xenkbd_key {
2755 - uint8_t type; /* XENKBD_TYPE_KEY */
2756 - uint8_t pressed; /* 1 if pressed; 0 otherwise */
2757 - uint32_t keycode; /* KEY_* from linux/input.h */
2758 -};
2759 -
2760 -struct xenkbd_position {
2761 - uint8_t type; /* XENKBD_TYPE_POS */
2762 - int32_t abs_x; /* absolute X position (in FB pixels) */
2763 - int32_t abs_y; /* absolute Y position (in FB pixels) */
2764 - int32_t rel_z; /* relative Z motion (wheel) */
2765 +struct xenkbd_motion
2766 +{
2767 + uint8_t type; /* XENKBD_TYPE_MOTION */
2768 + int32_t rel_x; /* relative X motion */
2769 + int32_t rel_y; /* relative Y motion */
2770 + int32_t rel_z; /* relative Z motion (wheel) */
2771 +};
2772 +
2773 +struct xenkbd_key
2774 +{
2775 + uint8_t type; /* XENKBD_TYPE_KEY */
2776 + uint8_t pressed; /* 1 if pressed; 0 otherwise */
2777 + uint32_t keycode; /* KEY_* from linux/input.h */
2778 +};
2779 +
2780 +struct xenkbd_position
2781 +{
2782 + uint8_t type; /* XENKBD_TYPE_POS */
2783 + int32_t abs_x; /* absolute X position (in FB pixels) */
2784 + int32_t abs_y; /* absolute Y position (in FB pixels) */
2785 + int32_t rel_z; /* relative Z motion (wheel) */
2786 };
2787
2788 #define XENKBD_IN_EVENT_SIZE 40
2789
2790 -union xenkbd_in_event {
2791 - uint8_t type;
2792 - struct xenkbd_motion motion;
2793 - struct xenkbd_key key;
2794 - struct xenkbd_position pos;
2795 - char pad[XENKBD_IN_EVENT_SIZE];
2796 +union xenkbd_in_event
2797 +{
2798 + uint8_t type;
2799 + struct xenkbd_motion motion;
2800 + struct xenkbd_key key;
2801 + struct xenkbd_position pos;
2802 + char pad[XENKBD_IN_EVENT_SIZE];
2803 };
2804
2805 /* Out events (frontend -> backend) */
2806 @@ -85,9 +89,10 @@ union xenkbd_in_event {
2807
2808 #define XENKBD_OUT_EVENT_SIZE 40
2809
2810 -union xenkbd_out_event {
2811 - uint8_t type;
2812 - char pad[XENKBD_OUT_EVENT_SIZE];
2813 +union xenkbd_out_event
2814 +{
2815 + uint8_t type;
2816 + char pad[XENKBD_OUT_EVENT_SIZE];
2817 };
2818
2819 /* shared page */
2820 @@ -96,21 +101,32 @@ union xenkbd_out_event {
2821 #define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
2822 #define XENKBD_IN_RING_OFFS 1024
2823 #define XENKBD_IN_RING(page) \
2824 - ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
2825 + ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
2826 #define XENKBD_IN_RING_REF(page, idx) \
2827 - (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
2828 + (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
2829
2830 #define XENKBD_OUT_RING_SIZE 1024
2831 #define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
2832 #define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
2833 #define XENKBD_OUT_RING(page) \
2834 - ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
2835 + ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
2836 #define XENKBD_OUT_RING_REF(page, idx) \
2837 - (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
2838 + (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
2839
2840 -struct xenkbd_page {
2841 - uint32_t in_cons, in_prod;
2842 - uint32_t out_cons, out_prod;
2843 +struct xenkbd_page
2844 +{
2845 + uint32_t in_cons, in_prod;
2846 + uint32_t out_cons, out_prod;
2847 };
2848
2849 #endif
2850 +
2851 +/*
2852 + * Local variables:
2853 + * mode: C
2854 + * c-set-style: "BSD"
2855 + * c-basic-offset: 4
2856 + * tab-width: 4
2857 + * indent-tabs-mode: nil
2858 + * End:
2859 + */
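[Editorial note: consuming input events with the masked-index macros above might look like this. A minimal sketch, again with barriers and notification omitted; XENKBD_TYPE_MOTION and XENKBD_TYPE_KEY are the event types defined earlier in this header.]

    /* Sketch: drain keyboard/pointer events from the shared page. */
    static void handle_kbd_events(struct xenkbd_page *page)
    {
        uint32_t cons = page->in_cons;

        while (cons != page->in_prod) {
            union xenkbd_in_event *ev = &XENKBD_IN_RING_REF(page, cons);

            switch (ev->type) {
            case XENKBD_TYPE_MOTION: /* ev->motion.rel_{x,y,z} */
                break;
            case XENKBD_TYPE_KEY:    /* ev->key.keycode, ev->key.pressed */
                break;
            case XENKBD_TYPE_POS:    /* ev->pos.abs_{x,y}, ev->pos.rel_z */
                break;
            }
            cons++;
        }
        page->in_cons = cons;
    }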
2860 --- sle11-2009-09-18.orig/include/xen/interface/io/netif.h 2009-09-18 10:11:48.000000000 +0200
2861 +++ sle11-2009-09-18/include/xen/interface/io/netif.h 2008-11-25 12:35:56.000000000 +0100
2862 @@ -3,6 +3,24 @@
2863 *
2864 * Unified network-device I/O interface for Xen guest OSes.
2865 *
2866 + * Permission is hereby granted, free of charge, to any person obtaining a copy
2867 + * of this software and associated documentation files (the "Software"), to
2868 + * deal in the Software without restriction, including without limitation the
2869 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
2870 + * sell copies of the Software, and to permit persons to whom the Software is
2871 + * furnished to do so, subject to the following conditions:
2872 + *
2873 + * The above copyright notice and this permission notice shall be included in
2874 + * all copies or substantial portions of the Software.
2875 + *
2876 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2877 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2878 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
2879 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
2880 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
2881 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2882 + * DEALINGS IN THE SOFTWARE.
2883 + *
2884 * Copyright (c) 2003-2004, Keir Fraser
2885 */
2886
2887 @@ -47,18 +65,21 @@
2888 #define _NETTXF_extra_info (3)
2889 #define NETTXF_extra_info (1U<<_NETTXF_extra_info)
2890
2891 -struct xen_netif_tx_request {
2892 +struct netif_tx_request {
2893 grant_ref_t gref; /* Reference to buffer page */
2894 uint16_t offset; /* Offset within buffer page */
2895 uint16_t flags; /* NETTXF_* */
2896 uint16_t id; /* Echoed in response message. */
2897 uint16_t size; /* Packet size in bytes. */
2898 };
2899 +typedef struct netif_tx_request netif_tx_request_t;
2900
2901 /* Types of netif_extra_info descriptors. */
2902 -#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
2903 -#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
2904 -#define XEN_NETIF_EXTRA_TYPE_MAX (2)
2905 +#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
2906 +#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
2907 +#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
2908 +#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
2909 +#define XEN_NETIF_EXTRA_TYPE_MAX (4)
2910
2911 /* netif_extra_info flags. */
2912 #define _XEN_NETIF_EXTRA_FLAG_MORE (0)
2913 @@ -71,49 +92,68 @@ struct xen_netif_tx_request {
2914 * This structure needs to fit within both netif_tx_request and
2915 * netif_rx_response for compatibility.
2916 */
2917 -struct xen_netif_extra_info {
2918 - uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
2919 - uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
2920 -
2921 - union {
2922 - struct {
2923 - /*
2924 - * Maximum payload size of each segment. For
2925 - * example, for TCP this is just the path MSS.
2926 - */
2927 - uint16_t size;
2928 -
2929 - /*
2930 - * GSO type. This determines the protocol of
2931 - * the packet and any extra features required
2932 - * to segment the packet properly.
2933 - */
2934 - uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
2935 -
2936 - /* Future expansion. */
2937 - uint8_t pad;
2938 -
2939 - /*
2940 - * GSO features. This specifies any extra GSO
2941 - * features required to process this packet,
2942 - * such as ECN support for TCPv4.
2943 - */
2944 - uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
2945 - } gso;
2946 +struct netif_extra_info {
2947 + uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
2948 + uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
2949 +
2950 + union {
2951 + /*
2952 + * XEN_NETIF_EXTRA_TYPE_GSO:
2953 + */
2954 + struct {
2955 + /*
2956 + * Maximum payload size of each segment. For example, for TCP this
2957 + * is just the path MSS.
2958 + */
2959 + uint16_t size;
2960 +
2961 + /*
2962 + * GSO type. This determines the protocol of the packet and any
2963 + * extra features required to segment the packet properly.
2964 + */
2965 + uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
2966 +
2967 + /* Future expansion. */
2968 + uint8_t pad;
2969 +
2970 + /*
2971 + * GSO features. This specifies any extra GSO features required
2972 + * to process this packet, such as ECN support for TCPv4.
2973 + */
2974 + uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
2975 + } gso;
2976 +
2977 + /*
2978 + * XEN_NETIF_EXTRA_TYPE_MCAST_{ADD,DEL}:
2979 + * Backend advertises availability via 'feature-multicast-control'
2980 + * xenbus node containing value '1'.
2981 + * Frontend requests this feature by advertising
2982 + * 'request-multicast-control' xenbus node containing value '1'.
2983 + * If multicast control is requested then multicast flooding is
2984 + * disabled and the frontend must explicitly register its interest
2985 + * in multicast groups using dummy transmit requests containing
2986 + * MCAST_{ADD,DEL} extra-info fragments.
2987 + */
2988 + struct {
2989 + uint8_t addr[6]; /* Address to add/remove. */
2990 + } mcast;
2991
2992 - uint16_t pad[3];
2993 - } u;
2994 + uint16_t pad[3];
2995 + } u;
2996 };
2997 +typedef struct netif_extra_info netif_extra_info_t;
2998
2999 -struct xen_netif_tx_response {
3000 - uint16_t id;
3001 - int16_t status; /* NETIF_RSP_* */
3002 +struct netif_tx_response {
3003 + uint16_t id;
3004 + int16_t status; /* NETIF_RSP_* */
3005 };
3006 +typedef struct netif_tx_response netif_tx_response_t;
3007
3008 -struct xen_netif_rx_request {
3009 - uint16_t id; /* Echoed in response message. */
3010 - grant_ref_t gref; /* Reference to incoming granted frame */
3011 +struct netif_rx_request {
3012 + uint16_t id; /* Echoed in response message. */
3013 + grant_ref_t gref; /* Reference to incoming granted frame */
3014 };
3015 +typedef struct netif_rx_request netif_rx_request_t;
3016
3017 /* Packet data has been validated against protocol checksum. */
3018 #define _NETRXF_data_validated (0)
3019 @@ -131,23 +171,20 @@ struct xen_netif_rx_request {
3020 #define _NETRXF_extra_info (3)
3021 #define NETRXF_extra_info (1U<<_NETRXF_extra_info)
3022
3023 -struct xen_netif_rx_response {
3024 +struct netif_rx_response {
3025 uint16_t id;
3026 uint16_t offset; /* Offset in page of start of received packet */
3027 uint16_t flags; /* NETRXF_* */
3028 int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
3029 };
3030 +typedef struct netif_rx_response netif_rx_response_t;
3031
3032 /*
3033 * Generate netif ring structures and types.
3034 */
3035
3036 -DEFINE_RING_TYPES(xen_netif_tx,
3037 - struct xen_netif_tx_request,
3038 - struct xen_netif_tx_response);
3039 -DEFINE_RING_TYPES(xen_netif_rx,
3040 - struct xen_netif_rx_request,
3041 - struct xen_netif_rx_response);
3042 +DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response);
3043 +DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
3044
3045 #define NETIF_RSP_DROPPED -2
3046 #define NETIF_RSP_ERROR -1
3047 @@ -156,3 +193,13 @@ DEFINE_RING_TYPES(xen_netif_rx,
3048 #define NETIF_RSP_NULL 1
3049
3050 #endif
3051 +
3052 +/*
3053 + * Local variables:
3054 + * mode: C
3055 + * c-set-style: "BSD"
3056 + * c-basic-offset: 4
3057 + * tab-width: 4
3058 + * indent-tabs-mode: nil
3059 + * End:
3060 + */
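[Editorial note: the multicast-control fragments described above ride on dummy transmit requests. A minimal sketch of filling one extra-info slot; placing it on the tx ring next to a netif_tx_request with NETTXF_extra_info set is frontend plumbing and omitted here.]

    #include <string.h>

    /* Sketch: register interest in one multicast group. */
    static void fill_mcast_add(struct netif_extra_info *extra,
                               const uint8_t mac[6])
    {
        memset(extra, 0, sizeof(*extra));
        extra->type  = XEN_NETIF_EXTRA_TYPE_MCAST_ADD;
        extra->flags = 0;  /* or XEN_NETIF_EXTRA_FLAG_MORE if more follow */
        memcpy(extra->u.mcast.addr, mac, 6);
    }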
3061 --- sle11-2009-09-18.orig/include/xen/interface/io/protocols.h 2009-09-18 10:11:48.000000000 +0200
3062 +++ sle11-2009-09-18/include/xen/interface/io/protocols.h 2008-11-25 12:35:56.000000000 +0100
3063 @@ -1,10 +1,31 @@
3064 +/******************************************************************************
3065 + * protocols.h
3066 + *
3067 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3068 + * of this software and associated documentation files (the "Software"), to
3069 + * deal in the Software without restriction, including without limitation the
3070 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3071 + * sell copies of the Software, and to permit persons to whom the Software is
3072 + * furnished to do so, subject to the following conditions:
3073 + *
3074 + * The above copyright notice and this permission notice shall be included in
3075 + * all copies or substantial portions of the Software.
3076 + *
3077 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3078 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3079 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3080 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3081 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3082 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3083 + * DEALINGS IN THE SOFTWARE.
3084 + */
3085 +
3086 #ifndef __XEN_PROTOCOLS_H__
3087 #define __XEN_PROTOCOLS_H__
3088
3089 #define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
3090 #define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
3091 #define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
3092 -#define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi"
3093
3094 #if defined(__i386__)
3095 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
3096 @@ -12,8 +33,6 @@
3097 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
3098 #elif defined(__ia64__)
3099 # define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
3100 -#elif defined(__powerpc64__)
3101 -# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
3102 #else
3103 # error arch fixup needed here
3104 #endif
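[Editorial note: frontends typically publish their native ABI in xenstore so that mixed 32-/64-bit domains can negotiate ring layout. A minimal sketch, assuming the xenbus_printf() helper and XBT_NIL from xenbus.h.]

    #include <xen/interface/io/protocols.h>
    #include <xen/xenbus.h>

    /* Sketch: advertise the ring protocol this frontend speaks. */
    static int advertise_protocol(struct xenbus_device *dev)
    {
        return xenbus_printf(XBT_NIL, dev->nodename,
                             "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE);
    }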
3105 --- sle11-2009-09-18.orig/include/xen/interface/io/ring.h 2009-09-18 10:11:48.000000000 +0200
3106 +++ sle11-2009-09-18/include/xen/interface/io/ring.h 2008-11-25 12:35:56.000000000 +0100
3107 @@ -3,16 +3,42 @@
3108 *
3109 * Shared producer-consumer ring macros.
3110 *
3111 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3112 + * of this software and associated documentation files (the "Software"), to
3113 + * deal in the Software without restriction, including without limitation the
3114 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3115 + * sell copies of the Software, and to permit persons to whom the Software is
3116 + * furnished to do so, subject to the following conditions:
3117 + *
3118 + * The above copyright notice and this permission notice shall be included in
3119 + * all copies or substantial portions of the Software.
3120 + *
3121 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3122 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3123 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3124 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3125 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3126 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3127 + * DEALINGS IN THE SOFTWARE.
3128 + *
3129 * Tim Deegan and Andrew Warfield November 2004.
3130 */
3131
3132 #ifndef __XEN_PUBLIC_IO_RING_H__
3133 #define __XEN_PUBLIC_IO_RING_H__
3134
3135 +#include "../xen-compat.h"
3136 +
3137 +#if __XEN_INTERFACE_VERSION__ < 0x00030208
3138 +#define xen_mb() mb()
3139 +#define xen_rmb() rmb()
3140 +#define xen_wmb() wmb()
3141 +#endif
3142 +
3143 typedef unsigned int RING_IDX;
3144
3145 /* Round a 32-bit unsigned constant down to the nearest power of two. */
3146 -#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
3147 +#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
3148 #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
3149 #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
3150 #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
3151 @@ -25,73 +51,76 @@ typedef unsigned int RING_IDX;
3152 * power of two (so we can mask with (size-1) to loop around).
3153 */
3154 #define __RING_SIZE(_s, _sz) \
3155 - (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
3156 + (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
3157
3158 /*
3159 * Macros to make the correct C datatypes for a new kind of ring.
3160 *
3161 * To make a new ring datatype, you need to have two message structures,
3162 - * let's say struct request, and struct response already defined.
3163 + * let's say request_t, and response_t already defined.
3164 *
3165 * In a header where you want the ring datatype declared, you then do:
3166 *
3167 - * DEFINE_RING_TYPES(mytag, struct request, struct response);
3168 + * DEFINE_RING_TYPES(mytag, request_t, response_t);
3169 *
3170 * These expand out to give you a set of types, as you can see below.
3171 * The most important of these are:
3172 *
3173 - * struct mytag_sring - The shared ring.
3174 - * struct mytag_front_ring - The 'front' half of the ring.
3175 - * struct mytag_back_ring - The 'back' half of the ring.
3176 + * mytag_sring_t - The shared ring.
3177 + * mytag_front_ring_t - The 'front' half of the ring.
3178 + * mytag_back_ring_t - The 'back' half of the ring.
3179 *
3180 * To initialize a ring in your code you need to know the location and size
3181 * of the shared memory area (PAGE_SIZE, for instance). To initialise
3182 * the front half:
3183 *
3184 - * struct mytag_front_ring front_ring;
3185 - * SHARED_RING_INIT((struct mytag_sring *)shared_page);
3186 - * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
3187 - * PAGE_SIZE);
3188 + * mytag_front_ring_t front_ring;
3189 + * SHARED_RING_INIT((mytag_sring_t *)shared_page);
3190 + * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
3191 *
3192 * Initializing the back follows similarly (note that only the front
3193 * initializes the shared ring):
3194 *
3195 - * struct mytag_back_ring back_ring;
3196 - * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
3197 - * PAGE_SIZE);
3198 + * mytag_back_ring_t back_ring;
3199 + * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
3200 */
3201
3202 -#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
3203 - \
3204 -/* Shared ring entry */ \
3205 -union __name##_sring_entry { \
3206 - __req_t req; \
3207 - __rsp_t rsp; \
3208 -}; \
3209 - \
3210 -/* Shared ring page */ \
3211 -struct __name##_sring { \
3212 - RING_IDX req_prod, req_event; \
3213 - RING_IDX rsp_prod, rsp_event; \
3214 - uint8_t pad[48]; \
3215 - union __name##_sring_entry ring[1]; /* variable-length */ \
3216 -}; \
3217 - \
3218 -/* "Front" end's private variables */ \
3219 -struct __name##_front_ring { \
3220 - RING_IDX req_prod_pvt; \
3221 - RING_IDX rsp_cons; \
3222 - unsigned int nr_ents; \
3223 - struct __name##_sring *sring; \
3224 -}; \
3225 - \
3226 -/* "Back" end's private variables */ \
3227 -struct __name##_back_ring { \
3228 - RING_IDX rsp_prod_pvt; \
3229 - RING_IDX req_cons; \
3230 - unsigned int nr_ents; \
3231 - struct __name##_sring *sring; \
3232 -};
3233 +#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
3234 + \
3235 +/* Shared ring entry */ \
3236 +union __name##_sring_entry { \
3237 + __req_t req; \
3238 + __rsp_t rsp; \
3239 +}; \
3240 + \
3241 +/* Shared ring page */ \
3242 +struct __name##_sring { \
3243 + RING_IDX req_prod, req_event; \
3244 + RING_IDX rsp_prod, rsp_event; \
3245 + uint8_t pad[48]; \
3246 + union __name##_sring_entry ring[1]; /* variable-length */ \
3247 +}; \
3248 + \
3249 +/* "Front" end's private variables */ \
3250 +struct __name##_front_ring { \
3251 + RING_IDX req_prod_pvt; \
3252 + RING_IDX rsp_cons; \
3253 + unsigned int nr_ents; \
3254 + struct __name##_sring *sring; \
3255 +}; \
3256 + \
3257 +/* "Back" end's private variables */ \
3258 +struct __name##_back_ring { \
3259 + RING_IDX rsp_prod_pvt; \
3260 + RING_IDX req_cons; \
3261 + unsigned int nr_ents; \
3262 + struct __name##_sring *sring; \
3263 +}; \
3264 + \
3265 +/* Syntactic sugar */ \
3266 +typedef struct __name##_sring __name##_sring_t; \
3267 +typedef struct __name##_front_ring __name##_front_ring_t; \
3268 +typedef struct __name##_back_ring __name##_back_ring_t
3269
3270 /*
3271 * Macros for manipulating rings.
3272 @@ -109,86 +138,94 @@ struct __name##_back_ring { \
3273 */
3274
3275 /* Initialising empty rings */
3276 -#define SHARED_RING_INIT(_s) do { \
3277 - (_s)->req_prod = (_s)->rsp_prod = 0; \
3278 - (_s)->req_event = (_s)->rsp_event = 1; \
3279 - memset((_s)->pad, 0, sizeof((_s)->pad)); \
3280 +#define SHARED_RING_INIT(_s) do { \
3281 + (_s)->req_prod = (_s)->rsp_prod = 0; \
3282 + (_s)->req_event = (_s)->rsp_event = 1; \
3283 + (void)memset((_s)->pad, 0, sizeof((_s)->pad)); \
3284 } while(0)
3285
3286 -#define FRONT_RING_INIT(_r, _s, __size) do { \
3287 - (_r)->req_prod_pvt = 0; \
3288 - (_r)->rsp_cons = 0; \
3289 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3290 - (_r)->sring = (_s); \
3291 +#define FRONT_RING_INIT(_r, _s, __size) do { \
3292 + (_r)->req_prod_pvt = 0; \
3293 + (_r)->rsp_cons = 0; \
3294 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3295 + (_r)->sring = (_s); \
3296 } while (0)
3297
3298 -#define BACK_RING_INIT(_r, _s, __size) do { \
3299 - (_r)->rsp_prod_pvt = 0; \
3300 - (_r)->req_cons = 0; \
3301 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3302 - (_r)->sring = (_s); \
3303 +#define BACK_RING_INIT(_r, _s, __size) do { \
3304 + (_r)->rsp_prod_pvt = 0; \
3305 + (_r)->req_cons = 0; \
3306 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3307 + (_r)->sring = (_s); \
3308 } while (0)
3309
3310 /* Initialize to existing shared indexes -- for recovery */
3311 -#define FRONT_RING_ATTACH(_r, _s, __size) do { \
3312 - (_r)->sring = (_s); \
3313 - (_r)->req_prod_pvt = (_s)->req_prod; \
3314 - (_r)->rsp_cons = (_s)->rsp_prod; \
3315 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3316 +#define FRONT_RING_ATTACH(_r, _s, __size) do { \
3317 + (_r)->sring = (_s); \
3318 + (_r)->req_prod_pvt = (_s)->req_prod; \
3319 + (_r)->rsp_cons = (_s)->rsp_prod; \
3320 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3321 } while (0)
3322
3323 -#define BACK_RING_ATTACH(_r, _s, __size) do { \
3324 - (_r)->sring = (_s); \
3325 - (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
3326 - (_r)->req_cons = (_s)->req_prod; \
3327 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3328 +#define BACK_RING_ATTACH(_r, _s, __size) do { \
3329 + (_r)->sring = (_s); \
3330 + (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
3331 + (_r)->req_cons = (_s)->req_prod; \
3332 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3333 } while (0)
3334
3335 /* How big is this ring? */
3336 -#define RING_SIZE(_r) \
3337 +#define RING_SIZE(_r) \
3338 ((_r)->nr_ents)
3339
3340 /* Number of free requests (for use on front side only). */
3341 -#define RING_FREE_REQUESTS(_r) \
3342 +#define RING_FREE_REQUESTS(_r) \
3343 (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
3344
3345 /* Test if there is an empty slot available on the front ring.
3346 * (This is only meaningful from the front. )
3347 */
3348 -#define RING_FULL(_r) \
3349 +#define RING_FULL(_r) \
3350 (RING_FREE_REQUESTS(_r) == 0)
3351
3352 /* Test if there are outstanding messages to be processed on a ring. */
3353 -#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
3354 +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
3355 ((_r)->sring->rsp_prod - (_r)->rsp_cons)
3356
3357 -#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
3358 - ({ \
3359 - unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
3360 - unsigned int rsp = RING_SIZE(_r) - \
3361 - ((_r)->req_cons - (_r)->rsp_prod_pvt); \
3362 - req < rsp ? req : rsp; \
3363 - })
3364 +#ifdef __GNUC__
3365 +#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
3366 + unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
3367 + unsigned int rsp = RING_SIZE(_r) - \
3368 + ((_r)->req_cons - (_r)->rsp_prod_pvt); \
3369 + req < rsp ? req : rsp; \
3370 +})
3371 +#else
3372 +/* Same as above, but without the nice GCC ({ ... }) syntax. */
3373 +#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
3374 + ((((_r)->sring->req_prod - (_r)->req_cons) < \
3375 + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
3376 + ((_r)->sring->req_prod - (_r)->req_cons) : \
3377 + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
3378 +#endif
3379
3380 /* Direct access to individual ring elements, by index. */
3381 -#define RING_GET_REQUEST(_r, _idx) \
3382 +#define RING_GET_REQUEST(_r, _idx) \
3383 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
3384
3385 -#define RING_GET_RESPONSE(_r, _idx) \
3386 +#define RING_GET_RESPONSE(_r, _idx) \
3387 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
3388
3389 /* Loop termination condition: Would the specified index overflow the ring? */
3390 -#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
3391 +#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
3392 (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
3393
3394 -#define RING_PUSH_REQUESTS(_r) do { \
3395 - wmb(); /* back sees requests /before/ updated producer index */ \
3396 - (_r)->sring->req_prod = (_r)->req_prod_pvt; \
3397 +#define RING_PUSH_REQUESTS(_r) do { \
3398 + xen_wmb(); /* back sees requests /before/ updated producer index */ \
3399 + (_r)->sring->req_prod = (_r)->req_prod_pvt; \
3400 } while (0)
3401
3402 -#define RING_PUSH_RESPONSES(_r) do { \
3403 - wmb(); /* front sees responses /before/ updated producer index */ \
3404 - (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
3405 +#define RING_PUSH_RESPONSES(_r) do { \
3406 + xen_wmb(); /* front sees resps /before/ updated producer index */ \
3407 + (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
3408 } while (0)
3409
3410 /*
3411 @@ -221,40 +258,50 @@ struct __name##_back_ring { \
3412 * field appropriately.
3413 */
3414
3415 -#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
3416 - RING_IDX __old = (_r)->sring->req_prod; \
3417 - RING_IDX __new = (_r)->req_prod_pvt; \
3418 - wmb(); /* back sees requests /before/ updated producer index */ \
3419 - (_r)->sring->req_prod = __new; \
3420 - mb(); /* back sees new requests /before/ we check req_event */ \
3421 - (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
3422 - (RING_IDX)(__new - __old)); \
3423 +#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
3424 + RING_IDX __old = (_r)->sring->req_prod; \
3425 + RING_IDX __new = (_r)->req_prod_pvt; \
3426 + xen_wmb(); /* back sees requests /before/ updated producer index */ \
3427 + (_r)->sring->req_prod = __new; \
3428 + xen_mb(); /* back sees new requests /before/ we check req_event */ \
3429 + (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
3430 + (RING_IDX)(__new - __old)); \
3431 } while (0)
3432
3433 -#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
3434 - RING_IDX __old = (_r)->sring->rsp_prod; \
3435 - RING_IDX __new = (_r)->rsp_prod_pvt; \
3436 - wmb(); /* front sees responses /before/ updated producer index */ \
3437 - (_r)->sring->rsp_prod = __new; \
3438 - mb(); /* front sees new responses /before/ we check rsp_event */ \
3439 - (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
3440 - (RING_IDX)(__new - __old)); \
3441 +#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
3442 + RING_IDX __old = (_r)->sring->rsp_prod; \
3443 + RING_IDX __new = (_r)->rsp_prod_pvt; \
3444 + xen_wmb(); /* front sees resps /before/ updated producer index */ \
3445 + (_r)->sring->rsp_prod = __new; \
3446 + xen_mb(); /* front sees new resps /before/ we check rsp_event */ \
3447 + (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
3448 + (RING_IDX)(__new - __old)); \
3449 } while (0)
3450
3451 -#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
3452 - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3453 - if (_work_to_do) break; \
3454 - (_r)->sring->req_event = (_r)->req_cons + 1; \
3455 - mb(); \
3456 - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3457 +#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
3458 + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3459 + if (_work_to_do) break; \
3460 + (_r)->sring->req_event = (_r)->req_cons + 1; \
3461 + xen_mb(); \
3462 + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3463 } while (0)
3464
3465 -#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
3466 - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3467 - if (_work_to_do) break; \
3468 - (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
3469 - mb(); \
3470 - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3471 +#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
3472 + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3473 + if (_work_to_do) break; \
3474 + (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
3475 + xen_mb(); \
3476 + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3477 } while (0)
3478
3479 #endif /* __XEN_PUBLIC_IO_RING_H__ */
3480 +
3481 +/*
3482 + * Local variables:
3483 + * mode: C
3484 + * c-set-style: "BSD"
3485 + * c-basic-offset: 4
3486 + * tab-width: 4
3487 + * indent-tabs-mode: nil
3488 + * End:
3489 + */
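
For orientation, the ring macros this hunk reflows and renames are consumed roughly as follows. This is a minimal sketch of a frontend queueing one request, assuming hypothetical mytag request/response structures, an already-shared page, and the kernel's PAGE_SIZE; it illustrates the macro contract and is not part of the patch:

#include <linux/types.h>
#include <xen/interface/io/ring.h>      /* the header patched above */

/* Hypothetical wire formats; any fixed-size structs will do. */
typedef struct { uint32_t id, op; }     mytag_request_t;
typedef struct { uint32_t id, status; } mytag_response_t;

DEFINE_RING_TYPES(mytag, mytag_request_t, mytag_response_t);

static void queue_one_request(void *shared_page)
{
    mytag_front_ring_t front;
    mytag_request_t *req;
    int notify;

    /* Only the frontend initialises the shared page itself. */
    SHARED_RING_INIT((mytag_sring_t *)shared_page);
    FRONT_RING_INIT(&front, (mytag_sring_t *)shared_page, PAGE_SIZE);

    if (RING_FULL(&front))
        return;

    req = RING_GET_REQUEST(&front, front.req_prod_pvt);
    req->id = 1;
    req->op = 0;
    front.req_prod_pvt++;

    /* Publish, and learn whether the backend actually needs a kick. */
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front, notify);
    if (notify)
        ;   /* notify_remote_via_irq(...) or equivalent event kick */
}

The _AND_CHECK_NOTIFY variant is generally preferred over a bare RING_PUSH_REQUESTS plus an unconditional kick, since it consults the peer's req_event index to suppress unnecessary event-channel notifications.
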
3490 --- sle11-2009-09-18.orig/include/xen/interface/io/xenbus.h 2009-09-18 10:11:48.000000000 +0200
3491 +++ sle11-2009-09-18/include/xen/interface/io/xenbus.h 2008-11-25 12:35:56.000000000 +0100
3492 @@ -3,42 +3,78 @@
3493 *
3494 * Xenbus protocol details.
3495 *
3496 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3497 + * of this software and associated documentation files (the "Software"), to
3498 + * deal in the Software without restriction, including without limitation the
3499 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3500 + * sell copies of the Software, and to permit persons to whom the Software is
3501 + * furnished to do so, subject to the following conditions:
3502 + *
3503 + * The above copyright notice and this permission notice shall be included in
3504 + * all copies or substantial portions of the Software.
3505 + *
3506 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3507 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3508 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3509 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3510 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3511 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3512 + * DEALINGS IN THE SOFTWARE.
3513 + *
3514 * Copyright (C) 2005 XenSource Ltd.
3515 */
3516
3517 #ifndef _XEN_PUBLIC_IO_XENBUS_H
3518 #define _XEN_PUBLIC_IO_XENBUS_H
3519
3520 -/* The state of either end of the Xenbus, i.e. the current communication
3521 - status of initialisation across the bus. States here imply nothing about
3522 - the state of the connection between the driver and the kernel's device
3523 - layers. */
3524 -enum xenbus_state
3525 -{
3526 - XenbusStateUnknown = 0,
3527 - XenbusStateInitialising = 1,
3528 - XenbusStateInitWait = 2, /* Finished early
3529 - initialisation, but waiting
3530 - for information from the peer
3531 - or hotplug scripts. */
3532 - XenbusStateInitialised = 3, /* Initialised and waiting for a
3533 - connection from the peer. */
3534 - XenbusStateConnected = 4,
3535 - XenbusStateClosing = 5, /* The device is being closed
3536 - due to an error or an unplug
3537 - event. */
3538 - XenbusStateClosed = 6
3539 +/*
3540 + * The state of either end of the Xenbus, i.e. the current communication
3541 + * status of initialisation across the bus. States here imply nothing about
3542 + * the state of the connection between the driver and the kernel's device
3543 + * layers.
3544 + */
3545 +enum xenbus_state {
3546 + XenbusStateUnknown = 0,
3547 +
3548 + XenbusStateInitialising = 1,
3549 +
3550 + /*
3551 + * InitWait: Finished early initialisation but waiting for information
3552 + * from the peer or hotplug scripts.
3553 + */
3554 + XenbusStateInitWait = 2,
3555 +
3556 + /*
3557 + * Initialised: Waiting for a connection from the peer.
3558 + */
3559 + XenbusStateInitialised = 3,
3560 +
3561 + XenbusStateConnected = 4,
3562 +
3563 + /*
3564 + * Closing: The device is being closed due to an error or an unplug event.
3565 + */
3566 + XenbusStateClosing = 5,
3567 +
3568 + XenbusStateClosed = 6,
3569 +
3570 + /*
3571 + * Reconfiguring: The device is being reconfigured.
3572 + */
3573 + XenbusStateReconfiguring = 7,
3574
3575 + XenbusStateReconfigured = 8
3576 };
3577 +typedef enum xenbus_state XenbusState;
3578
3579 #endif /* _XEN_PUBLIC_IO_XENBUS_H */
3580
3581 /*
3582 * Local variables:
3583 - * c-file-style: "linux"
3584 - * indent-tabs-mode: t
3585 - * c-indent-level: 8
3586 - * c-basic-offset: 8
3587 - * tab-width: 8
3588 + * mode: C
3589 + * c-set-style: "BSD"
3590 + * c-basic-offset: 4
3591 + * tab-width: 4
3592 + * indent-tabs-mode: nil
3593 * End:
3594 */
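
The two states appended to the enum (Reconfiguring and Reconfigured) slot into the usual otherend-changed callback pattern. A hedged sketch, with hypothetical handler names standing in for real driver logic:

#include <xen/interface/io/xenbus.h>

static void connect_backend(void)     { /* hypothetical */ }
static void reconfigure_backend(void) { /* hypothetical */ }
static void disconnect_backend(void)  { /* hypothetical */ }

static void frontend_changed(enum xenbus_state frontend_state)
{
    switch (frontend_state) {
    case XenbusStateInitialising:
    case XenbusStateInitWait:
        break;                          /* peer not ready yet */
    case XenbusStateInitialised:
    case XenbusStateConnected:
    case XenbusStateReconfigured:       /* new state 8 */
        connect_backend();
        break;
    case XenbusStateReconfiguring:      /* new state 7 */
        reconfigure_backend();
        break;
    case XenbusStateClosing:
    case XenbusStateClosed:
        disconnect_backend();
        break;
    default:
        break;
    }
}
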
3595 --- sle11-2009-09-18.orig/include/xen/interface/io/xs_wire.h 2009-09-18 10:11:48.000000000 +0200
3596 +++ sle11-2009-09-18/include/xen/interface/io/xs_wire.h 2008-11-25 12:35:56.000000000 +0100
3597 @@ -1,6 +1,25 @@
3598 /*
3599 * Details of the "wire" protocol between Xen Store Daemon and client
3600 * library or guest kernel.
3601 + *
3602 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3603 + * of this software and associated documentation files (the "Software"), to
3604 + * deal in the Software without restriction, including without limitation the
3605 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3606 + * sell copies of the Software, and to permit persons to whom the Software is
3607 + * furnished to do so, subject to the following conditions:
3608 + *
3609 + * The above copyright notice and this permission notice shall be included in
3610 + * all copies or substantial portions of the Software.
3611 + *
3612 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3613 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3614 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3615 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3616 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3617 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3618 + * DEALINGS IN THE SOFTWARE.
3619 + *
3620 * Copyright (C) 2005 Rusty Russell IBM Corporation
3621 */
3622
3623 @@ -26,7 +45,9 @@ enum xsd_sockmsg_type
3624 XS_SET_PERMS,
3625 XS_WATCH_EVENT,
3626 XS_ERROR,
3627 - XS_IS_DOMAIN_INTRODUCED
3628 + XS_IS_DOMAIN_INTRODUCED,
3629 + XS_RESUME,
3630 + XS_SET_TARGET
3631 };
3632
3633 #define XS_WRITE_NONE "NONE"
3634 @@ -40,7 +61,12 @@ struct xsd_errors
3635 const char *errstring;
3636 };
3637 #define XSD_ERROR(x) { x, #x }
3638 -static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
3639 +/* LINTED: static unused */
3640 +static struct xsd_errors xsd_errors[]
3641 +#if defined(__GNUC__)
3642 +__attribute__((unused))
3643 +#endif
3644 + = {
3645 XSD_ERROR(EINVAL),
3646 XSD_ERROR(EACCES),
3647 XSD_ERROR(EEXIST),
3648 @@ -84,4 +110,21 @@ struct xenstore_domain_interface {
3649 XENSTORE_RING_IDX rsp_cons, rsp_prod;
3650 };
3651
3652 +/* Violating this is very bad. See docs/misc/xenstore.txt. */
3653 +#define XENSTORE_PAYLOAD_MAX 4096
3654 +
3655 +/* Violating these just gets you an error back */
3656 +#define XENSTORE_ABS_PATH_MAX 3072
3657 +#define XENSTORE_REL_PATH_MAX 2048
3658 +
3659 #endif /* _XS_WIRE_H */
3660 +
3661 +/*
3662 + * Local variables:
3663 + * mode: C
3664 + * c-set-style: "BSD"
3665 + * c-basic-offset: 4
3666 + * tab-width: 4
3667 + * indent-tabs-mode: nil
3668 + * End:
3669 + */
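
The three limits appended above are enforced on the wire, so clients typically check them before queueing a message. A minimal user-space-style sketch (the errno constants and the surrounding transport code are assumed, not shown):

#include <errno.h>
#include <string.h>
#include <xen/interface/io/xs_wire.h>

static int check_write(const char *abs_path, size_t payload_len)
{
    if (strlen(abs_path) > XENSTORE_ABS_PATH_MAX)
        return -EINVAL;                 /* "just gets you an error back" */
    if (payload_len > XENSTORE_PAYLOAD_MAX)
        return -E2BIG;                  /* violating this is very bad */
    return 0;
}
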
3670 --- sle11-2009-09-18.orig/include/xen/interface/memory.h 2009-09-18 10:11:48.000000000 +0200
3671 +++ sle11-2009-09-18/include/xen/interface/memory.h 2008-11-25 12:35:56.000000000 +0100
3672 @@ -3,6 +3,24 @@
3673 *
3674 * Memory reservation and information.
3675 *
3676 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3677 + * of this software and associated documentation files (the "Software"), to
3678 + * deal in the Software without restriction, including without limitation the
3679 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3680 + * sell copies of the Software, and to permit persons to whom the Software is
3681 + * furnished to do so, subject to the following conditions:
3682 + *
3683 + * The above copyright notice and this permission notice shall be included in
3684 + * all copies or substantial portions of the Software.
3685 + *
3686 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3687 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3688 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3689 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3690 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3691 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3692 + * DEALINGS IN THE SOFTWARE.
3693 + *
3694 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
3695 */
3696
3697 @@ -10,13 +28,28 @@
3698 #define __XEN_PUBLIC_MEMORY_H__
3699
3700 /*
3701 - * Increase or decrease the specified domain's memory reservation. Returns a
3702 - * -ve errcode on failure, or the # extents successfully allocated or freed.
3703 + * Increase or decrease the specified domain's memory reservation. Returns the
3704 + * number of extents successfully allocated or freed.
3705 * arg == addr of struct xen_memory_reservation.
3706 */
3707 #define XENMEM_increase_reservation 0
3708 #define XENMEM_decrease_reservation 1
3709 #define XENMEM_populate_physmap 6
3710 +
3711 +#if __XEN_INTERFACE_VERSION__ >= 0x00030209
3712 +/*
3713 + * Maximum # bits addressable by the user of the allocated region (e.g., I/O
3714 + * devices often have a 32-bit limitation even in 64-bit systems). If zero
3715 + * then the user has no addressing restriction. This field is not used by
3716 + * XENMEM_decrease_reservation.
3717 + */
3718 +#define XENMEMF_address_bits(x) (x)
3719 +#define XENMEMF_get_address_bits(x) ((x) & 0xffu)
3720 +/* NUMA node to allocate from. */
3721 +#define XENMEMF_node(x) (((x) + 1) << 8)
3722 +#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
3723 +#endif
3724 +
3725 struct xen_memory_reservation {
3726
3727 /*
3728 @@ -29,19 +62,18 @@ struct xen_memory_reservation {
3729 * OUT: GMFN bases of extents that were allocated
3730 * (NB. This command also updates the mach_to_phys translation table)
3731 */
3732 - GUEST_HANDLE(ulong) extent_start;
3733 + XEN_GUEST_HANDLE(ulong) extent_start;
3734
3735 /* Number of extents, and size/alignment of each (2^extent_order pages). */
3736 - unsigned long nr_extents;
3737 + xen_ulong_t nr_extents;
3738 unsigned int extent_order;
3739
3740 - /*
3741 - * Maximum # bits addressable by the user of the allocated region (e.g.,
3742 - * I/O devices often have a 32-bit limitation even in 64-bit systems). If
3743 - * zero then the user has no addressing restriction.
3744 - * This field is not used by XENMEM_decrease_reservation.
3745 - */
3746 +#if __XEN_INTERFACE_VERSION__ >= 0x00030209
3747 + /* XENMEMF flags. */
3748 + unsigned int mem_flags;
3749 +#else
3750 unsigned int address_bits;
3751 +#endif
3752
3753 /*
3754 * Domain whose reservation is being changed.
3755 @@ -50,7 +82,51 @@ struct xen_memory_reservation {
3756 domid_t domid;
3757
3758 };
3759 -DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation);
3760 +typedef struct xen_memory_reservation xen_memory_reservation_t;
3761 +DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
3762 +
3763 +/*
3764 + * An atomic exchange of memory pages. If return code is zero then
3765 + * @out.extent_list provides GMFNs of the newly-allocated memory.
3766 + * Returns zero on complete success, otherwise a negative error code.
3767 + * On complete success then always @nr_exchanged == @in.nr_extents.
3768 + * On partial success @nr_exchanged indicates how much work was done.
3769 + */
3770 +#define XENMEM_exchange 11
3771 +struct xen_memory_exchange {
3772 + /*
3773 + * [IN] Details of memory extents to be exchanged (GMFN bases).
3774 + * Note that @in.address_bits is ignored and unused.
3775 + */
3776 + struct xen_memory_reservation in;
3777 +
3778 + /*
3779 + * [IN/OUT] Details of new memory extents.
3780 + * We require that:
3781 + * 1. @in.domid == @out.domid
3782 + * 2. @in.nr_extents << @in.extent_order ==
3783 + * @out.nr_extents << @out.extent_order
3784 + * 3. @in.extent_start and @out.extent_start lists must not overlap
3785 + * 4. @out.extent_start lists GPFN bases to be populated
3786 + * 5. @out.extent_start is overwritten with allocated GMFN bases
3787 + */
3788 + struct xen_memory_reservation out;
3789 +
3790 + /*
3791 + * [OUT] Number of input extents that were successfully exchanged:
3792 + * 1. The first @nr_exchanged input extents were successfully
3793 + * deallocated.
3794 + * 2. The corresponding first entries in the output extent list correctly
3795 + * indicate the GMFNs that were successfully exchanged.
3796 + * 3. All other input and output extents are untouched.
3797 + * 4. If not all input extents are exchanged then the return code of this
3798 + * command will be non-zero.
3799 + * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
3800 + */
3801 + xen_ulong_t nr_exchanged;
3802 +};
3803 +typedef struct xen_memory_exchange xen_memory_exchange_t;
3804 +DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
3805
3806 /*
3807 * Returns the maximum machine frame number of mapped RAM in this system.
3808 @@ -68,6 +144,11 @@ DEFINE_GUEST_HANDLE_STRUCT(xen_memory_re
3809 #define XENMEM_maximum_reservation 4
3810
3811 /*
3812 + * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
3813 + */
3814 +#define XENMEM_maximum_gpfn 14
3815 +
3816 +/*
3817 * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
3818 * mapping table. Architectures which do not have a m2p table do not implement
3819 * this command.
3820 @@ -86,7 +167,7 @@ struct xen_machphys_mfn_list {
3821 * any large discontiguities in the machine address space, 2MB gaps in
3822 * the machphys table will be represented by an MFN base of zero.
3823 */
3824 - GUEST_HANDLE(ulong) extent_start;
3825 + XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
3826
3827 /*
3828 * Number of extents written to the above array. This will be smaller
3829 @@ -94,7 +175,22 @@ struct xen_machphys_mfn_list {
3830 */
3831 unsigned int nr_extents;
3832 };
3833 -DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list);
3834 +typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
3835 +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
3836 +
3837 +/*
3838 + * Returns the location in virtual address space of the machine_to_phys
3839 + * mapping table. Architectures which do not have a m2p table, or which do not
3840 + * map it by default into guest address space, do not implement this command.
3841 + * arg == addr of xen_machphys_mapping_t.
3842 + */
3843 +#define XENMEM_machphys_mapping 12
3844 +struct xen_machphys_mapping {
3845 + xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */
3846 + xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */
3847 +};
3848 +typedef struct xen_machphys_mapping xen_machphys_mapping_t;
3849 +DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
3850
3851 /*
3852 * Sets the GPFN at which a particular page appears in the specified guest's
3853 @@ -109,15 +205,33 @@ struct xen_add_to_physmap {
3854 /* Source mapping space. */
3855 #define XENMAPSPACE_shared_info 0 /* shared info page */
3856 #define XENMAPSPACE_grant_table 1 /* grant table page */
3857 +#define XENMAPSPACE_mfn 2 /* usual MFN */
3858 unsigned int space;
3859
3860 /* Index into source mapping space. */
3861 - unsigned long idx;
3862 + xen_ulong_t idx;
3863
3864 /* GPFN where the source mapping page should appear. */
3865 - unsigned long gpfn;
3866 + xen_pfn_t gpfn;
3867 };
3868 -DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
3869 +typedef struct xen_add_to_physmap xen_add_to_physmap_t;
3870 +DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
3871 +
3872 +/*
3873 + * Unmaps the page appearing at a particular GPFN from the specified guest's
3874 + * pseudophysical address space.
3875 + * arg == addr of xen_remove_from_physmap_t.
3876 + */
3877 +#define XENMEM_remove_from_physmap 15
3878 +struct xen_remove_from_physmap {
3879 + /* Which domain to change the mapping for. */
3880 + domid_t domid;
3881 +
3882 + /* GPFN of the current mapping of the page. */
3883 + xen_pfn_t gpfn;
3884 +};
3885 +typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
3886 +DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
3887
3888 /*
3889 * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
3890 @@ -129,17 +243,71 @@ struct xen_translate_gpfn_list {
3891 domid_t domid;
3892
3893 /* Length of list. */
3894 - unsigned long nr_gpfns;
3895 + xen_ulong_t nr_gpfns;
3896
3897 /* List of GPFNs to translate. */
3898 - GUEST_HANDLE(ulong) gpfn_list;
3899 + XEN_GUEST_HANDLE(xen_pfn_t) gpfn_list;
3900
3901 /*
3902 * Output list to contain MFN translations. May be the same as the input
3903 * list (in which case each input GPFN is overwritten with the output MFN).
3904 */
3905 - GUEST_HANDLE(ulong) mfn_list;
3906 + XEN_GUEST_HANDLE(xen_pfn_t) mfn_list;
3907 +};
3908 +typedef struct xen_translate_gpfn_list xen_translate_gpfn_list_t;
3909 +DEFINE_XEN_GUEST_HANDLE(xen_translate_gpfn_list_t);
3910 +
3911 +/*
3912 + * Returns the pseudo-physical memory map as it was when the domain
3913 + * was started (specified by XENMEM_set_memory_map).
3914 + * arg == addr of xen_memory_map_t.
3915 + */
3916 +#define XENMEM_memory_map 9
3917 +struct xen_memory_map {
3918 + /*
3919 + * On call the number of entries which can be stored in buffer. On
3920 + * return the number of entries which have been stored in
3921 + * buffer.
3922 + */
3923 + unsigned int nr_entries;
3924 +
3925 + /*
3926 + * Entries in the buffer are in the same format as returned by the
3927 + * BIOS INT 0x15 EAX=0xE820 call.
3928 + */
3929 + XEN_GUEST_HANDLE(void) buffer;
3930 +};
3931 +typedef struct xen_memory_map xen_memory_map_t;
3932 +DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
3933 +
3934 +/*
3935 + * Returns the real physical memory map. Passes the same structure as
3936 + * XENMEM_memory_map.
3937 + * arg == addr of xen_memory_map_t.
3938 + */
3939 +#define XENMEM_machine_memory_map 10
3940 +
3941 +/*
3942 + * Set the pseudo-physical memory map of a domain, as returned by
3943 + * XENMEM_memory_map.
3944 + * arg == addr of xen_foreign_memory_map_t.
3945 + */
3946 +#define XENMEM_set_memory_map 13
3947 +struct xen_foreign_memory_map {
3948 + domid_t domid;
3949 + struct xen_memory_map map;
3950 };
3951 -DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
3952 +typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
3953 +DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
3954
3955 #endif /* __XEN_PUBLIC_MEMORY_H__ */
3956 +
3957 +/*
3958 + * Local variables:
3959 + * mode: C
3960 + * c-set-style: "BSD"
3961 + * c-basic-offset: 4
3962 + * tab-width: 4
3963 + * indent-tabs-mode: nil
3964 + * End:
3965 + */
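
With the interface-version split above, the same reservation carries either the old address_bits field or the packed mem_flags encoding. A sketch of building one, assuming the per-arch set_xen_guest_handle() helper and DOMID_SELF from xen.h:

#include <xen/interface/xen.h>
#include <xen/interface/memory.h>

static void build_reservation(struct xen_memory_reservation *r,
                              unsigned long *frame_list)
{
    set_xen_guest_handle(r->extent_start, frame_list);  /* per-arch helper */
    r->nr_extents   = 16;
    r->extent_order = 0;    /* 2^0 = single pages */
#if __XEN_INTERFACE_VERSION__ >= 0x00030209
    /* Below 4GiB, preferably from NUMA node 0. */
    r->mem_flags    = XENMEMF_address_bits(32) | XENMEMF_node(0);
#else
    r->address_bits = 32;
#endif
    r->domid        = DOMID_SELF;
}
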
3966 --- sle11-2009-09-18.orig/include/xen/interface/physdev.h 2009-09-18 10:11:48.000000000 +0200
3967 +++ sle11-2009-09-18/include/xen/interface/physdev.h 2008-11-25 12:35:56.000000000 +0100
3968 @@ -24,7 +24,7 @@
3969 /*
3970 * Prototype for this hypercall is:
3971 * int physdev_op(int cmd, void *args)
3972 - * @cmd == PHYSDEVOP_??? (physdev operation).
3973 + * @cmd == PHYSDEVOP_??? (physdev operation).
3974 * @args == Operation-specific extra arguments (NULL if none).
3975 */
3976
3977 @@ -32,114 +32,188 @@
3978 * Notify end-of-interrupt (EOI) for the specified IRQ.
3979 * @arg == pointer to physdev_eoi structure.
3980 */
3981 -#define PHYSDEVOP_eoi 12
3982 +#define PHYSDEVOP_eoi 12
3983 struct physdev_eoi {
3984 - /* IN */
3985 - uint32_t irq;
3986 + /* IN */
3987 + uint32_t irq;
3988 };
3989 +typedef struct physdev_eoi physdev_eoi_t;
3990 +DEFINE_XEN_GUEST_HANDLE(physdev_eoi_t);
3991
3992 /*
3993 * Query the status of an IRQ line.
3994 * @arg == pointer to physdev_irq_status_query structure.
3995 */
3996 -#define PHYSDEVOP_irq_status_query 5
3997 +#define PHYSDEVOP_irq_status_query 5
3998 struct physdev_irq_status_query {
3999 - /* IN */
4000 - uint32_t irq;
4001 - /* OUT */
4002 - uint32_t flags; /* XENIRQSTAT_* */
4003 + /* IN */
4004 + uint32_t irq;
4005 + /* OUT */
4006 + uint32_t flags; /* XENIRQSTAT_* */
4007 };
4008 +typedef struct physdev_irq_status_query physdev_irq_status_query_t;
4009 +DEFINE_XEN_GUEST_HANDLE(physdev_irq_status_query_t);
4010
4011 /* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
4012 -#define _XENIRQSTAT_needs_eoi (0)
4013 -#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
4014 +#define _XENIRQSTAT_needs_eoi (0)
4015 +#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
4016
4017 /* IRQ shared by multiple guests? */
4018 -#define _XENIRQSTAT_shared (1)
4019 -#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
4020 +#define _XENIRQSTAT_shared (1)
4021 +#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
4022
4023 /*
4024 * Set the current VCPU's I/O privilege level.
4025 * @arg == pointer to physdev_set_iopl structure.
4026 */
4027 -#define PHYSDEVOP_set_iopl 6
4028 +#define PHYSDEVOP_set_iopl 6
4029 struct physdev_set_iopl {
4030 - /* IN */
4031 - uint32_t iopl;
4032 + /* IN */
4033 + uint32_t iopl;
4034 };
4035 +typedef struct physdev_set_iopl physdev_set_iopl_t;
4036 +DEFINE_XEN_GUEST_HANDLE(physdev_set_iopl_t);
4037
4038 /*
4039 * Set the current VCPU's I/O-port permissions bitmap.
4040 * @arg == pointer to physdev_set_iobitmap structure.
4041 */
4042 -#define PHYSDEVOP_set_iobitmap 7
4043 +#define PHYSDEVOP_set_iobitmap 7
4044 struct physdev_set_iobitmap {
4045 - /* IN */
4046 - uint8_t * bitmap;
4047 - uint32_t nr_ports;
4048 + /* IN */
4049 +#if __XEN_INTERFACE_VERSION__ >= 0x00030205
4050 + XEN_GUEST_HANDLE(uint8) bitmap;
4051 +#else
4052 + uint8_t *bitmap;
4053 +#endif
4054 + uint32_t nr_ports;
4055 };
4056 +typedef struct physdev_set_iobitmap physdev_set_iobitmap_t;
4057 +DEFINE_XEN_GUEST_HANDLE(physdev_set_iobitmap_t);
4058
4059 /*
4060 * Read or write an IO-APIC register.
4061 * @arg == pointer to physdev_apic structure.
4062 */
4063 -#define PHYSDEVOP_apic_read 8
4064 -#define PHYSDEVOP_apic_write 9
4065 +#define PHYSDEVOP_apic_read 8
4066 +#define PHYSDEVOP_apic_write 9
4067 struct physdev_apic {
4068 - /* IN */
4069 - unsigned long apic_physbase;
4070 - uint32_t reg;
4071 - /* IN or OUT */
4072 - uint32_t value;
4073 + /* IN */
4074 + unsigned long apic_physbase;
4075 + uint32_t reg;
4076 + /* IN or OUT */
4077 + uint32_t value;
4078 };
4079 +typedef struct physdev_apic physdev_apic_t;
4080 +DEFINE_XEN_GUEST_HANDLE(physdev_apic_t);
4081
4082 /*
4083 * Allocate or free a physical upcall vector for the specified IRQ line.
4084 * @arg == pointer to physdev_irq structure.
4085 */
4086 -#define PHYSDEVOP_alloc_irq_vector 10
4087 -#define PHYSDEVOP_free_irq_vector 11
4088 +#define PHYSDEVOP_alloc_irq_vector 10
4089 +#define PHYSDEVOP_free_irq_vector 11
4090 struct physdev_irq {
4091 - /* IN */
4092 - uint32_t irq;
4093 - /* IN or OUT */
4094 - uint32_t vector;
4095 + /* IN */
4096 + uint32_t irq;
4097 + /* IN or OUT */
4098 + uint32_t vector;
4099 +};
4100 +typedef struct physdev_irq physdev_irq_t;
4101 +DEFINE_XEN_GUEST_HANDLE(physdev_irq_t);
4102 +
4103 +#define MAP_PIRQ_TYPE_MSI 0x0
4104 +#define MAP_PIRQ_TYPE_GSI 0x1
4105 +#define MAP_PIRQ_TYPE_UNKNOWN 0x2
4106 +
4107 +#define PHYSDEVOP_map_pirq 13
4108 +struct physdev_map_pirq {
4109 + domid_t domid;
4110 + /* IN */
4111 + int type;
4112 + /* IN */
4113 + int index;
4114 + /* IN or OUT */
4115 + int pirq;
4116 + /* IN */
4117 + int bus;
4118 + /* IN */
4119 + int devfn;
4120 + /* IN */
4121 + int entry_nr;
4122 + /* IN */
4123 + uint64_t table_base;
4124 +};
4125 +typedef struct physdev_map_pirq physdev_map_pirq_t;
4126 +DEFINE_XEN_GUEST_HANDLE(physdev_map_pirq_t);
4127 +
4128 +#define PHYSDEVOP_unmap_pirq 14
4129 +struct physdev_unmap_pirq {
4130 + domid_t domid;
4131 + /* IN */
4132 + int pirq;
4133 +};
4134 +
4135 +typedef struct physdev_unmap_pirq physdev_unmap_pirq_t;
4136 +DEFINE_XEN_GUEST_HANDLE(physdev_unmap_pirq_t);
4137 +
4138 +#define PHYSDEVOP_manage_pci_add 15
4139 +#define PHYSDEVOP_manage_pci_remove 16
4140 +struct physdev_manage_pci {
4141 + /* IN */
4142 + uint8_t bus;
4143 + uint8_t devfn;
4144 };
4145
4146 +typedef struct physdev_manage_pci physdev_manage_pci_t;
4147 +DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t);
4148 +
4149 /*
4150 * Argument to physdev_op_compat() hypercall. Superseded by new physdev_op()
4151 * hypercall since 0x00030202.
4152 */
4153 struct physdev_op {
4154 - uint32_t cmd;
4155 - union {
4156 - struct physdev_irq_status_query irq_status_query;
4157 - struct physdev_set_iopl set_iopl;
4158 - struct physdev_set_iobitmap set_iobitmap;
4159 - struct physdev_apic apic_op;
4160 - struct physdev_irq irq_op;
4161 - } u;
4162 + uint32_t cmd;
4163 + union {
4164 + struct physdev_irq_status_query irq_status_query;
4165 + struct physdev_set_iopl set_iopl;
4166 + struct physdev_set_iobitmap set_iobitmap;
4167 + struct physdev_apic apic_op;
4168 + struct physdev_irq irq_op;
4169 + } u;
4170 };
4171 +typedef struct physdev_op physdev_op_t;
4172 +DEFINE_XEN_GUEST_HANDLE(physdev_op_t);
4173
4174 /*
4175 * Notify that some PIRQ-bound event channels have been unmasked.
4176 * ** This command is obsolete since interface version 0x00030202 and is **
4177 - * ** unsupported by newer versions of Xen. **
4178 + * ** unsupported by newer versions of Xen. **
4179 */
4180 -#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
4181 +#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
4182
4183 /*
4184 * These all-capitals physdev operation names are superseded by the new names
4185 * (defined above) since interface version 0x00030202.
4186 */
4187 -#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
4188 -#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
4189 -#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
4190 -#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
4191 -#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
4192 -#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
4193 -#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
4194 +#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
4195 +#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
4196 +#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
4197 +#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
4198 +#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
4199 +#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
4200 +#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
4201 #define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
4202 -#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
4203 +#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
4204
4205 #endif /* __XEN_PUBLIC_PHYSDEV_H__ */
4206 +
4207 +/*
4208 + * Local variables:
4209 + * mode: C
4210 + * c-set-style: "BSD"
4211 + * c-basic-offset: 4
4212 + * tab-width: 4
4213 + * indent-tabs-mode: nil
4214 + * End:
4215 + */
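
The new physdev_map_pirq structure is typically zeroed and then filled; by convention, -1 in index and pirq asks Xen to allocate both. A sketch only, with the HYPERVISOR_physdev_op submission assumed:

#include <string.h>
#include <xen/interface/xen.h>
#include <xen/interface/physdev.h>

static void prepare_msi_map(struct physdev_map_pirq *map, int bus, int devfn)
{
    memset(map, 0, sizeof(*map));
    map->domid    = DOMID_SELF;
    map->type     = MAP_PIRQ_TYPE_MSI;
    map->index    = -1;         /* ask Xen to allocate the vector */
    map->pirq     = -1;         /* ask Xen to allocate the pirq   */
    map->bus      = bus;
    map->devfn    = devfn;
    map->entry_nr = 1;          /* single MSI; table_base unused  */
}
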
4216 --- sle11-2009-09-18.orig/include/xen/interface/sched.h 2009-09-18 10:11:48.000000000 +0200
4217 +++ sle11-2009-09-18/include/xen/interface/sched.h 2008-11-25 12:35:56.000000000 +0100
4218 @@ -3,6 +3,24 @@
4219 *
4220 * Scheduler state interactions
4221 *
4222 + * Permission is hereby granted, free of charge, to any person obtaining a copy
4223 + * of this software and associated documentation files (the "Software"), to
4224 + * deal in the Software without restriction, including without limitation the
4225 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
4226 + * sell copies of the Software, and to permit persons to whom the Software is
4227 + * furnished to do so, subject to the following conditions:
4228 + *
4229 + * The above copyright notice and this permission notice shall be included in
4230 + * all copies or substantial portions of the Software.
4231 + *
4232 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4233 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4234 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
4235 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4236 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4237 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
4238 + * DEALINGS IN THE SOFTWARE.
4239 + *
4240 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
4241 */
4242
4243 @@ -13,17 +31,17 @@
4244
4245 /*
4246 * The prototype for this hypercall is:
4247 - * long sched_op_new(int cmd, void *arg)
4248 + * long sched_op(int cmd, void *arg)
4249 * @cmd == SCHEDOP_??? (scheduler operation).
4250 * @arg == Operation-specific extra argument(s), as described below.
4251 *
4252 - * **NOTE**:
4253 - * Versions of Xen prior to 3.0.2 provide only the following legacy version
4254 + * Versions of Xen prior to 3.0.2 provided only the following legacy version
4255 * of this hypercall, supporting only the commands yield, block and shutdown:
4256 * long sched_op(int cmd, unsigned long arg)
4257 * @cmd == SCHEDOP_??? (scheduler operation).
4258 * @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
4259 * == SHUTDOWN_* code (SCHEDOP_shutdown)
4260 + * This legacy version is available to new guests as sched_op_compat().
4261 */
4262
4263 /*
4264 @@ -49,7 +67,8 @@
4265 struct sched_shutdown {
4266 unsigned int reason; /* SHUTDOWN_* */
4267 };
4268 -DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown);
4269 +typedef struct sched_shutdown sched_shutdown_t;
4270 +DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
4271
4272 /*
4273 * Poll a set of event-channel ports. Return when one or more are pending. An
4274 @@ -58,11 +77,26 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_shutdow
4275 */
4276 #define SCHEDOP_poll 3
4277 struct sched_poll {
4278 - GUEST_HANDLE(evtchn_port_t) ports;
4279 + XEN_GUEST_HANDLE(evtchn_port_t) ports;
4280 unsigned int nr_ports;
4281 uint64_t timeout;
4282 };
4283 -DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
4284 +typedef struct sched_poll sched_poll_t;
4285 +DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
4286 +
4287 +/*
4288 + * Declare a shutdown for another domain. The main use of this function is
4289 + * in interpreting shutdown requests and reasons for fully-virtualized
4290 + * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
4291 + * @arg == pointer to sched_remote_shutdown structure.
4292 + */
4293 +#define SCHEDOP_remote_shutdown 4
4294 +struct sched_remote_shutdown {
4295 + domid_t domain_id; /* Remote domain ID */
4296 + unsigned int reason; /* SHUTDOWN_xxx reason */
4297 +};
4298 +typedef struct sched_remote_shutdown sched_remote_shutdown_t;
4299 +DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
4300
4301 /*
4302 * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
4303 @@ -75,3 +109,13 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
4304 #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
4305
4306 #endif /* __XEN_PUBLIC_SCHED_H__ */
4307 +
4308 +/*
4309 + * Local variables:
4310 + * mode: C
4311 + * c-set-style: "BSD"
4312 + * c-basic-offset: 4
4313 + * tab-width: 4
4314 + * indent-tabs-mode: nil
4315 + * End:
4316 + */
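
After this change, SCHEDOP_poll takes its port list through a guest handle rather than a raw pointer. A sketch, assuming evtchn_port_t from event_channel.h and the per-arch set_xen_guest_handle() helper, with the hypercall itself left as a comment:

#include <xen/interface/event_channel.h>
#include <xen/interface/sched.h>

static void poll_two_ports(evtchn_port_t *ports, uint64_t timeout_abs_ns)
{
    struct sched_poll poll;

    set_xen_guest_handle(poll.ports, ports);    /* per-arch helper */
    poll.nr_ports = 2;
    poll.timeout  = timeout_abs_ns;             /* absolute system time, ns */
    /* HYPERVISOR_sched_op(SCHEDOP_poll, &poll); */
}
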
4317 --- sle11-2009-09-18.orig/include/xen/interface/vcpu.h 2009-09-18 10:11:48.000000000 +0200
4318 +++ sle11-2009-09-18/include/xen/interface/vcpu.h 2008-11-25 12:35:56.000000000 +0100
4319 @@ -29,9 +29,9 @@
4320
4321 /*
4322 * Prototype for this hypercall is:
4323 - * int vcpu_op(int cmd, int vcpuid, void *extra_args)
4324 - * @cmd == VCPUOP_??? (VCPU operation).
4325 - * @vcpuid == VCPU to operate on.
4326 + * int vcpu_op(int cmd, int vcpuid, void *extra_args)
4327 + * @cmd == VCPUOP_??? (VCPU operation).
4328 + * @vcpuid == VCPU to operate on.
4329 * @extra_args == Operation-specific extra arguments (NULL if none).
4330 */
4331
4332 @@ -40,52 +40,53 @@
4333 * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
4334 *
4335 * @extra_arg == pointer to vcpu_guest_context structure containing initial
4336 - * state for the VCPU.
4337 + * state for the VCPU.
4338 */
4339 -#define VCPUOP_initialise 0
4340 +#define VCPUOP_initialise 0
4341
4342 /*
4343 * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
4344 * if the VCPU has not been initialised (VCPUOP_initialise).
4345 */
4346 -#define VCPUOP_up 1
4347 +#define VCPUOP_up 1
4348
4349 /*
4350 * Bring down a VCPU (i.e., make it non-runnable).
4351 * There are a few caveats that callers should observe:
4352 - * 1. This operation may return, and VCPU_is_up may return false, before the
4353 - * VCPU stops running (i.e., the command is asynchronous). It is a good
4354 - * idea to ensure that the VCPU has entered a non-critical loop before
4355 - * bringing it down. Alternatively, this operation is guaranteed
4356 - * synchronous if invoked by the VCPU itself.
4357 - * 2. After a VCPU is initialised, there is currently no way to drop all its
4358 - * references to domain memory. Even a VCPU that is down still holds
4359 - * memory references via its pagetable base pointer and GDT. It is good
4360 - * practise to move a VCPU onto an 'idle' or default page table, LDT and
4361 - * GDT before bringing it down.
4362 + * 1. This operation may return, and VCPU_is_up may return false, before the
4363 + * VCPU stops running (i.e., the command is asynchronous). It is a good
4364 + * idea to ensure that the VCPU has entered a non-critical loop before
4365 + * bringing it down. Alternatively, this operation is guaranteed
4366 + * synchronous if invoked by the VCPU itself.
4367 + * 2. After a VCPU is initialised, there is currently no way to drop all its
4368 + * references to domain memory. Even a VCPU that is down still holds
4369 + * memory references via its pagetable base pointer and GDT. It is good
4370 + * practise to move a VCPU onto an 'idle' or default page table, LDT and
4371 + * GDT before bringing it down.
4372 */
4373 -#define VCPUOP_down 2
4374 +#define VCPUOP_down 2
4375
4376 /* Returns 1 if the given VCPU is up. */
4377 -#define VCPUOP_is_up 3
4378 +#define VCPUOP_is_up 3
4379
4380 /*
4381 * Return information about the state and running time of a VCPU.
4382 * @extra_arg == pointer to vcpu_runstate_info structure.
4383 */
4384 -#define VCPUOP_get_runstate_info 4
4385 +#define VCPUOP_get_runstate_info 4
4386 struct vcpu_runstate_info {
4387 - /* VCPU's current state (RUNSTATE_*). */
4388 - int state;
4389 - /* When was current state entered (system time, ns)? */
4390 - uint64_t state_entry_time;
4391 - /*
4392 - * Time spent in each RUNSTATE_* (ns). The sum of these times is
4393 - * guaranteed not to drift from system time.
4394 - */
4395 - uint64_t time[4];
4396 + /* VCPU's current state (RUNSTATE_*). */
4397 + int state;
4398 + /* When was current state entered (system time, ns)? */
4399 + uint64_t state_entry_time;
4400 + /*
4401 + * Time spent in each RUNSTATE_* (ns). The sum of these times is
4402 + * guaranteed not to drift from system time.
4403 + */
4404 + uint64_t time[4];
4405 };
4406 -DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate_info);
4407 +typedef struct vcpu_runstate_info vcpu_runstate_info_t;
4408 +DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_t);
4409
4410 /* VCPU is currently running on a physical CPU. */
4411 #define RUNSTATE_running 0
4412 @@ -108,47 +109,52 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate
4413 * Register a shared memory area from which the guest may obtain its own
4414 * runstate information without needing to execute a hypercall.
4415 * Notes:
4416 - * 1. The registered address may be virtual or physical, depending on the
4417 - * platform. The virtual address should be registered on x86 systems.
4418 - * 2. Only one shared area may be registered per VCPU. The shared area is
4419 - * updated by the hypervisor each time the VCPU is scheduled. Thus
4420 - * runstate.state will always be RUNSTATE_running and
4421 - * runstate.state_entry_time will indicate the system time at which the
4422 - * VCPU was last scheduled to run.
4423 + * 1. The registered address may be virtual or physical or guest handle,
4424 + * depending on the platform. Virtual address or guest handle should be
4425 + * registered on x86 systems.
4426 + * 2. Only one shared area may be registered per VCPU. The shared area is
4427 + * updated by the hypervisor each time the VCPU is scheduled. Thus
4428 + * runstate.state will always be RUNSTATE_running and
4429 + * runstate.state_entry_time will indicate the system time at which the
4430 + * VCPU was last scheduled to run.
4431 * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
4432 */
4433 #define VCPUOP_register_runstate_memory_area 5
4434 struct vcpu_register_runstate_memory_area {
4435 - union {
4436 - GUEST_HANDLE(vcpu_runstate_info) h;
4437 - struct vcpu_runstate_info *v;
4438 - uint64_t p;
4439 - } addr;
4440 + union {
4441 + XEN_GUEST_HANDLE(vcpu_runstate_info_t) h;
4442 + struct vcpu_runstate_info *v;
4443 + uint64_t p;
4444 + } addr;
4445 };
4446 +typedef struct vcpu_register_runstate_memory_area vcpu_register_runstate_memory_area_t;
4447 +DEFINE_XEN_GUEST_HANDLE(vcpu_register_runstate_memory_area_t);
4448
4449 /*
4450 * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
4451 * which can be set via these commands. Periods smaller than one millisecond
4452 * may not be supported.
4453 */
4454 -#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
4455 -#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
4456 +#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
4457 +#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
4458 struct vcpu_set_periodic_timer {
4459 - uint64_t period_ns;
4460 + uint64_t period_ns;
4461 };
4462 -DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_periodic_timer);
4463 +typedef struct vcpu_set_periodic_timer vcpu_set_periodic_timer_t;
4464 +DEFINE_XEN_GUEST_HANDLE(vcpu_set_periodic_timer_t);
4465
4466 /*
4467 * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
4468 * timer which can be set via these commands.
4469 */
4470 -#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
4471 +#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
4472 #define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
4473 struct vcpu_set_singleshot_timer {
4474 - uint64_t timeout_abs_ns;
4475 - uint32_t flags; /* VCPU_SSHOTTMR_??? */
4476 + uint64_t timeout_abs_ns; /* Absolute system time value in nanoseconds. */
4477 + uint32_t flags; /* VCPU_SSHOTTMR_??? */
4478 };
4479 -DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_singleshot_timer);
4480 +typedef struct vcpu_set_singleshot_timer vcpu_set_singleshot_timer_t;
4481 +DEFINE_XEN_GUEST_HANDLE(vcpu_set_singleshot_timer_t);
4482
4483 /* Flags to VCPUOP_set_singleshot_timer. */
4484 /* Require the timeout to be in the future (return -ETIME if it's passed). */
4485 @@ -161,13 +167,47 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_sing
4486 * structure in a convenient place, such as in a per-cpu data area.
4487 * The pointer need not be page aligned, but the structure must not
4488 * cross a page boundary.
4489 + *
4490 + * This may be called only once per vcpu.
4491 */
4492 -#define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */
4493 +#define VCPUOP_register_vcpu_info 10 /* arg == vcpu_register_vcpu_info_t */
4494 struct vcpu_register_vcpu_info {
4495 uint64_t mfn; /* mfn of page to place vcpu_info */
4496 uint32_t offset; /* offset within page */
4497 uint32_t rsvd; /* unused */
4498 };
4499 -DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info);
4500 +typedef struct vcpu_register_vcpu_info vcpu_register_vcpu_info_t;
4501 +DEFINE_XEN_GUEST_HANDLE(vcpu_register_vcpu_info_t);
4502 +
4503 +/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
4504 +#define VCPUOP_send_nmi 11
4505 +
4506 +/*
4507 + * Get the physical ID information for a pinned vcpu's underlying physical
4508 + * processor. The physical ID information is architecture-specific.
4509 + * On x86: id[31:0]=apic_id, id[63:32]=acpi_id, and all values 0xff and
4510 + * greater are reserved.
4511 + * This command returns -EINVAL if it is not a valid operation for this VCPU.
4512 + */
4513 +#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */
4514 +struct vcpu_get_physid {
4515 + uint64_t phys_id;
4516 +};
4517 +typedef struct vcpu_get_physid vcpu_get_physid_t;
4518 +DEFINE_XEN_GUEST_HANDLE(vcpu_get_physid_t);
4519 +#define xen_vcpu_physid_to_x86_apicid(physid) \
4520 + ((((uint32_t)(physid)) >= 0xff) ? 0xff : ((uint8_t)(physid)))
4521 +#define xen_vcpu_physid_to_x86_acpiid(physid) \
4522 + ((((uint32_t)((physid)>>32)) >= 0xff) ? 0xff : ((uint8_t)((physid)>>32)))
4523
4524 #endif /* __XEN_PUBLIC_VCPU_H__ */
4525 +
4526 +/*
4527 + * Local variables:
4528 + * mode: C
4529 + * c-set-style: "BSD"
4530 + * c-basic-offset: 4
4531 + * tab-width: 4
4532 + * indent-tabs-mode: nil
4533 + * End:
4534 + */
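
The packing used by VCPUOP_get_physid is easiest to see unpacked. A small sketch using the two macros defined above, assuming the hypercall has already filled the structure (a result of 0xff means reserved/unknown):

#include <linux/types.h>
#include <xen/interface/vcpu.h>

static void decode_physid(const struct vcpu_get_physid *arg,
                          uint8_t *apic_id, uint8_t *acpi_id)
{
    /* id[31:0] = apic_id, id[63:32] = acpi_id; 0xff and up is reserved. */
    *apic_id = xen_vcpu_physid_to_x86_apicid(arg->phys_id);
    *acpi_id = xen_vcpu_physid_to_x86_acpiid(arg->phys_id);
}
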
4535 --- sle11-2009-09-18.orig/include/xen/interface/version.h 2009-09-18 10:11:48.000000000 +0200
4536 +++ sle11-2009-09-18/include/xen/interface/version.h 2008-11-25 12:35:56.000000000 +0100
4537 @@ -3,6 +3,24 @@
4538 *
4539 * Xen version, type, and compile information.
4540 *
4541 + * Permission is hereby granted, free of charge, to any person obtaining a copy
4542 + * of this software and associated documentation files (the "Software"), to
4543 + * deal in the Software without restriction, including without limitation the
4544 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
4545 + * sell copies of the Software, and to permit persons to whom the Software is
4546 + * furnished to do so, subject to the following conditions:
4547 + *
4548 + * The above copyright notice and this permission notice shall be included in
4549 + * all copies or substantial portions of the Software.
4550 + *
4551 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4552 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4553 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
4554 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4555 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4556 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
4557 + * DEALINGS IN THE SOFTWARE.
4558 + *
4559 * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com>
4560 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
4561 */
4562 @@ -10,17 +28,15 @@
4563 #ifndef __XEN_PUBLIC_VERSION_H__
4564 #define __XEN_PUBLIC_VERSION_H__
4565
4566 -/* NB. All ops return zero on success, except XENVER_version. */
4567 +/* NB. All ops return zero on success, except XENVER_{version,pagesize} */
4568
4569 /* arg == NULL; returns major:minor (16:16). */
4570 #define XENVER_version 0
4571
4572 /* arg == xen_extraversion_t. */
4573 #define XENVER_extraversion 1
4574 -struct xen_extraversion {
4575 - char extraversion[16];
4576 -};
4577 -#define XEN_EXTRAVERSION_LEN (sizeof(struct xen_extraversion))
4578 +typedef char xen_extraversion_t[16];
4579 +#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
4580
4581 /* arg == xen_compile_info_t. */
4582 #define XENVER_compile_info 2
4583 @@ -30,31 +46,46 @@ struct xen_compile_info {
4584 char compile_domain[32];
4585 char compile_date[32];
4586 };
4587 +typedef struct xen_compile_info xen_compile_info_t;
4588
4589 #define XENVER_capabilities 3
4590 -struct xen_capabilities_info {
4591 - char info[1024];
4592 -};
4593 -#define XEN_CAPABILITIES_INFO_LEN (sizeof(struct xen_capabilities_info))
4594 +typedef char xen_capabilities_info_t[1024];
4595 +#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
4596
4597 #define XENVER_changeset 4
4598 -struct xen_changeset_info {
4599 - char info[64];
4600 -};
4601 -#define XEN_CHANGESET_INFO_LEN (sizeof(struct xen_changeset_info))
4602 +typedef char xen_changeset_info_t[64];
4603 +#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
4604
4605 #define XENVER_platform_parameters 5
4606 struct xen_platform_parameters {
4607 unsigned long virt_start;
4608 };
4609 +typedef struct xen_platform_parameters xen_platform_parameters_t;
4610
4611 #define XENVER_get_features 6
4612 struct xen_feature_info {
4613 unsigned int submap_idx; /* IN: which 32-bit submap to return */
4614 uint32_t submap; /* OUT: 32-bit submap */
4615 };
4616 +typedef struct xen_feature_info xen_feature_info_t;
4617
4618 /* Declares the features reported by XENVER_get_features. */
4619 #include "features.h"
4620
4621 +/* arg == NULL; returns host memory page size. */
4622 +#define XENVER_pagesize 7
4623 +
4624 +/* arg == xen_domain_handle_t. */
4625 +#define XENVER_guest_handle 8
4626 +
4627 #endif /* __XEN_PUBLIC_VERSION_H__ */
4628 +
4629 +/*
4630 + * Local variables:
4631 + * mode: C
4632 + * c-set-style: "BSD"
4633 + * c-basic-offset: 4
4634 + * tab-width: 4
4635 + * indent-tabs-mode: nil
4636 + * End:
4637 + */
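
XENVER_extraversion now fills a bare char-array typedef instead of a wrapper struct. A sketch of the calling convention, with the HYPERVISOR_xen_version hypercall elided:

#include <xen/interface/version.h>

static void fetch_extraversion(void)
{
    xen_extraversion_t extra;   /* plain char[16]; see XEN_EXTRAVERSION_LEN */

    /* HYPERVISOR_xen_version(XENVER_extraversion, extra); */
    extra[XEN_EXTRAVERSION_LEN - 1] = '\0';   /* defensive termination */
}
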
4638 --- sle11-2009-09-18.orig/include/xen/interface/xen.h 2009-09-18 10:11:48.000000000 +0200
4639 +++ sle11-2009-09-18/include/xen/interface/xen.h 2008-11-25 12:35:56.000000000 +0100
4640 @@ -3,35 +3,68 @@
4641 *
4642 * Guest OS interface to Xen.
4643 *
4644 + * Permission is hereby granted, free of charge, to any person obtaining a copy
4645 + * of this software and associated documentation files (the "Software"), to
4646 + * deal in the Software without restriction, including without limitation the
4647 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
4648 + * sell copies of the Software, and to permit persons to whom the Software is
4649 + * furnished to do so, subject to the following conditions:
4650 + *
4651 + * The above copyright notice and this permission notice shall be included in
4652 + * all copies or substantial portions of the Software.
4653 + *
4654 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
4655 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
4656 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
4657 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
4658 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
4659 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
4660 + * DEALINGS IN THE SOFTWARE.
4661 + *
4662 * Copyright (c) 2004, K A Fraser
4663 */
4664
4665 #ifndef __XEN_PUBLIC_XEN_H__
4666 #define __XEN_PUBLIC_XEN_H__
4667
4668 -#include <asm/xen/interface.h>
4669 +#include "xen-compat.h"
4670 +#ifdef CONFIG_PARAVIRT_XEN
4671 #include <asm/pvclock-abi.h>
4672 +#endif
4673
4674 -/*
4675 - * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
4676 - */
4677 +#if defined(__i386__) || defined(__x86_64__)
4678 +#include "arch-x86/xen.h"
4679 +#elif defined(__ia64__)
4680 +#include "arch-ia64.h"
4681 +#else
4682 +#error "Unsupported architecture"
4683 +#endif
4684 +
4685 +#ifndef __ASSEMBLY__
4686 +/* Guest handles for primitive C types. */
4687 +DEFINE_XEN_GUEST_HANDLE(char);
4688 +__DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
4689 +DEFINE_XEN_GUEST_HANDLE(int);
4690 +__DEFINE_XEN_GUEST_HANDLE(uint, unsigned int);
4691 +DEFINE_XEN_GUEST_HANDLE(long);
4692 +__DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
4693 +DEFINE_XEN_GUEST_HANDLE(void);
4694 +
4695 +DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
4696 +#endif
4697
4698 /*
4699 - * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5.
4700 - * EAX = return value
4701 - * (argument registers may be clobbered on return)
4702 - * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6.
4703 - * RAX = return value
4704 - * (argument registers not clobbered on return; RCX, R11 are)
4705 + * HYPERCALLS
4706 */
4707 +
4708 #define __HYPERVISOR_set_trap_table 0
4709 #define __HYPERVISOR_mmu_update 1
4710 #define __HYPERVISOR_set_gdt 2
4711 #define __HYPERVISOR_stack_switch 3
4712 #define __HYPERVISOR_set_callbacks 4
4713 #define __HYPERVISOR_fpu_taskswitch 5
4714 -#define __HYPERVISOR_sched_op 6
4715 -#define __HYPERVISOR_dom0_op 7
4716 +#define __HYPERVISOR_sched_op_compat 6 /* compat since 0x00030101 */
4717 +#define __HYPERVISOR_platform_op 7
4718 #define __HYPERVISOR_set_debugreg 8
4719 #define __HYPERVISOR_get_debugreg 9
4720 #define __HYPERVISOR_update_descriptor 10
4721 @@ -39,10 +72,10 @@
4722 #define __HYPERVISOR_multicall 13
4723 #define __HYPERVISOR_update_va_mapping 14
4724 #define __HYPERVISOR_set_timer_op 15
4725 -#define __HYPERVISOR_event_channel_op_compat 16
4726 +#define __HYPERVISOR_event_channel_op_compat 16 /* compat since 0x00030202 */
4727 #define __HYPERVISOR_xen_version 17
4728 #define __HYPERVISOR_console_io 18
4729 -#define __HYPERVISOR_physdev_op_compat 19
4730 +#define __HYPERVISOR_physdev_op_compat 19 /* compat since 0x00030202 */
4731 #define __HYPERVISOR_grant_table_op 20
4732 #define __HYPERVISOR_vm_assist 21
4733 #define __HYPERVISOR_update_va_mapping_otherdomain 22
4734 @@ -50,7 +83,7 @@
4735 #define __HYPERVISOR_vcpu_op 24
4736 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
4737 #define __HYPERVISOR_mmuext_op 26
4738 -#define __HYPERVISOR_acm_op 27
4739 +#define __HYPERVISOR_xsm_op 27
4740 #define __HYPERVISOR_nmi_op 28
4741 #define __HYPERVISOR_sched_op_new 29
4742 #define __HYPERVISOR_callback_op 30
4743 @@ -58,6 +91,9 @@
4744 #define __HYPERVISOR_event_channel_op 32
4745 #define __HYPERVISOR_physdev_op 33
4746 #define __HYPERVISOR_hvm_op 34
4747 +#define __HYPERVISOR_sysctl 35
4748 +#define __HYPERVISOR_domctl 36
4749 +#define __HYPERVISOR_kexec_op 37
4750
4751 /* Architecture-specific hypercall definitions. */
4752 #define __HYPERVISOR_arch_0 48
4753 @@ -70,15 +106,48 @@
4754 #define __HYPERVISOR_arch_7 55
4755
4756 /*
4757 + * HYPERCALL COMPATIBILITY.
4758 + */
4759 +
4760 +/* New sched_op hypercall introduced in 0x00030101. */
4761 +#if __XEN_INTERFACE_VERSION__ < 0x00030101
4762 +#undef __HYPERVISOR_sched_op
4763 +#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
4764 +#else
4765 +#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_new
4766 +#endif
4767 +
4768 +/* New event-channel and physdev hypercalls introduced in 0x00030202. */
4769 +#if __XEN_INTERFACE_VERSION__ < 0x00030202
4770 +#undef __HYPERVISOR_event_channel_op
4771 +#define __HYPERVISOR_event_channel_op __HYPERVISOR_event_channel_op_compat
4772 +#undef __HYPERVISOR_physdev_op
4773 +#define __HYPERVISOR_physdev_op __HYPERVISOR_physdev_op_compat
4774 +#endif
4775 +
4776 +/* New platform_op hypercall introduced in 0x00030204. */
4777 +#if __XEN_INTERFACE_VERSION__ < 0x00030204
4778 +#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
4779 +#endif
4780 +
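To make the gating concrete, a sketch (not part of the patch) of what a translation unit built against the old interface sees:

/* Sketch: a guest built against a pre-3.1 interface keeps the old ABI.
 * Defining __XEN_INTERFACE_VERSION__ before the include overrides the
 * default supplied by xen-compat.h. */
#define __XEN_INTERFACE_VERSION__ 0x00030100
#include <xen/interface/xen.h>
/*
 * Here __HYPERVISOR_sched_op expands to __HYPERVISOR_sched_op_compat (6)
 * and __HYPERVISOR_event_channel_op to its _compat twin (16); a build
 * with __XEN_INTERFACE_VERSION__ >= 0x00030202 gets the new numbers
 * (29 and 32) instead.
 */
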
4781 +/*
4782 * VIRTUAL INTERRUPTS
4783 *
4784 * Virtual interrupts that a guest OS may receive from Xen.
4785 - */
4786 -#define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */
4787 -#define VIRQ_DEBUG 1 /* Request guest to dump debug info. */
4788 -#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */
4789 -#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */
4790 -#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */
4791 + *
4792 + * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
4793 + * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
4794 + * The latter can be allocated only once per guest: they must initially be
4795 + * allocated to VCPU0 but can subsequently be re-bound.
4796 + */
4797 +#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
4798 +#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
4799 +#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
4800 +#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
4801 +#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
4802 +#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
4803 +#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
4804 +#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
4805
4806 /* Architecture-specific VIRQ definitions. */
4807 #define VIRQ_ARCH_0 16
4808 @@ -91,6 +160,7 @@
4809 #define VIRQ_ARCH_7 23
4810
4811 #define NR_VIRQS 24
4812 +
4813 /*
4814 * MMU-UPDATE REQUESTS
4815 *
4816 @@ -166,6 +236,13 @@
4817 * cmd: MMUEXT_SET_LDT
4818 * linear_addr: Linear address of LDT base (NB. must be page-aligned).
4819 * nr_ents: Number of entries in LDT.
4820 + *
4821 + * cmd: MMUEXT_CLEAR_PAGE
4822 + * mfn: Machine frame number to be cleared.
4823 + *
4824 + * cmd: MMUEXT_COPY_PAGE
4825 + * mfn: Machine frame number of the destination page.
4826 + * src_mfn: Machine frame number of the source page.
4827 */
4828 #define MMUEXT_PIN_L1_TABLE 0
4829 #define MMUEXT_PIN_L2_TABLE 1
4830 @@ -182,24 +259,34 @@
4831 #define MMUEXT_FLUSH_CACHE 12
4832 #define MMUEXT_SET_LDT 13
4833 #define MMUEXT_NEW_USER_BASEPTR 15
4834 +#define MMUEXT_CLEAR_PAGE 16
4835 +#define MMUEXT_COPY_PAGE 17
4836
4837 #ifndef __ASSEMBLY__
4838 struct mmuext_op {
4839 - unsigned int cmd;
4840 - union {
4841 - /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
4842 - unsigned long mfn;
4843 - /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
4844 - unsigned long linear_addr;
4845 - } arg1;
4846 - union {
4847 - /* SET_LDT */
4848 - unsigned int nr_ents;
4849 - /* TLB_FLUSH_MULTI, INVLPG_MULTI */
4850 - void *vcpumask;
4851 - } arg2;
4852 + unsigned int cmd;
4853 + union {
4854 + /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
4855 + * CLEAR_PAGE, COPY_PAGE */
4856 + xen_pfn_t mfn;
4857 + /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
4858 + unsigned long linear_addr;
4859 + } arg1;
4860 + union {
4861 + /* SET_LDT */
4862 + unsigned int nr_ents;
4863 + /* TLB_FLUSH_MULTI, INVLPG_MULTI */
4864 +#if __XEN_INTERFACE_VERSION__ >= 0x00030205
4865 + XEN_GUEST_HANDLE(void) vcpumask;
4866 +#else
4867 + void *vcpumask;
4868 +#endif
4869 + /* COPY_PAGE */
4870 + xen_pfn_t src_mfn;
4871 + } arg2;
4872 };
4873 -DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
4874 +typedef struct mmuext_op mmuext_op_t;
4875 +DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
4876 #endif
4877
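For illustration, the two new commands slot into the existing HYPERVISOR_mmuext_op() wrapper (argument order as in the matching hypercall headers: op array, count, success count, domid); a sketch:

/* Sketch: copy one machine page onto another, then clear a third.
 * dst, src and scratch are machine frame numbers. */
static int copy_then_clear(xen_pfn_t dst, xen_pfn_t src, xen_pfn_t scratch)
{
	struct mmuext_op ops[2] = {
		{ .cmd = MMUEXT_COPY_PAGE,
		  .arg1.mfn = dst, .arg2.src_mfn = src },
		{ .cmd = MMUEXT_CLEAR_PAGE,
		  .arg1.mfn = scratch },
	};
	return HYPERVISOR_mmuext_op(ops, 2, NULL, DOMID_SELF);
}
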
4878 /* These are passed as 'flags' to update_va_mapping. They can be ORed. */
4879 @@ -224,11 +311,24 @@ DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
4880 */
4881 #define VMASST_CMD_enable 0
4882 #define VMASST_CMD_disable 1
4883 +
4884 +/* x86/32 guests: simulate full 4GB segment limits. */
4885 #define VMASST_TYPE_4gb_segments 0
4886 +
4887 +/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
4888 #define VMASST_TYPE_4gb_segments_notify 1
4889 +
4890 +/*
4891 + * x86 guests: support writes to bottom-level PTEs.
4892 + * NB1. Page-directory entries cannot be written.
4893 + * NB2. Guest must continue to remove all writable mappings of PTEs.
4894 + */
4895 #define VMASST_TYPE_writable_pagetables 2
4896 +
4897 +/* x86/PAE guests: support PDPTs above 4GB. */
4898 #define VMASST_TYPE_pae_extended_cr3 3
4899 -#define MAX_VMASST_TYPE 3
4900 +
4901 +#define MAX_VMASST_TYPE 3
4902
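A sketch of how a guest selects one of these assists through the standard HYPERVISOR_vm_assist(cmd, type) wrapper:

/* Sketch: an x86/32 guest asking for writable page tables, and (on
 * PAE) for page-directory pointers above 4GB. */
static void enable_vm_assists(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_writable_pagetables);
	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);
}
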
4903 #ifndef __ASSEMBLY__
4904
4905 @@ -267,18 +367,19 @@ struct mmu_update {
4906 uint64_t ptr; /* Machine address of PTE. */
4907 uint64_t val; /* New contents of PTE. */
4908 };
4909 -DEFINE_GUEST_HANDLE_STRUCT(mmu_update);
4910 +typedef struct mmu_update mmu_update_t;
4911 +DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
4912
4913 /*
4914 * Send an array of these to HYPERVISOR_multicall().
4915 * NB. The fields are natural register size for this architecture.
4916 */
4917 struct multicall_entry {
4918 - unsigned long op;
4919 - long result;
4920 + unsigned long op, result;
4921 unsigned long args[6];
4922 };
4923 -DEFINE_GUEST_HANDLE_STRUCT(multicall_entry);
4924 +typedef struct multicall_entry multicall_entry_t;
4925 +DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
4926
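A sketch of the batching this enables, assuming the standard HYPERVISOR_multicall(entries, nr) wrapper and the usual HYPERVISOR_mmu_update argument order (request array, count, success count, domid):

/* Sketch: two PTE updates in a single hypervisor entry. */
static void batched_pte_updates(struct mmu_update *a, struct mmu_update *b)
{
	multicall_entry_t mc[2] = {
		{ .op = __HYPERVISOR_mmu_update,
		  .args = { (unsigned long)a, 1, 0, DOMID_SELF } },
		{ .op = __HYPERVISOR_mmu_update,
		  .args = { (unsigned long)b, 1, 0, DOMID_SELF } },
	};
	HYPERVISOR_multicall(mc, 2);
	/* mc[i].result carries each constituent call's return value */
}
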
4927 /*
4928 * Event channel endpoints per domain:
4929 @@ -287,173 +388,240 @@ DEFINE_GUEST_HANDLE_STRUCT(multicall_ent
4930 #define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
4931
4932 struct vcpu_time_info {
4933 - /*
4934 - * Updates to the following values are preceded and followed
4935 - * by an increment of 'version'. The guest can therefore
4936 - * detect updates by looking for changes to 'version'. If the
4937 - * least-significant bit of the version number is set then an
4938 - * update is in progress and the guest must wait to read a
4939 - * consistent set of values. The correct way to interact with
4940 - * the version number is similar to Linux's seqlock: see the
4941 - * implementations of read_seqbegin/read_seqretry.
4942 - */
4943 - uint32_t version;
4944 - uint32_t pad0;
4945 - uint64_t tsc_timestamp; /* TSC at last update of time vals. */
4946 - uint64_t system_time; /* Time, in nanosecs, since boot. */
4947 - /*
4948 - * Current system time:
4949 - * system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul
4950 - * CPU frequency (Hz):
4951 - * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
4952 - */
4953 - uint32_t tsc_to_system_mul;
4954 - int8_t tsc_shift;
4955 - int8_t pad1[3];
4956 + /*
4957 + * Updates to the following values are preceded and followed by an
4958 + * increment of 'version'. The guest can therefore detect updates by
4959 + * looking for changes to 'version'. If the least-significant bit of
4960 + * the version number is set then an update is in progress and the guest
4961 + * must wait to read a consistent set of values.
4962 + * The correct way to interact with the version number is similar to
4963 + * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
4964 + */
4965 + uint32_t version;
4966 + uint32_t pad0;
4967 + uint64_t tsc_timestamp; /* TSC at last update of time vals. */
4968 + uint64_t system_time; /* Time, in nanosecs, since boot. */
4969 + /*
4970 + * Current system time:
4971 + * system_time +
4972 + * ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
4973 + * CPU frequency (Hz):
4974 + * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
4975 + */
4976 + uint32_t tsc_to_system_mul;
4977 + int8_t tsc_shift;
4978 + int8_t pad1[3];
4979 }; /* 32 bytes */
4980 +typedef struct vcpu_time_info vcpu_time_info_t;
4981
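The version/seqlock protocol and the conversion formula in the comment combine as in this sketch (barriers are the standard kernel primitives; the 64-bit multiply is truncated here for brevity where production code widens the product to 96/128 bits):

/* Sketch: take a consistent snapshot, then apply
 *   system_time +
 *     ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
 */
static uint64_t xen_system_time(const struct vcpu_time_info *ti, uint64_t tsc)
{
	uint32_t ver, mul;
	uint64_t stime, stamp, delta;
	int8_t shift;

	do {
		ver   = ti->version;
		rmb();			/* cf. read_seqbegin() */
		stime = ti->system_time;
		stamp = ti->tsc_timestamp;
		mul   = ti->tsc_to_system_mul;
		shift = ti->tsc_shift;
		rmb();			/* cf. read_seqretry() */
	} while ((ver & 1) || ver != ti->version);

	delta = tsc - stamp;
	if (shift >= 0)			/* tsc_shift may be negative */
		delta <<= shift;
	else
		delta >>= -shift;
	return stime + ((delta * mul) >> 32);	/* see caveat above */
}
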
4982 struct vcpu_info {
4983 - /*
4984 - * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
4985 - * a pending notification for a particular VCPU. It is then cleared
4986 - * by the guest OS /before/ checking for pending work, thus avoiding
4987 - * a set-and-check race. Note that the mask is only accessed by Xen
4988 - * on the CPU that is currently hosting the VCPU. This means that the
4989 - * pending and mask flags can be updated by the guest without special
4990 - * synchronisation (i.e., no need for the x86 LOCK prefix).
4991 - * This may seem suboptimal because if the pending flag is set by
4992 - * a different CPU then an IPI may be scheduled even when the mask
4993 - * is set. However, note:
4994 - * 1. The task of 'interrupt holdoff' is covered by the per-event-
4995 - * channel mask bits. A 'noisy' event that is continually being
4996 - * triggered can be masked at source at this very precise
4997 - * granularity.
4998 - * 2. The main purpose of the per-VCPU mask is therefore to restrict
4999 - * reentrant execution: whether for concurrency control, or to
5000 - * prevent unbounded stack usage. Whatever the purpose, we expect
5001 - * that the mask will be asserted only for short periods at a time,
5002 - * and so the likelihood of a 'spurious' IPI is suitably small.
5003 - * The mask is read before making an event upcall to the guest: a
5004 - * non-zero mask therefore guarantees that the VCPU will not receive
5005 - * an upcall activation. The mask is cleared when the VCPU requests
5006 - * to block: this avoids wakeup-waiting races.
5007 - */
5008 - uint8_t evtchn_upcall_pending;
5009 - uint8_t evtchn_upcall_mask;
5010 - unsigned long evtchn_pending_sel;
5011 - struct arch_vcpu_info arch;
5012 - struct pvclock_vcpu_time_info time;
5013 + /*
5014 + * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
5015 + * a pending notification for a particular VCPU. It is then cleared
5016 + * by the guest OS /before/ checking for pending work, thus avoiding
5017 + * a set-and-check race. Note that the mask is only accessed by Xen
5018 + * on the CPU that is currently hosting the VCPU. This means that the
5019 + * pending and mask flags can be updated by the guest without special
5020 + * synchronisation (i.e., no need for the x86 LOCK prefix).
5021 + * This may seem suboptimal because if the pending flag is set by
5022 + * a different CPU then an IPI may be scheduled even when the mask
5023 + * is set. However, note:
5024 + * 1. The task of 'interrupt holdoff' is covered by the per-event-
5025 + * channel mask bits. A 'noisy' event that is continually being
5026 + * triggered can be masked at source at this very precise
5027 + * granularity.
5028 + * 2. The main purpose of the per-VCPU mask is therefore to restrict
5029 + * reentrant execution: whether for concurrency control, or to
5030 + * prevent unbounded stack usage. Whatever the purpose, we expect
5031 + * that the mask will be asserted only for short periods at a time,
5032 + * and so the likelihood of a 'spurious' IPI is suitably small.
5033 + * The mask is read before making an event upcall to the guest: a
5034 + * non-zero mask therefore guarantees that the VCPU will not receive
5035 + * an upcall activation. The mask is cleared when the VCPU requests
5036 + * to block: this avoids wakeup-waiting races.
5037 + */
5038 + uint8_t evtchn_upcall_pending;
5039 + uint8_t evtchn_upcall_mask;
5040 + unsigned long evtchn_pending_sel;
5041 + struct arch_vcpu_info arch;
5042 +#ifdef CONFIG_PARAVIRT_XEN
5043 + struct pvclock_vcpu_time_info time;
5044 +#else
5045 + struct vcpu_time_info time;
5046 +#endif
5047 }; /* 64 bytes (x86) */
5048 +#ifndef __XEN__
5049 +typedef struct vcpu_info vcpu_info_t;
5050 +#endif
5051
5052 /*
5053 * Xen/kernel shared data -- pointer provided in start_info.
5054 - * NB. We expect that this struct is smaller than a page.
5055 + *
5056 + * This structure is defined to be both smaller than a page, and the
5057 + * only data on the shared page, but may vary in actual size even within
5058 + * compatible Xen versions; guests should not rely on the size
5059 + * of this structure remaining constant.
5060 */
5061 struct shared_info {
5062 - struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
5063 + struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
5064
5065 - /*
5066 - * A domain can create "event channels" on which it can send and receive
5067 - * asynchronous event notifications. There are three classes of event that
5068 - * are delivered by this mechanism:
5069 - * 1. Bi-directional inter- and intra-domain connections. Domains must
5070 - * arrange out-of-band to set up a connection (usually by allocating
5071 - * an unbound 'listener' port and avertising that via a storage service
5072 - * such as xenstore).
5073 - * 2. Physical interrupts. A domain with suitable hardware-access
5074 - * privileges can bind an event-channel port to a physical interrupt
5075 - * source.
5076 - * 3. Virtual interrupts ('events'). A domain can bind an event-channel
5077 - * port to a virtual interrupt source, such as the virtual-timer
5078 - * device or the emergency console.
5079 - *
5080 - * Event channels are addressed by a "port index". Each channel is
5081 - * associated with two bits of information:
5082 - * 1. PENDING -- notifies the domain that there is a pending notification
5083 - * to be processed. This bit is cleared by the guest.
5084 - * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
5085 - * will cause an asynchronous upcall to be scheduled. This bit is only
5086 - * updated by the guest. It is read-only within Xen. If a channel
5087 - * becomes pending while the channel is masked then the 'edge' is lost
5088 - * (i.e., when the channel is unmasked, the guest must manually handle
5089 - * pending notifications as no upcall will be scheduled by Xen).
5090 - *
5091 - * To expedite scanning of pending notifications, any 0->1 pending
5092 - * transition on an unmasked channel causes a corresponding bit in a
5093 - * per-vcpu selector word to be set. Each bit in the selector covers a
5094 - * 'C long' in the PENDING bitfield array.
5095 - */
5096 - unsigned long evtchn_pending[sizeof(unsigned long) * 8];
5097 - unsigned long evtchn_mask[sizeof(unsigned long) * 8];
5098 -
5099 - /*
5100 - * Wallclock time: updated only by control software. Guests should base
5101 - * their gettimeofday() syscall on this wallclock-base value.
5102 - */
5103 - struct pvclock_wall_clock wc;
5104 + /*
5105 + * A domain can create "event channels" on which it can send and receive
5106 + * asynchronous event notifications. There are three classes of event that
5107 + * are delivered by this mechanism:
5108 + * 1. Bi-directional inter- and intra-domain connections. Domains must
5109 + * arrange out-of-band to set up a connection (usually by allocating
5110 + * an unbound 'listener' port and advertising that via a storage service
5111 + * such as xenstore).
5112 + * 2. Physical interrupts. A domain with suitable hardware-access
5113 + * privileges can bind an event-channel port to a physical interrupt
5114 + * source.
5115 + * 3. Virtual interrupts ('events'). A domain can bind an event-channel
5116 + * port to a virtual interrupt source, such as the virtual-timer
5117 + * device or the emergency console.
5118 + *
5119 + * Event channels are addressed by a "port index". Each channel is
5120 + * associated with two bits of information:
5121 + * 1. PENDING -- notifies the domain that there is a pending notification
5122 + * to be processed. This bit is cleared by the guest.
5123 + * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
5124 + * will cause an asynchronous upcall to be scheduled. This bit is only
5125 + * updated by the guest. It is read-only within Xen. If a channel
5126 + * becomes pending while the channel is masked then the 'edge' is lost
5127 + * (i.e., when the channel is unmasked, the guest must manually handle
5128 + * pending notifications as no upcall will be scheduled by Xen).
5129 + *
5130 + * To expedite scanning of pending notifications, any 0->1 pending
5131 + * transition on an unmasked channel causes a corresponding bit in a
5132 + * per-vcpu selector word to be set. Each bit in the selector covers a
5133 + * 'C long' in the PENDING bitfield array.
5134 + */
5135 + unsigned long evtchn_pending[sizeof(unsigned long) * 8];
5136 + unsigned long evtchn_mask[sizeof(unsigned long) * 8];
5137 +
5138 + /*
5139 + * Wallclock time: updated only by control software. Guests should base
5140 + * their gettimeofday() syscall on this wallclock-base value.
5141 + */
5142 +#ifdef CONFIG_PARAVIRT_XEN
5143 + struct pvclock_wall_clock wc;
5144 +#else
5145 + uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
5146 + uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
5147 + uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
5148 +#endif
5149
5150 - struct arch_shared_info arch;
5151 + struct arch_shared_info arch;
5152
5153 };
5154 +#ifndef __XEN__
5155 +typedef struct shared_info shared_info_t;
5156 +#endif
5157
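The selector-word scheme described in the comment leads to a two-level scan on upcall; a sketch (handle_event() is a hypothetical per-port handler):

/* Sketch: each set selector bit names a 'C long' worth of PENDING
 * bits; only unmasked channels are dispatched. */
static void scan_pending_events(struct shared_info *s, struct vcpu_info *v)
{
	unsigned long sel = xchg(&v->evtchn_pending_sel, 0);

	while (sel) {
		unsigned int word = __ffs(sel);
		unsigned long pending;

		sel &= ~(1UL << word);
		pending = s->evtchn_pending[word] & ~s->evtchn_mask[word];
		while (pending) {
			unsigned int bit = __ffs(pending);

			pending &= ~(1UL << bit);
			/* clear PENDING before handling, avoiding the
			 * set-and-check race described above */
			clear_bit(bit, &s->evtchn_pending[word]);
			handle_event(word * BITS_PER_LONG + bit);
		}
	}
}
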
5158 /*
5159 - * Start-of-day memory layout for the initial domain (DOM0):
5160 + * Start-of-day memory layout:
5161 * 1. The domain is started within contiguous virtual-memory region.
5162 - * 2. The contiguous region begins and ends on an aligned 4MB boundary.
5163 - * 3. The region start corresponds to the load address of the OS image.
5164 - * If the load address is not 4MB aligned then the address is rounded down.
5165 - * 4. This the order of bootstrap elements in the initial virtual region:
5166 + * 2. The contiguous region ends on an aligned 4MB boundary.
5167 + * 3. This is the order of bootstrap elements in the initial virtual region:
5168 * a. relocated kernel image
5169 * b. initial ram disk [mod_start, mod_len]
5170 * c. list of allocated page frames [mfn_list, nr_pages]
5171 * d. start_info_t structure [register ESI (x86)]
5172 * e. bootstrap page tables [pt_base, CR3 (x86)]
5173 * f. bootstrap stack [register ESP (x86)]
5174 - * 5. Bootstrap elements are packed together, but each is 4kB-aligned.
5175 - * 6. The initial ram disk may be omitted.
5176 - * 7. The list of page frames forms a contiguous 'pseudo-physical' memory
5177 + * 4. Bootstrap elements are packed together, but each is 4kB-aligned.
5178 + * 5. The initial ram disk may be omitted.
5179 + * 6. The list of page frames forms a contiguous 'pseudo-physical' memory
5180 * layout for the domain. In particular, the bootstrap virtual-memory
5181 * region is a 1:1 mapping to the first section of the pseudo-physical map.
5182 - * 8. All bootstrap elements are mapped read-writable for the guest OS. The
5183 + * 7. All bootstrap elements are mapped read-writable for the guest OS. The
5184 * only exception is the bootstrap page table, which is mapped read-only.
5185 - * 9. There is guaranteed to be at least 512kB padding after the final
5186 + * 8. There is guaranteed to be at least 512kB padding after the final
5187 * bootstrap element. If necessary, the bootstrap virtual region is
5188 * extended by an extra 4MB to ensure this.
5189 */
5190
5191 #define MAX_GUEST_CMDLINE 1024
5192 struct start_info {
5193 - /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
5194 - char magic[32]; /* "xen-<version>-<platform>". */
5195 - unsigned long nr_pages; /* Total pages allocated to this domain. */
5196 - unsigned long shared_info; /* MACHINE address of shared info struct. */
5197 - uint32_t flags; /* SIF_xxx flags. */
5198 - unsigned long store_mfn; /* MACHINE page number of shared page. */
5199 - uint32_t store_evtchn; /* Event channel for store communication. */
5200 - union {
5201 - struct {
5202 - unsigned long mfn; /* MACHINE page number of console page. */
5203 - uint32_t evtchn; /* Event channel for console page. */
5204 - } domU;
5205 - struct {
5206 - uint32_t info_off; /* Offset of console_info struct. */
5207 - uint32_t info_size; /* Size of console_info struct from start.*/
5208 - } dom0;
5209 - } console;
5210 - /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
5211 - unsigned long pt_base; /* VIRTUAL address of page directory. */
5212 - unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
5213 - unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
5214 - unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
5215 - unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
5216 - int8_t cmd_line[MAX_GUEST_CMDLINE];
5217 + /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
5218 + char magic[32]; /* "xen-<version>-<platform>". */
5219 + unsigned long nr_pages; /* Total pages allocated to this domain. */
5220 + unsigned long shared_info; /* MACHINE address of shared info struct. */
5221 + uint32_t flags; /* SIF_xxx flags. */
5222 + xen_pfn_t store_mfn; /* MACHINE page number of shared page. */
5223 + uint32_t store_evtchn; /* Event channel for store communication. */
5224 + union {
5225 + struct {
5226 + xen_pfn_t mfn; /* MACHINE page number of console page. */
5227 + uint32_t evtchn; /* Event channel for console page. */
5228 + } domU;
5229 + struct {
5230 + uint32_t info_off; /* Offset of console_info struct. */
5231 + uint32_t info_size; /* Size of console_info struct from start. */
5232 + } dom0;
5233 + } console;
5234 + /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
5235 + unsigned long pt_base; /* VIRTUAL address of page directory. */
5236 + unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
5237 + unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
5238 + unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
5239 + unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
5240 + int8_t cmd_line[MAX_GUEST_CMDLINE];
5241 };
5242 +typedef struct start_info start_info_t;
5243 +
5244 +/* New console union for dom0 introduced in 0x00030203. */
5245 +#if __XEN_INTERFACE_VERSION__ < 0x00030203
5246 +#define console_mfn console.domU.mfn
5247 +#define console_evtchn console.domU.evtchn
5248 +#endif
5249
5250 /* These flags are passed in the 'flags' field of start_info_t. */
5251 #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
5252 #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
5253 +#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
5254
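A sketch of early-boot use of this structure and its flags (the start_info pointer is the one handed over in ESI on x86; names here are illustrative):

static void parse_start_info(const start_info_t *si)
{
	/* magic is "xen-<version>-<platform>", e.g. "xen-3.0-x86_32p" */
	if (strncmp(si->magic, "xen-", 4) != 0)
		return;

	if (si->flags & SIF_INITDOMAIN)
		printk(KERN_INFO "dom0: console info at offset %u\n",
		       si->console.dom0.info_off);
	else
		printk(KERN_INFO "domU: store evtchn %u, console evtchn %u\n",
		       si->store_evtchn, si->console.domU.evtchn);
}
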
5255 -typedef uint64_t cpumap_t;
5256 +typedef struct dom0_vga_console_info {
5257 + uint8_t video_type; /* DOM0_VGA_CONSOLE_??? */
5258 +#define XEN_VGATYPE_TEXT_MODE_3 0x03
5259 +#define XEN_VGATYPE_VESA_LFB 0x23
5260 +
5261 + union {
5262 + struct {
5263 + /* Font height, in pixels. */
5264 + uint16_t font_height;
5265 + /* Cursor location (column, row). */
5266 + uint16_t cursor_x, cursor_y;
5267 + /* Number of rows and columns (dimensions in characters). */
5268 + uint16_t rows, columns;
5269 + } text_mode_3;
5270 +
5271 + struct {
5272 + /* Width and height, in pixels. */
5273 + uint16_t width, height;
5274 + /* Bytes per scan line. */
5275 + uint16_t bytes_per_line;
5276 + /* Bits per pixel. */
5277 + uint16_t bits_per_pixel;
5278 + /* LFB physical address, and size (in units of 64kB). */
5279 + uint32_t lfb_base;
5280 + uint32_t lfb_size;
5281 + /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
5282 + uint8_t red_pos, red_size;
5283 + uint8_t green_pos, green_size;
5284 + uint8_t blue_pos, blue_size;
5285 + uint8_t rsvd_pos, rsvd_size;
5286 +#if __XEN_INTERFACE_VERSION__ >= 0x00030206
5287 + /* VESA capabilities (offset 0xa, VESA command 0x4f00). */
5288 + uint32_t gbl_caps;
5289 + /* Mode attributes (offset 0x0, VESA command 0x4f01). */
5290 + uint16_t mode_attrs;
5291 +#endif
5292 + } vesa_lfb;
5293 + } u;
5294 +} dom0_vga_console_info_t;
5295 +#define xen_vga_console_info dom0_vga_console_info
5296 +#define xen_vga_console_info_t dom0_vga_console_info_t
5297
5298 typedef uint8_t xen_domain_handle_t[16];
5299
5300 @@ -461,6 +629,11 @@ typedef uint8_t xen_domain_handle_t[16];
5301 #define __mk_unsigned_long(x) x ## UL
5302 #define mk_unsigned_long(x) __mk_unsigned_long(x)
5303
5304 +__DEFINE_XEN_GUEST_HANDLE(uint8, uint8_t);
5305 +__DEFINE_XEN_GUEST_HANDLE(uint16, uint16_t);
5306 +__DEFINE_XEN_GUEST_HANDLE(uint32, uint32_t);
5307 +__DEFINE_XEN_GUEST_HANDLE(uint64, uint64_t);
5308 +
5309 #else /* __ASSEMBLY__ */
5310
5311 /* In assembly code we cannot use C numeric constant suffixes. */
5312 @@ -468,4 +641,24 @@ typedef uint8_t xen_domain_handle_t[16];
5313
5314 #endif /* !__ASSEMBLY__ */
5315
5316 +/* Default definitions for macros used by domctl/sysctl. */
5317 +#if defined(__XEN__) || defined(__XEN_TOOLS__)
5318 +#ifndef uint64_aligned_t
5319 +#define uint64_aligned_t uint64_t
5320 +#endif
5321 +#ifndef XEN_GUEST_HANDLE_64
5322 +#define XEN_GUEST_HANDLE_64(name) XEN_GUEST_HANDLE(name)
5323 +#endif
5324 +#endif
5325 +
5326 #endif /* __XEN_PUBLIC_XEN_H__ */
5327 +
5328 +/*
5329 + * Local variables:
5330 + * mode: C
5331 + * c-set-style: "BSD"
5332 + * c-basic-offset: 4
5333 + * tab-width: 4
5334 + * indent-tabs-mode: nil
5335 + * End:
5336 + */
5337 --- sle11-2009-09-18.orig/include/xen/xenbus.h 2009-09-18 10:11:48.000000000 +0200
5338 +++ sle11-2009-09-18/include/xen/xenbus.h 2008-11-25 12:35:56.000000000 +0100
5339 @@ -39,6 +39,7 @@
5340 #include <linux/mutex.h>
5341 #include <linux/completion.h>
5342 #include <linux/init.h>
5343 +#include <linux/err.h>
5344 #include <xen/interface/xen.h>
5345 #include <xen/interface/grant_table.h>
5346 #include <xen/interface/io/xenbus.h>
5347 @@ -55,8 +56,17 @@ struct xenbus_watch
5348 /* Callback (executed in a process context with no locks held). */
5349 void (*callback)(struct xenbus_watch *,
5350 const char **vec, unsigned int len);
5351 +
5352 + /* See XBWF_ definitions below. */
5353 + unsigned long flags;
5354 };
5355
5356 +/*
5357 + * Execute callback in its own kthread. Useful if the callback is long
5358 + * running or heavily serialised, to avoid taking out the main xenwatch thread
5359 + * for a long period of time (or even unwittingly causing a deadlock).
5360 + */
5361 +#define XBWF_new_thread 1
5362
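A sketch of opting a heavyweight callback out of the shared xenwatch thread (my_callback is hypothetical; flags must be set before the watch is registered):

static void my_callback(struct xenbus_watch *watch,
			const char **vec, unsigned int len)
{
	/* may run for a long time without stalling xenwatch */
}

static struct xenbus_watch my_watch = {
	.callback = my_callback,
	.flags    = XBWF_new_thread,
};
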
5363 /* A xenbus device. */
5364 struct xenbus_device {
5365 @@ -105,27 +115,8 @@ static inline struct xenbus_driver *to_x
5366 return container_of(drv, struct xenbus_driver, driver);
5367 }
5368
5369 -int __must_check __xenbus_register_frontend(struct xenbus_driver *drv,
5370 - struct module *owner,
5371 - const char *mod_name);
5372 -
5373 -static inline int __must_check
5374 -xenbus_register_frontend(struct xenbus_driver *drv)
5375 -{
5376 - WARN_ON(drv->owner != THIS_MODULE);
5377 - return __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
5378 -}
5379 -
5380 -int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
5381 - struct module *owner,
5382 - const char *mod_name);
5383 -static inline int __must_check
5384 -xenbus_register_backend(struct xenbus_driver *drv)
5385 -{
5386 - WARN_ON(drv->owner != THIS_MODULE);
5387 - return __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
5388 -}
5389 -
5390 +int xenbus_register_frontend(struct xenbus_driver *drv);
5391 +int xenbus_register_backend(struct xenbus_driver *drv);
5392 void xenbus_unregister_driver(struct xenbus_driver *drv);
5393
5394 struct xenbus_transaction
5395 @@ -136,8 +127,6 @@ struct xenbus_transaction
5396 /* Nil transaction ID. */
5397 #define XBT_NIL ((struct xenbus_transaction) { 0 })
5398
5399 -int __init xenbus_dev_init(void);
5400 -
5401 char **xenbus_directory(struct xenbus_transaction t,
5402 const char *dir, const char *node, unsigned int *num);
5403 void *xenbus_read(struct xenbus_transaction t,
5404 @@ -167,7 +156,6 @@ int xenbus_printf(struct xenbus_transact
5405 int xenbus_gather(struct xenbus_transaction t, const char *dir, ...);
5406
5407 /* notifier routines for when the xenstore comes up */
5408 -extern int xenstored_ready;
5409 int register_xenstore_notifier(struct notifier_block *nb);
5410 void unregister_xenstore_notifier(struct notifier_block *nb);
5411
5412 @@ -180,12 +168,9 @@ void xs_suspend_cancel(void);
5413 /* Used by xenbus_dev to borrow kernel's store connection. */
5414 void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
5415
5416 -struct work_struct;
5417 -
5418 /* Prepare for domain suspend: then resume or cancel the suspend. */
5419 void xenbus_suspend(void);
5420 void xenbus_resume(void);
5421 -void xenbus_probe(struct work_struct *);
5422 void xenbus_suspend_cancel(void);
5423
5424 #define XENBUS_IS_ERR_READ(str) ({ \
5425 @@ -198,38 +183,125 @@ void xenbus_suspend_cancel(void);
5426
5427 #define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
5428
5429 +
5430 +/**
5431 + * Register a watch on the given path, using the given xenbus_watch structure
5432 + * for storage, and the given callback function as the callback. Return 0 on
5433 + * success, or -errno on error. On success, the given path will be saved as
5434 + * watch->node, and remains the caller's to free. On error, watch->node will
5435 + * be NULL, the device will switch to XenbusStateClosing, and the error will
5436 + * be saved in the store.
5437 + */
5438 int xenbus_watch_path(struct xenbus_device *dev, const char *path,
5439 struct xenbus_watch *watch,
5440 void (*callback)(struct xenbus_watch *,
5441 const char **, unsigned int));
5442 -int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
5443 - void (*callback)(struct xenbus_watch *,
5444 - const char **, unsigned int),
5445 - const char *pathfmt, ...)
5446 - __attribute__ ((format (printf, 4, 5)));
5447
5448 +
5449 +/**
5450 + * Register a watch on the given path/path2, using the given xenbus_watch
5451 + * structure for storage, and the given callback function as the callback.
5452 + * Return 0 on success, or -errno on error. On success, the watched path
5453 + * (path/path2) will be saved as watch->node, and becomes the caller's to
5454 + * kfree(). On error, watch->node will be NULL, so the caller has nothing to
5455 + * free, the device will switch to XenbusStateClosing, and the error will be
5456 + * saved in the store.
5457 + */
5458 +int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
5459 + const char *path2, struct xenbus_watch *watch,
5460 + void (*callback)(struct xenbus_watch *,
5461 + const char **, unsigned int));
5462 +
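For example, the common frontend pattern of watching the peer's state node looks like this sketch (otherend_changed is illustrative; dev->otherend is the peer's store directory):

static void otherend_changed(struct xenbus_watch *watch,
			     const char **vec, unsigned int len)
{
	/* vec[XS_WATCH_PATH] names the node that fired */
}

static int watch_otherend(struct xenbus_device *dev,
			  struct xenbus_watch *watch)
{
	/* on success, "<otherend>/state" is saved in watch->node and
	 * is ours to kfree() after unregistering */
	return xenbus_watch_path2(dev, dev->otherend, "state",
				  watch, otherend_changed);
}
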
5463 +
5464 +/**
5465 + * Advertise in the store a change of the given driver to the given new_state.
5466 + * Return 0 on success, or -errno on error. On error, the device will switch
5467 + * to XenbusStateClosing, and the error will be saved in the store.
5468 + */
5469 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
5470 +
5471 +
5472 +/**
5473 + * Grant access to the given ring_mfn to the peer of the given device. Return
5474 + * 0 on success, or -errno on error. On error, the device will switch to
5475 + * XenbusStateClosing, and the error will be saved in the store.
5476 + */
5477 int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
5478 -int xenbus_map_ring_valloc(struct xenbus_device *dev,
5479 - int gnt_ref, void **vaddr);
5480 +
5481 +
5482 +/**
5483 + * Map a page of memory into this domain from another domain's grant table.
5484 + * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
5485 + * page to that address, and sets *vaddr to that address.
5486 + * xenbus_map_ring does not allocate the virtual address space (you must do
5487 + * this yourself!). It only maps in the page to the specified address.
5488 + * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
5489 + * or -ENOMEM on error. If an error is returned, device will switch to
5490 + * XenbusStateClosing and the error message will be saved in XenStore.
5491 + */
5492 +struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev,
5493 + int gnt_ref);
5494 int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
5495 grant_handle_t *handle, void *vaddr);
5496
5497 -int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
5498 +
5499 +/**
5500 + * Unmap a page of memory in this domain that was imported from another domain.
5501 + * Use xenbus_unmap_ring_vfree if you mapped in your memory with
5502 + * xenbus_map_ring_valloc (it will free the virtual address space).
5503 + * Returns 0 on success and returns GNTST_* on error
5504 + * (see xen/include/interface/grant_table.h).
5505 + */
5506 +int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *);
5507 int xenbus_unmap_ring(struct xenbus_device *dev,
5508 grant_handle_t handle, void *vaddr);
5509
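The valloc/vfree pairing works out to a sketch like this (assuming the usual ERR_PTR error convention; IS_ERR comes from the <linux/err.h> include added above):

static void *map_peer_ring(struct xenbus_device *dev, int gnt_ref,
			   struct vm_struct **area)
{
	*area = xenbus_map_ring_valloc(dev, gnt_ref);
	if (IS_ERR(*area))
		return NULL;	/* error already reported in the store */
	return (*area)->addr;	/* mapping of the peer's ring page */
}

static void unmap_peer_ring(struct xenbus_device *dev,
			    struct vm_struct *area)
{
	xenbus_unmap_ring_vfree(dev, area);	/* also frees the vaddr space */
}
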
5510 +
5511 +/**
5512 + * Allocate an event channel for the given xenbus_device, assigning the newly
5513 + * created local port to *port. Return 0 on success, or -errno on error. On
5514 + * error, the device will switch to XenbusStateClosing, and the error will be
5515 + * saved in the store.
5516 + */
5517 int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
5518 -int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port);
5519 +
5520 +
5521 +/**
5522 + * Free an existing event channel. Returns 0 on success or -errno on error.
5523 + */
5524 int xenbus_free_evtchn(struct xenbus_device *dev, int port);
5525
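Putting the event-channel helpers together with the store, as a sketch (the "event-channel" node name is shown for illustration):

static int announce_evtchn(struct xenbus_device *dev, int *port)
{
	int err = xenbus_alloc_evtchn(dev, port);
	if (err)
		return err;	/* device already switched to Closing */

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "event-channel", "%d", *port);
	if (err)
		xenbus_free_evtchn(dev, *port);
	return err;
}
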
5526 +
5527 +/**
5528 + * Return the state of the driver rooted at the given store path, or
5529 + * XenbusStateUnknown if no state can be read.
5530 + */
5531 enum xenbus_state xenbus_read_driver_state(const char *path);
5532
5533 -void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...);
5534 -void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...);
5535 +
5536 +/***
5537 + * Report the given negative errno into the store, along with the given
5538 + * formatted message.
5539 + */
5540 +void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
5541 + ...);
5542 +
5543 +
5544 +/***
5545 + * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
5546 + * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
5547 + * closedown of this driver and its peer.
5548 + */
5549 +void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
5550 + ...);
5551 +
5552 +int xenbus_dev_init(void);
5553
5554 const char *xenbus_strstate(enum xenbus_state state);
5555 int xenbus_dev_is_online(struct xenbus_device *dev);
5556 int xenbus_frontend_closed(struct xenbus_device *dev);
5557
5558 +int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *));
5559 +int xenbus_for_each_frontend(void *arg, int (*fn)(struct device *, void *));
5560 +
5561 #endif /* _XEN_XENBUS_H */