1 Subject: xen3 common
2 From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 728:832aac894efd)
3 Patch-mainline: obsolete
4 Acked-by: jbeulich@novell.com
5
6 List of files that no longer require modification (and hence were
7 removed from this patch), kept for reference and in case upstream
8 wants to take the forward-porting patches:
9 2.6.22/include/linux/sched.h
10 2.6.22/kernel/softlockup.c
11 2.6.22/kernel/timer.c
12 2.6.25/mm/highmem.c
13
14 ---
15 drivers/Makefile | 1
16 drivers/acpi/Makefile | 3
17 drivers/acpi/hardware/hwsleep.c | 15
18 drivers/acpi/processor_core.c | 72 +++
19 drivers/acpi/processor_extcntl.c | 241 +++++++++++
20 drivers/acpi/processor_idle.c | 24 -
21 drivers/acpi/processor_perflib.c | 21
22 drivers/acpi/sleep/main.c | 9
23 drivers/char/agp/intel-agp.c | 10
24 drivers/char/mem.c | 16
25 drivers/char/tpm/Makefile | 2
26 drivers/char/tpm/tpm.h | 15
27 drivers/char/tpm/tpm_vtpm.c | 542 +++++++++++++++++++++++++
28 drivers/char/tpm/tpm_vtpm.h | 55 ++
29 drivers/char/tpm/tpm_xen.c | 722 ++++++++++++++++++++++++++++++++++
30 drivers/ide/ide-lib.c | 8
31 drivers/oprofile/buffer_sync.c | 87 +++-
32 drivers/oprofile/cpu_buffer.c | 51 +-
33 drivers/oprofile/cpu_buffer.h | 9
34 drivers/oprofile/event_buffer.h | 3
35 drivers/oprofile/oprof.c | 30 +
36 drivers/oprofile/oprof.h | 3
37 drivers/oprofile/oprofile_files.c | 201 +++++++++
38 fs/aio.c | 119 +++++
39 fs/compat_ioctl.c | 19
40 include/acpi/processor.h | 143 ++++++
41 include/asm-generic/pci.h | 2
42 include/asm-generic/pgtable.h | 4
43 include/linux/aio.h | 5
44 include/linux/highmem.h | 8
45 include/linux/interrupt.h | 6
46 include/linux/kexec.h | 13
47 include/linux/mm.h | 8
48 include/linux/oprofile.h | 12
49 include/linux/page-flags.h | 27 +
50 include/linux/pci.h | 12
51 include/linux/skbuff.h | 8
52 include/linux/vermagic.h | 7
53 kernel/irq/spurious.c | 2
54 kernel/kexec.c | 71 ++-
55 kernel/sysctl.c | 2
56 mm/memory.c | 42 +
57 mm/mprotect.c | 2
58 mm/page_alloc.c | 12
59 net/core/dev.c | 62 ++
60 net/core/skbuff.c | 4
61 net/ipv4/netfilter/nf_nat_proto_tcp.c | 3
62 net/ipv4/netfilter/nf_nat_proto_udp.c | 4
63 net/ipv4/xfrm4_output.c | 2
64 scripts/Makefile.build | 14
65 scripts/Makefile.lib | 6
66 51 files changed, 2673 insertions(+), 86 deletions(-)
67
68 Index: linux-2.6.27/drivers/Makefile
69 ===================================================================
70 --- linux-2.6.27.orig/drivers/Makefile
71 +++ linux-2.6.27/drivers/Makefile
72 @@ -37,6 +37,7 @@ obj-y += base/ block/ misc/ mfd/ net/
73 obj-$(CONFIG_NUBUS) += nubus/
74 obj-$(CONFIG_ATM) += atm/
75 obj-y += macintosh/
76 +obj-$(CONFIG_XEN) += xen/
77 obj-$(CONFIG_SCSI) += scsi/
78 obj-$(CONFIG_ATA) += ata/
79 obj-$(CONFIG_IDE) += ide/
80 Index: linux-2.6.27/drivers/acpi/Makefile
81 ===================================================================
82 --- linux-2.6.27.orig/drivers/acpi/Makefile
83 +++ linux-2.6.27/drivers/acpi/Makefile
84 @@ -34,6 +34,9 @@ processor-objs += processor_core.o proce
85 ifdef CONFIG_CPU_FREQ
86 processor-objs += processor_perflib.o
87 endif
88 +ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
89 +processor-objs += processor_perflib.o processor_extcntl.o
90 +endif
91
92 obj-y += sleep/
93 obj-y += bus.o glue.o
94 Index: linux-2.6.27/drivers/acpi/hardware/hwsleep.c
95 ===================================================================
96 --- linux-2.6.27.orig/drivers/acpi/hardware/hwsleep.c
97 +++ linux-2.6.27/drivers/acpi/hardware/hwsleep.c
98 @@ -241,7 +241,11 @@ acpi_status asmlinkage acpi_enter_sleep_
99 u32 PM1Bcontrol;
100 struct acpi_bit_register_info *sleep_type_reg_info;
101 struct acpi_bit_register_info *sleep_enable_reg_info;
102 +#if !(defined(CONFIG_XEN) && defined(CONFIG_X86))
103 u32 in_value;
104 +#else
105 + int err;
106 +#endif
107 struct acpi_object_list arg_list;
108 union acpi_object arg;
109 acpi_status status;
110 @@ -351,6 +355,7 @@ acpi_status asmlinkage acpi_enter_sleep_
111
112 ACPI_FLUSH_CPU_CACHE();
113
114 +#if !(defined(CONFIG_XEN) && defined(CONFIG_X86))
115 status = acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
116 PM1Acontrol);
117 if (ACPI_FAILURE(status)) {
118 @@ -397,6 +402,16 @@ acpi_status asmlinkage acpi_enter_sleep_
119 /* Spin until we wake */
120
121 } while (!in_value);
122 +#else
123 +	/* PV ACPI just needs to check the hypercall return value */
124 + err = acpi_notify_hypervisor_state(sleep_state,
125 + PM1Acontrol, PM1Bcontrol);
126 + if (err) {
127 + ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
128 + "Hypervisor failure [%d]\n", err));
129 + return_ACPI_STATUS(AE_ERROR);
130 + }
131 +#endif
132
133 return_ACPI_STATUS(AE_OK);
134 }
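
The two hunks above replace the native S-state entry, which programs the PM1A/PM1B control registers and spins on the wake status, with a single hypercall when the kernel runs as a Xen PV dom0 on x86: the hypervisor owns the hardware, so the kernel only reports the target sleep state and checks the return value. A minimal self-contained C sketch of that compile-time dispatch; pv_sleep_notify() and hw_sleep_enter() are invented stand-ins for acpi_notify_hypervisor_state() and the register writes, not real APIs:

#include <stdio.h>

static int pv_sleep_notify(int state)
{
	printf("hypercall: enter S%d\n", state);
	return 0;
}

static void hw_sleep_enter(int state)
{
	printf("PM1x control write: enter S%d\n", state);
}

static int enter_sleep_state(int state)
{
#if defined(CONFIG_XEN) && defined(CONFIG_X86)
	/* PV path: the hypervisor owns the hardware; just report the
	 * target state and check the hypercall's return value. */
	int err = pv_sleep_notify(state);
	if (err)
		return err;
#else
	/* Native path: program the PM1 control registers directly
	 * and (in the real code) spin until the wake status is set. */
	hw_sleep_enter(state);
#endif
	return 0;
}

int main(void)
{
	return enter_sleep_state(3);
}
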
135 Index: linux-2.6.27/drivers/acpi/processor_core.c
136 ===================================================================
137 --- linux-2.6.27.orig/drivers/acpi/processor_core.c
138 +++ linux-2.6.27/drivers/acpi/processor_core.c
139 @@ -620,7 +620,8 @@ static int acpi_processor_get_info(struc
140 */
141 if (pr->id == -1) {
142 if (ACPI_FAILURE
143 - (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
144 + (acpi_processor_hotadd_init(pr->handle, &pr->id)) &&
145 + !processor_cntl_external()) {
146 return -ENODEV;
147 }
148 }
149 @@ -662,7 +663,11 @@ static int acpi_processor_get_info(struc
150 return 0;
151 }
152
153 +#ifndef CONFIG_XEN
154 static DEFINE_PER_CPU(void *, processor_device_array);
155 +#else
156 +static void *processor_device_array[NR_ACPI_CPUS];
157 +#endif
158
159 static int __cpuinit acpi_processor_start(struct acpi_device *device)
160 {
161 @@ -671,30 +676,46 @@ static int __cpuinit acpi_processor_star
162 struct acpi_processor *pr;
163 struct sys_device *sysdev;
164
165 + processor_extcntl_init();
166 +
167 pr = acpi_driver_data(device);
168
169 result = acpi_processor_get_info(device);
170 - if (result) {
171 + if (result ||
172 + ((pr->id == -1) && !processor_cntl_external())) {
173 /* Processor is physically not present */
174 return 0;
175 }
176
177 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
178 + BUG_ON(!processor_cntl_external() &&
179 + ((pr->id >= nr_cpu_ids) || (pr->id < 0)));
180
181 /*
182 * Buggy BIOS check
183 * ACPI id of processors can be reported wrongly by the BIOS.
184 * Don't trust it blindly
185 */
186 +#ifndef CONFIG_XEN
187 if (per_cpu(processor_device_array, pr->id) != NULL &&
188 per_cpu(processor_device_array, pr->id) != device) {
189 +#else
190 + BUG_ON(pr->acpi_id >= NR_ACPI_CPUS);
191 + if (processor_device_array[pr->acpi_id] != NULL &&
192 + processor_device_array[pr->acpi_id] != device) {
193 +#endif
194 printk(KERN_WARNING "BIOS reported wrong ACPI id "
195 "for the processor\n");
196 return -ENODEV;
197 }
198 +#ifndef CONFIG_XEN
199 per_cpu(processor_device_array, pr->id) = device;
200
201 per_cpu(processors, pr->id) = pr;
202 +#else
203 + processor_device_array[pr->acpi_id] = device;
204 + if (pr->id != -1)
205 + per_cpu(processors, pr->id) = pr;
206 +#endif
207
208 result = acpi_processor_add_fs(device);
209 if (result)
210 @@ -710,15 +731,28 @@ static int __cpuinit acpi_processor_star
211 /* _PDC call should be done before doing anything else (if reqd.). */
212 arch_acpi_processor_init_pdc(pr);
213 acpi_processor_set_pdc(pr);
214 -#ifdef CONFIG_CPU_FREQ
215 +#if defined(CONFIG_CPU_FREQ) || defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL)
216 acpi_processor_ppc_has_changed(pr);
217 #endif
218 - acpi_processor_get_throttling_info(pr);
219 - acpi_processor_get_limit_info(pr);
220 +
221 + /*
222 +	 * pr->id may be -1 when processor_cntl_external is enabled.
223 +	 * The throttle and thermal modules don't support this case.
224 +	 * So far Tx only works when the dom0 vcpu count equals the pcpu
225 +	 * count, as we give control to dom0.
226 + */
227 + if (pr->id != -1) {
228 + acpi_processor_get_throttling_info(pr);
229 + acpi_processor_get_limit_info(pr);
230 + }
231
232
233 acpi_processor_power_init(pr, device);
234
235 + result = processor_extcntl_prepare(pr);
236 + if (result)
237 + goto end;
238 +
239 pr->cdev = thermal_cooling_device_register("Processor", device,
240 &processor_cooling_ops);
241 if (IS_ERR(pr->cdev)) {
242 @@ -846,7 +880,7 @@ static int acpi_processor_remove(struct
243
244 pr = acpi_driver_data(device);
245
246 - if (pr->id >= nr_cpu_ids) {
247 + if (!processor_cntl_external() && pr->id >= nr_cpu_ids) {
248 kfree(pr);
249 return 0;
250 }
251 @@ -872,8 +906,14 @@ static int acpi_processor_remove(struct
252 pr->cdev = NULL;
253 }
254
255 +#ifndef CONFIG_XEN
256 per_cpu(processors, pr->id) = NULL;
257 per_cpu(processor_device_array, pr->id) = NULL;
258 +#else
259 + if (pr->id != -1)
260 + per_cpu(processors, pr->id) = NULL;
261 + processor_device_array[pr->acpi_id] = NULL;
262 +#endif
263 kfree(pr);
264
265 return 0;
266 @@ -933,6 +973,10 @@ int acpi_processor_device_add(acpi_handl
267 if (!pr)
268 return -ENODEV;
269
270 + if (processor_cntl_external())
271 + processor_notify_external(pr,
272 + PROCESSOR_HOTPLUG, HOTPLUG_TYPE_ADD);
273 +
274 if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
275 kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
276 }
277 @@ -972,6 +1016,10 @@ static void __ref acpi_processor_hotplug
278 break;
279 }
280
281 + if (processor_cntl_external())
282 + processor_notify_external(pr,
283 + PROCESSOR_HOTPLUG, HOTPLUG_TYPE_ADD);
284 +
285 if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
286 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
287 break;
288 @@ -1003,6 +1051,11 @@ static void __ref acpi_processor_hotplug
289
290 if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
291 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
292 +
293 + if (processor_cntl_external())
294 + processor_notify_external(pr, PROCESSOR_HOTPLUG,
295 + HOTPLUG_TYPE_REMOVE);
296 +
297 break;
298 default:
299 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
300 @@ -1067,6 +1120,11 @@ static acpi_status acpi_processor_hotadd
301
302 static int acpi_processor_handle_eject(struct acpi_processor *pr)
303 {
304 +#ifdef CONFIG_XEN
305 + if (pr->id == -1)
306 + return (0);
307 +#endif
308 +
309 if (cpu_online(pr->id))
310 cpu_down(pr->id);
311
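
The recurring theme of the processor_core.c changes above is that, under external (Xen) control, pr->id may be -1 for processors dom0 itself does not run on, so the driver keys its device bookkeeping on the ACPI id (a flat NR_ACPI_CPUS array) instead of the Linux cpu id, while keeping the duplicate-device ("buggy BIOS") check. A self-contained sketch of that lookup pattern; the names and table size are illustrative only:

#include <stdio.h>

#define NR_ACPI_CPUS 8

static const void *device_by_acpi_id[NR_ACPI_CPUS];

/* Returns 0 on success, -1 if the BIOS reported a duplicate ACPI id. */
static int register_processor(unsigned int acpi_id, const void *device)
{
	if (acpi_id >= NR_ACPI_CPUS)
		return -1;
	/* Buggy BIOS check: the slot must not already be taken by a
	 * different device object. */
	if (device_by_acpi_id[acpi_id] != NULL &&
	    device_by_acpi_id[acpi_id] != device) {
		fprintf(stderr, "BIOS reported wrong ACPI id %u\n", acpi_id);
		return -1;
	}
	device_by_acpi_id[acpi_id] = device;
	return 0;
}

int main(void)
{
	int dev_a, dev_b;

	if (register_processor(0, &dev_a))
		return 1;
	/* A different device at the same ACPI id must be rejected. */
	return register_processor(0, &dev_b) == -1 ? 0 : 1;
}
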
312 Index: linux-2.6.27/drivers/acpi/processor_extcntl.c
313 ===================================================================
314 --- /dev/null
315 +++ linux-2.6.27/drivers/acpi/processor_extcntl.c
316 @@ -0,0 +1,241 @@
317 +/*
318 + * processor_extcntl.c - channel to external control logic
319 + *
320 + * Copyright (C) 2008, Intel corporation
321 + *
322 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
323 + *
324 + * This program is free software; you can redistribute it and/or modify
325 + * it under the terms of the GNU General Public License as published by
326 + * the Free Software Foundation; either version 2 of the License, or (at
327 + * your option) any later version.
328 + *
329 + * This program is distributed in the hope that it will be useful, but
330 + * WITHOUT ANY WARRANTY; without even the implied warranty of
331 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
332 + * General Public License for more details.
333 + *
334 + * You should have received a copy of the GNU General Public License along
335 + * with this program; if not, write to the Free Software Foundation, Inc.,
336 + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
337 + *
338 + */
339 +
340 +#include <linux/kernel.h>
341 +#include <linux/init.h>
342 +#include <linux/types.h>
343 +#include <linux/acpi.h>
344 +#include <linux/pm.h>
345 +#include <linux/cpu.h>
346 +
347 +#include <acpi/processor.h>
348 +
349 +#define ACPI_PROCESSOR_COMPONENT 0x01000000
350 +#define ACPI_PROCESSOR_CLASS "processor"
351 +#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver"
352 +#define _COMPONENT ACPI_PROCESSOR_COMPONENT
353 +ACPI_MODULE_NAME("acpi_processor")
354 +
355 +static int processor_extcntl_parse_csd(struct acpi_processor *pr);
356 +static int processor_extcntl_get_performance(struct acpi_processor *pr);
357 +/*
358 + * External processor control logic may register with its own set of
359 + * ops to get ACPI related notification. One example is like VMM.
360 + */
361 +const struct processor_extcntl_ops *processor_extcntl_ops;
362 +EXPORT_SYMBOL(processor_extcntl_ops);
363 +
364 +static int processor_notify_smm(void)
365 +{
366 + acpi_status status;
367 + static int is_done = 0;
368 +
369 +	/* only need to notify the BIOS successfully once */
370 +	/* avoid double notification, which may lead to unexpected results */
371 + if (is_done)
372 + return 0;
373 +
374 + /* Can't write pstate_cnt to smi_cmd if either value is zero */
375 + if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) {
376 + ACPI_DEBUG_PRINT((ACPI_DB_INFO,"No SMI port or pstate_cnt\n"));
377 + return 0;
378 + }
379 +
380 + ACPI_DEBUG_PRINT((ACPI_DB_INFO,
381 + "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n",
382 + acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));
383 +
384 +	/* FADT v1 doesn't define pstate_cnt, but many BIOS vendors use
385 +	 * it anyway, so we need to support it... */
386 + if (acpi_fadt_is_v1) {
387 + ACPI_DEBUG_PRINT((ACPI_DB_INFO,
388 + "Using v1.0 FADT reserved value for pstate_cnt\n"));
389 + }
390 +
391 + status = acpi_os_write_port(acpi_fadt.smi_cmd,
392 + (u32) acpi_fadt.pstate_cnt, 8);
393 + if (ACPI_FAILURE(status))
394 + return status;
395 +
396 + is_done = 1;
397 +
398 + return 0;
399 +}
400 +
401 +int processor_notify_external(struct acpi_processor *pr, int event, int type)
402 +{
403 + int ret = -EINVAL;
404 +
405 + if (!processor_cntl_external())
406 + return -EINVAL;
407 +
408 + switch (event) {
409 + case PROCESSOR_PM_INIT:
410 + case PROCESSOR_PM_CHANGE:
411 + if ((type >= PM_TYPE_MAX) ||
412 + !processor_extcntl_ops->pm_ops[type])
413 + break;
414 +
415 + ret = processor_extcntl_ops->pm_ops[type](pr, event);
416 + break;
417 + case PROCESSOR_HOTPLUG:
418 + if (processor_extcntl_ops->hotplug)
419 + ret = processor_extcntl_ops->hotplug(pr, type);
420 + break;
421 + default:
422 +		printk(KERN_ERR "Unsupported processor event %d.\n", event);
423 + break;
424 + }
425 +
426 + return ret;
427 +}
428 +
429 +/*
430 + * External control logic may take over all or part of the physical
431 + * processor control. Take a VMM for example: physical processors are
432 + * owned by the VMM, so existence information such as hotplug events
433 + * must always be forwarded to it. The same holds for processor idle
434 + * states, which are also necessarily controlled by the VMM. For other
435 + * controls such as performance/throttle states, the VMM may choose
436 + * whether to take control, according to its own policy.
437 + */
438 +void processor_extcntl_init(void)
439 +{
440 + if (!processor_extcntl_ops)
441 + arch_acpi_processor_init_extcntl(&processor_extcntl_ops);
442 +}
443 +
444 +/*
445 + * This is called from ACPI processor init and holds some tricky
446 + * housekeeping work required by the external control model.
447 + * For example, we may put dependency-parsing stubs here for idle
448 + * and performance states. That information may not be available
449 + * once split from dom0 control logic such as the cpufreq driver.
450 + */
451 +int processor_extcntl_prepare(struct acpi_processor *pr)
452 +{
453 + /* parse cstate dependency information */
454 + if (processor_pm_external())
455 + processor_extcntl_parse_csd(pr);
456 +
457 + /* Initialize performance states */
458 + if (processor_pmperf_external())
459 + processor_extcntl_get_performance(pr);
460 +
461 + return 0;
462 +}
463 +
464 +/*
465 + * Currently no _CSD is implemented, which is why the existing ACPI
466 + * code doesn't parse _CSD at all. But to keep the interface to the
467 + * external control logic complete, we put a placeholder here for future
468 + * compatibility.
469 + */
470 +static int processor_extcntl_parse_csd(struct acpi_processor *pr)
471 +{
472 + int i;
473 +
474 + for (i = 0; i < pr->power.count; i++) {
475 + if (!pr->power.states[i].valid)
476 + continue;
477 +
478 + /* No dependency by default */
479 + pr->power.states[i].domain_info = NULL;
480 + pr->power.states[i].csd_count = 0;
481 + }
482 +
483 + return 0;
484 +}
485 +
486 +/*
487 + * The existing ACPI module does parse performance states at some
488 + * point, when the acpi-cpufreq driver is loaded; however, that is
489 + * something we'd like to disable to avoid conflicts with the external
490 + * control logic. So we have to collect the raw performance information
491 + * here, when the ACPI processor object is found and started.
492 + */
493 +static int processor_extcntl_get_performance(struct acpi_processor *pr)
494 +{
495 + int ret;
496 + struct acpi_processor_performance *perf;
497 + struct acpi_psd_package *pdomain;
498 +
499 + if (pr->performance)
500 + return -EBUSY;
501 +
502 + perf = kzalloc(sizeof(struct acpi_processor_performance), GFP_KERNEL);
503 + if (!perf)
504 + return -ENOMEM;
505 +
506 + pr->performance = perf;
507 + /* Get basic performance state information */
508 + ret = acpi_processor_get_performance_info(pr);
509 + if (ret < 0)
510 + goto err_out;
511 +
512 + /*
513 +	 * Here we need to retrieve performance dependency information
514 +	 * from the _PSD object. The existing interface is not used because
515 +	 * it sticks to the Linux cpu id to construct a bitmap, whereas we
516 +	 * want to decouple ACPI processor objects from the Linux cpu id
517 +	 * logic. For example, even when Linux is configured as UP, we still
518 +	 * want to report all ACPI processor objects to the external logic.
519 +	 * It is therefore preferable to use the ACPI ID instead.
521 + */
522 + pdomain = &pr->performance->domain_info;
523 + pdomain->num_processors = 0;
524 + ret = acpi_processor_get_psd(pr);
525 + if (ret < 0) {
526 + /*
527 + * _PSD is optional - assume no coordination if absent (or
528 + * broken), matching native kernels' behavior.
529 + */
530 + pdomain->num_entries = ACPI_PSD_REV0_ENTRIES;
531 + pdomain->revision = ACPI_PSD_REV0_REVISION;
532 + pdomain->domain = pr->acpi_id;
533 + pdomain->coord_type = DOMAIN_COORD_TYPE_SW_ALL;
534 + pdomain->num_processors = 1;
535 + }
536 +
537 + /* Some sanity check */
538 + if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
539 + (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) ||
540 + ((pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL) &&
541 + (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY) &&
542 + (pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL))) {
543 + ret = -EINVAL;
544 + goto err_out;
545 + }
546 +
547 + /* Last step is to notify BIOS that external logic exists */
548 + processor_notify_smm();
549 +
550 + processor_notify_external(pr, PROCESSOR_PM_INIT, PM_TYPE_PERF);
551 +
552 + return 0;
553 +err_out:
554 + pr->performance = NULL;
555 + kfree(perf);
556 + return ret;
557 +}
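
processor_notify_external() in the new file above is a plain dispatch through an ops table that an external controller (e.g. the Xen hypervisor glue) registers at init time. A compilable user-space model of the same pattern; extcntl_ops, the event/type constants, and perf_handler() are invented for the sketch and are not the kernel symbols:

#include <stdio.h>

enum { EV_PM_INIT, EV_PM_CHANGE, EV_HOTPLUG };
enum { PM_TYPE_IDLE, PM_TYPE_PERF, PM_TYPE_MAX };

struct extcntl_ops {
	/* one handler per PM type, plus a hotplug handler */
	int (*pm_ops[PM_TYPE_MAX])(int event);
	int (*hotplug)(int type);
};

static int perf_handler(int event)
{
	printf("external controller: perf event %d\n", event);
	return 0;
}

/* Registered controller; only the perf slot is filled in this demo. */
static const struct extcntl_ops xen_ops = {
	.pm_ops = { [PM_TYPE_PERF] = perf_handler },
};
static const struct extcntl_ops *extcntl_ops = &xen_ops;

static int notify_external(int event, int type)
{
	if (!extcntl_ops)
		return -1; /* no external controller registered */

	switch (event) {
	case EV_PM_INIT:
	case EV_PM_CHANGE:
		if (type >= PM_TYPE_MAX || !extcntl_ops->pm_ops[type])
			return -1;
		return extcntl_ops->pm_ops[type](event);
	case EV_HOTPLUG:
		return extcntl_ops->hotplug ? extcntl_ops->hotplug(type) : -1;
	default:
		return -1;
	}
}

int main(void)
{
	return notify_external(EV_PM_INIT, PM_TYPE_PERF);
}
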
558 Index: linux-2.6.27/drivers/acpi/processor_idle.c
559 ===================================================================
560 --- linux-2.6.27.orig/drivers/acpi/processor_idle.c
561 +++ linux-2.6.27/drivers/acpi/processor_idle.c
562 @@ -908,7 +908,8 @@ static int acpi_processor_get_power_info
563 */
564 cx.entry_method = ACPI_CSTATE_HALT;
565 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
566 - } else {
567 +	/* This doesn't apply to the external control case */
568 + } else if (!processor_pm_external()) {
569 continue;
570 }
571 if (cx.type == ACPI_STATE_C1 &&
572 @@ -947,6 +948,12 @@ static int acpi_processor_get_power_info
573
574 cx.power = obj->integer.value;
575
576 +#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
577 + /* cache control methods to notify external logic */
578 + if (processor_pm_external())
579 + memcpy(&cx.reg, reg, sizeof(*reg));
580 +#endif
581 +
582 current_count++;
583 memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
584
585 @@ -1289,14 +1296,18 @@ int acpi_processor_cst_has_changed(struc
586 * been initialized.
587 */
588 if (pm_idle_save) {
589 - pm_idle = pm_idle_save;
590 + if (!processor_pm_external())
591 + pm_idle = pm_idle_save;
592 /* Relies on interrupts forcing exit from idle. */
593 synchronize_sched();
594 }
595
596 pr->flags.power = 0;
597 result = acpi_processor_get_power_info(pr);
598 - if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
599 + if (processor_pm_external())
600 + processor_notify_external(pr,
601 + PROCESSOR_PM_CHANGE, PM_TYPE_IDLE);
602 + else if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
603 pm_idle = acpi_processor_idle;
604
605 return result;
606 @@ -1821,7 +1832,7 @@ int __cpuinit acpi_processor_power_init(
607 printk(")\n");
608
609 #ifndef CONFIG_CPU_IDLE
610 - if (pr->id == 0) {
611 + if (!processor_pm_external() && (pr->id == 0)) {
612 pm_idle_save = pm_idle;
613 pm_idle = acpi_processor_idle;
614 }
615 @@ -1835,6 +1846,11 @@ int __cpuinit acpi_processor_power_init(
616 acpi_driver_data(device));
617 if (!entry)
618 return -EIO;
619 +
620 + if (processor_pm_external())
621 + processor_notify_external(pr,
622 + PROCESSOR_PM_INIT, PM_TYPE_IDLE);
623 +
624 return 0;
625 }
626
627 Index: linux-2.6.27/drivers/acpi/processor_perflib.c
628 ===================================================================
629 --- linux-2.6.27.orig/drivers/acpi/processor_perflib.c
630 +++ linux-2.6.27/drivers/acpi/processor_perflib.c
631 @@ -80,6 +80,7 @@ MODULE_PARM_DESC(ignore_ppc, "If the fre
632
633 static int acpi_processor_ppc_status;
634
635 +#ifdef CONFIG_CPU_FREQ
636 static int acpi_processor_ppc_notifier(struct notifier_block *nb,
637 unsigned long event, void *data)
638 {
639 @@ -122,6 +123,7 @@ static int acpi_processor_ppc_notifier(s
640 static struct notifier_block acpi_ppc_notifier_block = {
641 .notifier_call = acpi_processor_ppc_notifier,
642 };
643 +#endif /* CONFIG_CPU_FREQ */
644
645 static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
646 {
647 @@ -166,9 +168,15 @@ int acpi_processor_ppc_has_changed(struc
648 if (ret < 0)
649 return (ret);
650 else
651 +#ifdef CONFIG_CPU_FREQ
652 return cpufreq_update_policy(pr->id);
653 +#elif defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL)
654 + return processor_notify_external(pr,
655 + PROCESSOR_PM_CHANGE, PM_TYPE_PERF);
656 +#endif
657 }
658
659 +#ifdef CONFIG_CPU_FREQ
660 void acpi_processor_ppc_init(void)
661 {
662 if (!cpufreq_register_notifier
663 @@ -187,6 +195,7 @@ void acpi_processor_ppc_exit(void)
664
665 acpi_processor_ppc_status &= ~PPC_REGISTERED;
666 }
667 +#endif /* CONFIG_CPU_FREQ */
668
669 static int acpi_processor_get_performance_control(struct acpi_processor *pr)
670 {
671 @@ -328,7 +337,10 @@ static int acpi_processor_get_performanc
672 return result;
673 }
674
675 -static int acpi_processor_get_performance_info(struct acpi_processor *pr)
676 +#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
677 +static
678 +#endif
679 +int acpi_processor_get_performance_info(struct acpi_processor *pr)
680 {
681 int result = 0;
682 acpi_status status = AE_OK;
683 @@ -356,6 +368,7 @@ static int acpi_processor_get_performanc
684 return 0;
685 }
686
687 +#ifdef CONFIG_CPU_FREQ
688 int acpi_processor_notify_smm(struct module *calling_module)
689 {
690 acpi_status status;
691 @@ -416,6 +429,7 @@ int acpi_processor_notify_smm(struct mod
692 }
693
694 EXPORT_SYMBOL(acpi_processor_notify_smm);
695 +#endif /* CONFIG_CPU_FREQ */
696
697 #ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
698 /* /proc/acpi/processor/../performance interface (DEPRECATED) */
699 @@ -507,7 +521,10 @@ static void acpi_cpufreq_remove_file(str
700 }
701 #endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
702
703 -static int acpi_processor_get_psd(struct acpi_processor *pr)
704 +#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
705 +static
706 +#endif
707 +int acpi_processor_get_psd(struct acpi_processor *pr)
708 {
709 int result = 0;
710 acpi_status status = AE_OK;
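
The processor_perflib.c changes above compile the cpufreq notifier and SMM paths only under CONFIG_CPU_FREQ, and route _PPC (platform performance limit) changes to the external controller otherwise. A small sketch of that preprocessor routing, assuming for the demo that only CONFIG_PROCESSOR_EXTERNAL_CONTROL is set; the print statements stand in for cpufreq_update_policy() and processor_notify_external():

#include <stdio.h>

#define CONFIG_PROCESSOR_EXTERNAL_CONTROL 1 /* demo configuration */

static int ppc_has_changed(int cpu)
{
#ifdef CONFIG_CPU_FREQ
	/* Native case: ask cpufreq to re-evaluate this cpu's policy. */
	printf("cpufreq_update_policy(%d)\n", cpu);
	return 0;
#elif defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL)
	/* External control: forward the change notification instead. */
	printf("notify external controller: perf change on cpu %d\n", cpu);
	return 0;
#else
	return -1; /* neither facility configured */
#endif
}

int main(void)
{
	return ppc_has_changed(0);
}
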
711 Index: linux-2.6.27/drivers/acpi/sleep/main.c
712 ===================================================================
713 --- linux-2.6.27.orig/drivers/acpi/sleep/main.c
714 +++ linux-2.6.27/drivers/acpi/sleep/main.c
715 @@ -27,6 +27,7 @@ u8 sleep_states[ACPI_S_STATE_COUNT];
716 static int acpi_sleep_prepare(u32 acpi_state)
717 {
718 #ifdef CONFIG_ACPI_SLEEP
719 +#ifndef CONFIG_ACPI_PV_SLEEP
720 /* do we have a wakeup address for S2 and S3? */
721 if (acpi_state == ACPI_STATE_S3) {
722 if (!acpi_wakeup_address) {
723 @@ -36,6 +37,7 @@ static int acpi_sleep_prepare(u32 acpi_s
724 (acpi_physical_address)acpi_wakeup_address);
725
726 }
727 +#endif
728 ACPI_FLUSH_CPU_CACHE();
729 acpi_enable_wakeup_device_prep(acpi_state);
730 #endif
731 @@ -208,7 +210,14 @@ static int acpi_suspend_enter(suspend_st
732 break;
733
734 case ACPI_STATE_S3:
735 +#ifdef CONFIG_ACPI_PV_SLEEP
736 +		/* The hypervisor will save and restore the CPU context,
737 +		 * so we can skip the low-level housekeeping here.
738 + */
739 + acpi_enter_sleep_state(acpi_state);
740 +#else
741 do_suspend_lowlevel();
742 +#endif
743 break;
744 }
745
746 Index: linux-2.6.27/drivers/char/agp/intel-agp.c
747 ===================================================================
748 --- linux-2.6.27.orig/drivers/char/agp/intel-agp.c
749 +++ linux-2.6.27/drivers/char/agp/intel-agp.c
750 @@ -250,6 +250,13 @@ static void *i8xx_alloc_pages(void)
751 if (page == NULL)
752 return NULL;
753
754 +#ifdef CONFIG_XEN
755 + if (xen_create_contiguous_region((unsigned long)page_address(page), 2, 32)) {
756 + __free_pages(page, 2);
757 + return NULL;
758 + }
759 +#endif
760 +
761 if (set_pages_uc(page, 4) < 0) {
762 set_pages_wb(page, 4);
763 __free_pages(page, 2);
764 @@ -269,6 +276,9 @@ static void i8xx_destroy_pages(void *add
765
766 page = virt_to_page(addr);
767 set_pages_wb(page, 4);
768 +#ifdef CONFIG_XEN
769 + xen_destroy_contiguous_region((unsigned long)page_address(page), 2);
770 +#endif
771 put_page(page);
772 __free_pages(page, 2);
773 atomic_dec(&agp_bridge->current_memory_agp);
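
The intel-agp.c hunks above reflect that, under Xen, pages that are virtually (pseudo-physically) contiguous need not be machine-contiguous, yet the GART sees machine addresses; so the freshly allocated order-2 block is exchanged for a machine-contiguous one via xen_create_contiguous_region(), and released again on teardown. A sketch of that allocate-or-bail flow; make_machine_contiguous() is a stub standing in for the hypercall-backed exchange:

#include <stdlib.h>

/* Stub standing in for xen_create_contiguous_region(); in the kernel
 * it asks the hypervisor to swap the backing machine frames. */
static int make_machine_contiguous(void *addr, unsigned int order)
{
	(void)addr;
	(void)order;
	return 0; /* pretend the exchange succeeded */
}

static void *alloc_dma_block(unsigned int order)
{
	void *block = aligned_alloc(4096, 4096u << order);

	if (!block)
		return NULL;
	/* The hardware sees machine addresses, so free the block and
	 * bail out if it cannot be made machine-contiguous. */
	if (make_machine_contiguous(block, order)) {
		free(block);
		return NULL;
	}
	return block;
}

int main(void)
{
	void *p = alloc_dma_block(2); /* four pages, as in i8xx_alloc_pages() */

	free(p);
	return p == NULL;
}
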
774 Index: linux-2.6.27/drivers/char/mem.c
775 ===================================================================
776 --- linux-2.6.27.orig/drivers/char/mem.c
777 +++ linux-2.6.27/drivers/char/mem.c
778 @@ -110,6 +110,7 @@ void __attribute__((weak)) unxlate_dev_m
779 {
780 }
781
782 +#ifndef ARCH_HAS_DEV_MEM
783 /*
784 * This funcion reads the *physical* memory. The f_pos points directly to the
785 * memory location.
786 @@ -254,6 +255,7 @@ static ssize_t write_mem(struct file * f
787 *ppos += written;
788 return written;
789 }
790 +#endif
791
792 int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
793 unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
794 @@ -372,6 +374,9 @@ static int mmap_mem(struct file * file,
795 static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
796 {
797 unsigned long pfn;
798 +#ifdef CONFIG_XEN
799 + unsigned long i, count;
800 +#endif
801
802 /* Turn a kernel-virtual address into a physical page frame */
803 pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
804 @@ -386,6 +391,13 @@ static int mmap_kmem(struct file * file,
805 if (!pfn_valid(pfn))
806 return -EIO;
807
808 +#ifdef CONFIG_XEN
809 + count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
810 + for (i = 0; i < count; i++)
811 + if ((pfn + i) != mfn_to_local_pfn(pfn_to_mfn(pfn + i)))
812 + return -EIO;
813 +#endif
814 +
815 vma->vm_pgoff = pfn;
816 return mmap_mem(file, vma);
817 }
818 @@ -905,6 +917,7 @@ static int open_port(struct inode * inod
819 #define open_kmem open_mem
820 #define open_oldmem open_mem
821
822 +#ifndef ARCH_HAS_DEV_MEM
823 static const struct file_operations mem_fops = {
824 .llseek = memory_lseek,
825 .read = read_mem,
826 @@ -913,6 +926,9 @@ static const struct file_operations mem_
827 .open = open_mem,
828 .get_unmapped_area = get_unmapped_area_mem,
829 };
830 +#else
831 +extern const struct file_operations mem_fops;
832 +#endif
833
834 #ifdef CONFIG_DEVKMEM
835 static const struct file_operations kmem_fops = {
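
The mmap_kmem() hunk above refuses to map a kernel-virtual range if any page in it is foreign or ballooned, which it detects because the pfn -> mfn -> pfn round trip then no longer yields the original pfn. A toy model of that check with an invented translation table (under Xen the pfn/mfn spaces are real and generally differ):

static const unsigned long pfn_to_mfn_tbl[] = { 10, 11, 99, 13 };

static unsigned long pfn_to_mfn(unsigned long pfn)
{
	return pfn_to_mfn_tbl[pfn];
}

/* Reverse map of frames this domain owns; mfn 99 is foreign here. */
static unsigned long mfn_to_local_pfn(unsigned long mfn)
{
	switch (mfn) {
	case 10: return 0;
	case 11: return 1;
	case 13: return 3;
	default: return (unsigned long)-1;
	}
}

/* Returns 0 if every page round-trips to its own pfn, -1 otherwise. */
static int check_identity_range(unsigned long pfn, unsigned long count)
{
	for (unsigned long i = 0; i < count; i++)
		if (pfn + i != mfn_to_local_pfn(pfn_to_mfn(pfn + i)))
			return -1;
	return 0;
}

int main(void)
{
	int ok = check_identity_range(0, 2) == 0 &&  /* local pages: allowed */
	         check_identity_range(0, 3) == -1;   /* foreign page: refused */
	return ok ? 0 : 1;
}
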
836 Index: linux-2.6.27/drivers/char/tpm/Makefile
837 ===================================================================
838 --- linux-2.6.27.orig/drivers/char/tpm/Makefile
839 +++ linux-2.6.27/drivers/char/tpm/Makefile
840 @@ -9,3 +9,5 @@ obj-$(CONFIG_TCG_TIS) += tpm_tis.o
841 obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
842 obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
843 obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
844 +obj-$(CONFIG_TCG_XEN) += tpm_xenu.o
845 +tpm_xenu-y = tpm_xen.o tpm_vtpm.o
846 Index: linux-2.6.27/drivers/char/tpm/tpm.h
847 ===================================================================
848 --- linux-2.6.27.orig/drivers/char/tpm/tpm.h
849 +++ linux-2.6.27/drivers/char/tpm/tpm.h
850 @@ -107,6 +107,9 @@ struct tpm_chip {
851 struct dentry **bios_dir;
852
853 struct list_head list;
854 +#ifdef CONFIG_XEN
855 + void *priv;
856 +#endif
857 void (*release) (struct device *);
858 };
859
860 @@ -124,6 +127,18 @@ static inline void tpm_write_index(int b
861 outb(value & 0xFF, base+1);
862 }
863
864 +#ifdef CONFIG_XEN
865 +static inline void *chip_get_private(const struct tpm_chip *chip)
866 +{
867 + return chip->priv;
868 +}
869 +
870 +static inline void chip_set_private(struct tpm_chip *chip, void *priv)
871 +{
872 + chip->priv = priv;
873 +}
874 +#endif
875 +
876 extern void tpm_get_timeouts(struct tpm_chip *);
877 extern void tpm_gen_interrupt(struct tpm_chip *);
878 extern void tpm_continue_selftest(struct tpm_chip *);
879 Index: linux-2.6.27/drivers/char/tpm/tpm_vtpm.c
880 ===================================================================
881 --- /dev/null
882 +++ linux-2.6.27/drivers/char/tpm/tpm_vtpm.c
883 @@ -0,0 +1,542 @@
884 +/*
885 + * Copyright (C) 2006 IBM Corporation
886 + *
887 + * Authors:
888 + * Stefan Berger <stefanb@us.ibm.com>
889 + *
890 + * Generic device driver part for device drivers in a virtualized
891 + * environment.
892 + *
893 + * This program is free software; you can redistribute it and/or
894 + * modify it under the terms of the GNU General Public License as
895 + * published by the Free Software Foundation, version 2 of the
896 + * License.
897 + *
898 + */
899 +
900 +#include <asm/uaccess.h>
901 +#include <linux/list.h>
902 +#include <linux/device.h>
903 +#include <linux/interrupt.h>
904 +#include <linux/platform_device.h>
905 +#include "tpm.h"
906 +#include "tpm_vtpm.h"
907 +
908 +/* read status bits */
909 +enum {
910 + STATUS_BUSY = 0x01,
911 + STATUS_DATA_AVAIL = 0x02,
912 + STATUS_READY = 0x04
913 +};
914 +
915 +struct transmission {
916 + struct list_head next;
917 +
918 + unsigned char *request;
919 + size_t request_len;
920 + size_t request_buflen;
921 +
922 + unsigned char *response;
923 + size_t response_len;
924 + size_t response_buflen;
925 +
926 + unsigned int flags;
927 +};
928 +
929 +enum {
930 + TRANSMISSION_FLAG_WAS_QUEUED = 0x1
931 +};
932 +
933 +
934 +enum {
935 + DATAEX_FLAG_QUEUED_ONLY = 0x1
936 +};
937 +
938 +
939 +/* local variables */
940 +
941 +/* local function prototypes */
942 +static int _vtpm_send_queued(struct tpm_chip *chip);
943 +
944 +
945 +/* =============================================================
946 + * Some utility functions
947 + * =============================================================
948 + */
949 +static void vtpm_state_init(struct vtpm_state *vtpms)
950 +{
951 + vtpms->current_request = NULL;
952 + spin_lock_init(&vtpms->req_list_lock);
953 + init_waitqueue_head(&vtpms->req_wait_queue);
954 + INIT_LIST_HEAD(&vtpms->queued_requests);
955 +
956 + vtpms->current_response = NULL;
957 + spin_lock_init(&vtpms->resp_list_lock);
958 + init_waitqueue_head(&vtpms->resp_wait_queue);
959 +
960 + vtpms->disconnect_time = jiffies;
961 +}
962 +
963 +
964 +static inline struct transmission *transmission_alloc(void)
965 +{
966 + return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
967 +}
968 +
969 +static unsigned char *
970 +transmission_set_req_buffer(struct transmission *t,
971 + unsigned char *buffer, size_t len)
972 +{
973 + if (t->request_buflen < len) {
974 + kfree(t->request);
975 + t->request = kmalloc(len, GFP_KERNEL);
976 + if (!t->request) {
977 + t->request_buflen = 0;
978 + return NULL;
979 + }
980 + t->request_buflen = len;
981 + }
982 +
983 + memcpy(t->request, buffer, len);
984 + t->request_len = len;
985 +
986 + return t->request;
987 +}
988 +
989 +static unsigned char *
990 +transmission_set_res_buffer(struct transmission *t,
991 + const unsigned char *buffer, size_t len)
992 +{
993 + if (t->response_buflen < len) {
994 + kfree(t->response);
995 + t->response = kmalloc(len, GFP_ATOMIC);
996 + if (!t->response) {
997 + t->response_buflen = 0;
998 + return NULL;
999 + }
1000 + t->response_buflen = len;
1001 + }
1002 +
1003 + memcpy(t->response, buffer, len);
1004 + t->response_len = len;
1005 +
1006 + return t->response;
1007 +}
1008 +
1009 +static inline void transmission_free(struct transmission *t)
1010 +{
1011 + kfree(t->request);
1012 + kfree(t->response);
1013 + kfree(t);
1014 +}
1015 +
1016 +/* =============================================================
1017 + * Interface with the lower layer driver
1018 + * =============================================================
1019 + */
1020 +/*
1021 + * Lower layer uses this function to make a response available.
1022 + */
1023 +int vtpm_vd_recv(const struct tpm_chip *chip,
1024 + const unsigned char *buffer, size_t count,
1025 + void *ptr)
1026 +{
1027 + unsigned long flags;
1028 + int ret_size = 0;
1029 + struct transmission *t;
1030 + struct vtpm_state *vtpms;
1031 +
1032 + vtpms = (struct vtpm_state *)chip_get_private(chip);
1033 +
1034 + /*
1035 +	 * The request list must contain exactly one request,
1036 +	 * and that element must be the one that was passed
1037 +	 * in from the front-end.
1038 + */
1039 + spin_lock_irqsave(&vtpms->resp_list_lock, flags);
1040 + if (vtpms->current_request != ptr) {
1041 + spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
1042 + return 0;
1043 + }
1044 +
1045 + if ((t = vtpms->current_request)) {
1046 + transmission_free(t);
1047 + vtpms->current_request = NULL;
1048 + }
1049 +
1050 + t = transmission_alloc();
1051 + if (t) {
1052 + if (!transmission_set_res_buffer(t, buffer, count)) {
1053 + transmission_free(t);
1054 + spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
1055 + return -ENOMEM;
1056 + }
1057 + ret_size = count;
1058 + vtpms->current_response = t;
1059 + wake_up_interruptible(&vtpms->resp_wait_queue);
1060 + }
1061 + spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
1062 +
1063 + return ret_size;
1064 +}
1065 +
1066 +
1067 +/*
1068 + * Lower layer indicates its status (connected/disconnected)
1069 + */
1070 +void vtpm_vd_status(const struct tpm_chip *chip, u8 vd_status)
1071 +{
1072 + struct vtpm_state *vtpms;
1073 +
1074 + vtpms = (struct vtpm_state *)chip_get_private(chip);
1075 +
1076 + vtpms->vd_status = vd_status;
1077 + if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
1078 + vtpms->disconnect_time = jiffies;
1079 + }
1080 +}
1081 +
1082 +/* =============================================================
1083 + * Interface with the generic TPM driver
1084 + * =============================================================
1085 + */
1086 +static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1087 +{
1088 + int rc = 0;
1089 + unsigned long flags;
1090 + struct vtpm_state *vtpms;
1091 +
1092 + vtpms = (struct vtpm_state *)chip_get_private(chip);
1093 +
1094 + /*
1095 +	 * Check whether the previous operation only queued the command.
1096 +	 * In that case there won't be a response, so just reset
1097 +	 * that flag and return from here. In any other case we
1098 +	 * should receive a response from the back-end.
1099 + */
1100 + spin_lock_irqsave(&vtpms->resp_list_lock, flags);
1101 + if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
1102 + vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
1103 + spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
1104 + /*
1105 + * The first few commands (measurements) must be
1106 + * queued since it might not be possible to talk to the
1107 +		 * to the TPM yet.
1108 + * Return a response of up to 30 '0's.
1109 + */
1110 +
1111 + count = min_t(size_t, count, 30);
1112 + memset(buf, 0x0, count);
1113 + return count;
1114 + }
1115 + /*
1116 + * Check whether something is in the responselist and if
1117 + * there's nothing in the list wait for something to appear.
1118 + */
1119 +
1120 + if (!vtpms->current_response) {
1121 + spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
1122 + interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
1123 + 1000);
1124 + spin_lock_irqsave(&vtpms->resp_list_lock ,flags);
1125 + }
1126 +
1127 + if (vtpms->current_response) {
1128 + struct transmission *t = vtpms->current_response;
1129 + vtpms->current_response = NULL;
1130 + rc = min(count, t->response_len);
1131 + memcpy(buf, t->response, rc);
1132 + transmission_free(t);
1133 + }
1134 +
1135 + spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
1136 + return rc;
1137 +}
1138 +
1139 +static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
1140 +{
1141 + int rc = 0;
1142 + unsigned long flags;
1143 + struct transmission *t = transmission_alloc();
1144 + struct vtpm_state *vtpms;
1145 +
1146 + vtpms = (struct vtpm_state *)chip_get_private(chip);
1147 +
1148 + if (!t)
1149 + return -ENOMEM;
1150 + /*
1151 + * If there's a current request, it must be the
1152 + * previous request that has timed out.
1153 + */
1154 + spin_lock_irqsave(&vtpms->req_list_lock, flags);
1155 + if (vtpms->current_request != NULL) {
1156 + printk("WARNING: Sending although there is a request outstanding.\n"
1157 + " Previous request must have timed out.\n");
1158 + transmission_free(vtpms->current_request);
1159 + vtpms->current_request = NULL;
1160 + }
1161 + spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
1162 +
1163 + /*
1164 + * Queue the packet if the driver below is not
1165 +	 * ready yet, or if there is any packet already
1166 + * in the queue.
1167 + * If the driver below is ready, unqueue all
1168 + * packets first before sending our current
1169 + * packet.
1170 + * For each unqueued packet, except for the
1171 + * last (=current) packet, call the function
1172 +	 * vtpm_recv to wait for the response to come
1173 + * back.
1174 + */
1175 + if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
1176 + if (time_after(jiffies,
1177 + vtpms->disconnect_time + HZ * 10)) {
1178 + rc = -ENOENT;
1179 + } else {
1180 + goto queue_it;
1181 + }
1182 + } else {
1183 + /*
1184 + * Send all queued packets.
1185 + */
1186 + if (_vtpm_send_queued(chip) == 0) {
1187 +
1188 + vtpms->current_request = t;
1189 +
1190 + rc = vtpm_vd_send(vtpms->tpm_private,
1191 + buf,
1192 + count,
1193 + t);
1194 + /*
1195 + * The generic TPM driver will call
1196 + * the function to receive the response.
1197 + */
1198 + if (rc < 0) {
1199 + vtpms->current_request = NULL;
1200 + goto queue_it;
1201 + }
1202 + } else {
1203 +queue_it:
1204 + if (!transmission_set_req_buffer(t, buf, count)) {
1205 + transmission_free(t);
1206 + rc = -ENOMEM;
1207 + goto exit;
1208 + }
1209 + /*
1210 +		/*
1211 +		 * An error occurred. Don't even try
1212 +		 * to send the current request; just queue it.
1213 + */
1214 + spin_lock_irqsave(&vtpms->req_list_lock, flags);
1215 + vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
1216 + list_add_tail(&t->next, &vtpms->queued_requests);
1217 + spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
1218 + }
1219 + }
1220 +
1221 +exit:
1222 + return rc;
1223 +}
1224 +
1225 +
1226 +/*
1227 + * Send all queued requests.
1228 + */
1229 +static int _vtpm_send_queued(struct tpm_chip *chip)
1230 +{
1231 + int rc;
1232 + int error = 0;
1233 +	unsigned long flags;
1234 + unsigned char buffer[1];
1235 + struct vtpm_state *vtpms;
1236 + vtpms = (struct vtpm_state *)chip_get_private(chip);
1237 +
1238 + spin_lock_irqsave(&vtpms->req_list_lock, flags);
1239 +
1240 + while (!list_empty(&vtpms->queued_requests)) {
1241 + /*
1242 + * Need to dequeue them.
1243 + * Read the result into a dummy buffer.
1244 + */
1245 + struct transmission *qt = (struct transmission *)
1246 + vtpms->queued_requests.next;
1247 + list_del(&qt->next);
1248 + vtpms->current_request = qt;
1249 + spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
1250 +
1251 + rc = vtpm_vd_send(vtpms->tpm_private,
1252 + qt->request,
1253 + qt->request_len,
1254 + qt);
1255 +
1256 + if (rc < 0) {
1257 + spin_lock_irqsave(&vtpms->req_list_lock, flags);
1258 + if ((qt = vtpms->current_request) != NULL) {
1259 + /*
1260 + * requeue it at the beginning
1261 + * of the list
1262 + */
1263 + list_add(&qt->next,
1264 + &vtpms->queued_requests);
1265 + }
1266 + vtpms->current_request = NULL;
1267 + error = 1;
1268 + break;
1269 + }
1270 + /*
1271 + * After this point qt is not valid anymore!
1272 + * It is freed when the front-end is delivering
1273 + * the data by calling tpm_recv
1274 + */
1275 + /*
1276 + * Receive response into provided dummy buffer
1277 + */
1278 + rc = vtpm_recv(chip, buffer, sizeof(buffer));
1279 + spin_lock_irqsave(&vtpms->req_list_lock, flags);
1280 + }
1281 +
1282 + spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
1283 +
1284 + return error;
1285 +}
1286 +
1287 +static void vtpm_cancel(struct tpm_chip *chip)
1288 +{
1289 + unsigned long flags;
1290 + struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);
1291 +
1292 + spin_lock_irqsave(&vtpms->resp_list_lock,flags);
1293 +
1294 + if (!vtpms->current_response && vtpms->current_request) {
1295 + spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
1296 + interruptible_sleep_on(&vtpms->resp_wait_queue);
1297 + spin_lock_irqsave(&vtpms->resp_list_lock,flags);
1298 + }
1299 +
1300 + if (vtpms->current_response) {
1301 + struct transmission *t = vtpms->current_response;
1302 + vtpms->current_response = NULL;
1303 + transmission_free(t);
1304 + }
1305 +
1306 + spin_unlock_irqrestore(&vtpms->resp_list_lock,flags);
1307 +}
1308 +
1309 +static u8 vtpm_status(struct tpm_chip *chip)
1310 +{
1311 + u8 rc = 0;
1312 + unsigned long flags;
1313 + struct vtpm_state *vtpms;
1314 +
1315 + vtpms = (struct vtpm_state *)chip_get_private(chip);
1316 +
1317 + spin_lock_irqsave(&vtpms->resp_list_lock, flags);
1318 + /*
1319 + * Data are available if:
1320 + * - there's a current response
1321 + * - the last packet was queued only (this is fake, but necessary to
1322 + * get the generic TPM layer to call the receive function.)
1323 + */
1324 + if (vtpms->current_response ||
1325 + 0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
1326 + rc = STATUS_DATA_AVAIL;
1327 + } else if (!vtpms->current_response && !vtpms->current_request) {
1328 + rc = STATUS_READY;
1329 + }
1330 +
1331 + spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
1332 + return rc;
1333 +}
1334 +
1335 +static struct file_operations vtpm_ops = {
1336 + .owner = THIS_MODULE,
1337 + .llseek = no_llseek,
1338 + .open = tpm_open,
1339 + .read = tpm_read,
1340 + .write = tpm_write,
1341 + .release = tpm_release,
1342 +};
1343 +
1344 +static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
1345 +static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
1346 +static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
1347 +static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
1348 +static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
1349 +static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
1350 + NULL);
1351 +static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
1352 +static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
1353 +
1354 +static struct attribute *vtpm_attrs[] = {
1355 + &dev_attr_pubek.attr,
1356 + &dev_attr_pcrs.attr,
1357 + &dev_attr_enabled.attr,
1358 + &dev_attr_active.attr,
1359 + &dev_attr_owned.attr,
1360 + &dev_attr_temp_deactivated.attr,
1361 + &dev_attr_caps.attr,
1362 + &dev_attr_cancel.attr,
1363 + NULL,
1364 +};
1365 +
1366 +static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };
1367 +
1368 +#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
1369 +
1370 +static struct tpm_vendor_specific tpm_vtpm = {
1371 + .recv = vtpm_recv,
1372 + .send = vtpm_send,
1373 + .cancel = vtpm_cancel,
1374 + .status = vtpm_status,
1375 + .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
1376 + .req_complete_val = STATUS_DATA_AVAIL,
1377 + .req_canceled = STATUS_READY,
1378 + .attr_group = &vtpm_attr_grp,
1379 + .miscdev = {
1380 + .fops = &vtpm_ops,
1381 + },
1382 + .duration = {
1383 + TPM_LONG_TIMEOUT,
1384 + TPM_LONG_TIMEOUT,
1385 + TPM_LONG_TIMEOUT,
1386 + },
1387 +};
1388 +
1389 +struct tpm_chip *init_vtpm(struct device *dev,
1390 + struct tpm_private *tp)
1391 +{
1392 + long rc;
1393 + struct tpm_chip *chip;
1394 + struct vtpm_state *vtpms;
1395 +
1396 + vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
1397 + if (!vtpms)
1398 + return ERR_PTR(-ENOMEM);
1399 +
1400 + vtpm_state_init(vtpms);
1401 + vtpms->tpm_private = tp;
1402 +
1403 + chip = tpm_register_hardware(dev, &tpm_vtpm);
1404 + if (!chip) {
1405 + rc = -ENODEV;
1406 + goto err_free_mem;
1407 + }
1408 +
1409 + chip_set_private(chip, vtpms);
1410 +
1411 + return chip;
1412 +
1413 +err_free_mem:
1414 + kfree(vtpms);
1415 +
1416 + return ERR_PTR(rc);
1417 +}
1418 +
1419 +void cleanup_vtpm(struct device *dev)
1420 +{
1421 + struct tpm_chip *chip = dev_get_drvdata(dev);
1422 + struct vtpm_state *vtpms = (struct vtpm_state*)chip_get_private(chip);
1423 + tpm_remove_hardware(dev);
1424 + kfree(vtpms);
1425 +}
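
The heart of tpm_vtpm.c above is the decision tree in vtpm_send(): while the back-end is disconnected, requests are queued, but only within a roughly ten-second grace period after the disconnect; once connected, the queue is drained before the new request is transmitted. A compact user-space model of that decision; the struct and its fields are invented for the sketch:

#include <stdio.h>

struct vtpm_model {
	int connected;             /* back-end reachable? */
	long now, disconnect_time; /* fake clock, in seconds */
	int queued;                /* requests waiting for the back-end */
};

enum action { SEND, QUEUE, FAIL };

static enum action decide(struct vtpm_model *v)
{
	if (!v->connected) {
		/* Disconnected: queue, but only within a grace period. */
		if (v->now - v->disconnect_time > 10)
			return FAIL;
		v->queued++;
		return QUEUE;
	}
	/* Connected: drain anything queued first, then send. */
	while (v->queued > 0)
		v->queued--; /* stands in for _vtpm_send_queued() */
	return SEND;
}

int main(void)
{
	struct vtpm_model v = { .connected = 0, .now = 3 };

	printf("%d\n", decide(&v)); /* QUEUE: early, back-end not up yet */
	v.connected = 1;
	printf("%d\n", decide(&v)); /* SEND: queue drained, then transmit */
	return 0;
}
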
1426 Index: linux-2.6.27/drivers/char/tpm/tpm_vtpm.h
1427 ===================================================================
1428 --- /dev/null
1429 +++ linux-2.6.27/drivers/char/tpm/tpm_vtpm.h
1430 @@ -0,0 +1,55 @@
1431 +#ifndef TPM_VTPM_H
1432 +#define TPM_VTPM_H
1433 +
1434 +struct tpm_chip;
1435 +struct tpm_private;
1436 +
1437 +struct vtpm_state {
1438 + struct transmission *current_request;
1439 + spinlock_t req_list_lock;
1440 + wait_queue_head_t req_wait_queue;
1441 +
1442 + struct list_head queued_requests;
1443 +
1444 + struct transmission *current_response;
1445 + spinlock_t resp_list_lock;
1446 + wait_queue_head_t resp_wait_queue; // processes waiting for responses
1447 +
1448 + u8 vd_status;
1449 + u8 flags;
1450 +
1451 + unsigned long disconnect_time;
1452 +
1453 + /*
1454 + * The following is a private structure of the underlying
1455 + * driver. It is passed as parameter in the send function.
1456 + */
1457 + struct tpm_private *tpm_private;
1458 +};
1459 +
1460 +
1461 +enum vdev_status {
1462 + TPM_VD_STATUS_DISCONNECTED = 0x0,
1463 + TPM_VD_STATUS_CONNECTED = 0x1
1464 +};
1465 +
1466 +/* this function is called from tpm_vtpm.c */
1467 +int vtpm_vd_send(struct tpm_private * tp,
1468 + const u8 * buf, size_t count, void *ptr);
1469 +
1470 +/* these functions are offered by tpm_vtpm.c */
1471 +struct tpm_chip *init_vtpm(struct device *,
1472 + struct tpm_private *);
1473 +void cleanup_vtpm(struct device *);
1474 +int vtpm_vd_recv(const struct tpm_chip* chip,
1475 + const unsigned char *buffer, size_t count, void *ptr);
1476 +void vtpm_vd_status(const struct tpm_chip *, u8 status);
1477 +
1478 +static inline struct tpm_private *tpm_private_from_dev(struct device *dev)
1479 +{
1480 + struct tpm_chip *chip = dev_get_drvdata(dev);
1481 + struct vtpm_state *vtpms = chip_get_private(chip);
1482 + return vtpms->tpm_private;
1483 +}
1484 +
1485 +#endif
1486 Index: linux-2.6.27/drivers/char/tpm/tpm_xen.c
1487 ===================================================================
1488 --- /dev/null
1489 +++ linux-2.6.27/drivers/char/tpm/tpm_xen.c
1490 @@ -0,0 +1,722 @@
1491 +/*
1492 + * Copyright (c) 2005, IBM Corporation
1493 + *
1494 + * Author: Stefan Berger, stefanb@us.ibm.com
1495 + * Grant table support: Mahadevan Gomathisankaran
1496 + *
1497 + * This code has been derived from drivers/xen/netfront/netfront.c
1498 + *
1499 + * Copyright (c) 2002-2004, K A Fraser
1500 + *
1501 + * This program is free software; you can redistribute it and/or
1502 + * modify it under the terms of the GNU General Public License version 2
1503 + * as published by the Free Software Foundation; or, when distributed
1504 + * separately from the Linux kernel or incorporated into other
1505 + * software packages, subject to the following license:
1506 + *
1507 + * Permission is hereby granted, free of charge, to any person obtaining a copy
1508 + * of this source file (the "Software"), to deal in the Software without
1509 + * restriction, including without limitation the rights to use, copy, modify,
1510 + * merge, publish, distribute, sublicense, and/or sell copies of the Software,
1511 + * and to permit persons to whom the Software is furnished to do so, subject to
1512 + * the following conditions:
1513 + *
1514 + * The above copyright notice and this permission notice shall be included in
1515 + * all copies or substantial portions of the Software.
1516 + *
1517 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1518 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1519 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
1520 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1521 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
1522 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
1523 + * IN THE SOFTWARE.
1524 + */
1525 +
1526 +#include <linux/errno.h>
1527 +#include <linux/err.h>
1528 +#include <linux/interrupt.h>
1529 +#include <linux/mutex.h>
1530 +#include <asm/uaccess.h>
1531 +#include <xen/evtchn.h>
1532 +#include <xen/interface/grant_table.h>
1533 +#include <xen/interface/io/tpmif.h>
1534 +#include <xen/gnttab.h>
1535 +#include <xen/xenbus.h>
1536 +#include "tpm.h"
1537 +#include "tpm_vtpm.h"
1538 +
1539 +#undef DEBUG
1540 +
1541 +/* local structures */
1542 +struct tpm_private {
1543 + struct tpm_chip *chip;
1544 +
1545 + tpmif_tx_interface_t *tx;
1546 + atomic_t refcnt;
1547 + unsigned int irq;
1548 + u8 is_connected;
1549 + u8 is_suspended;
1550 +
1551 + spinlock_t tx_lock;
1552 +
1553 + struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
1554 +
1555 + atomic_t tx_busy;
1556 + void *tx_remember;
1557 +
1558 + domid_t backend_id;
1559 + wait_queue_head_t wait_q;
1560 +
1561 + struct xenbus_device *dev;
1562 + int ring_ref;
1563 +};
1564 +
1565 +struct tx_buffer {
1566 + unsigned int size; // available space in data
1567 + unsigned int len; // used space in data
1568 + unsigned char *data; // pointer to a page
1569 +};
1570 +
1571 +
1572 +/* locally visible variables */
1573 +static grant_ref_t gref_head;
1574 +static struct tpm_private *my_priv;
1575 +
1576 +/* local function prototypes */
1577 +static irqreturn_t tpmif_int(int irq,
1578 + void *tpm_priv,
1579 + struct pt_regs *ptregs);
1580 +static void tpmif_rx_action(unsigned long unused);
1581 +static int tpmif_connect(struct xenbus_device *dev,
1582 + struct tpm_private *tp,
1583 + domid_t domid);
1584 +static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
1585 +static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
1586 +static void tpmif_free_tx_buffers(struct tpm_private *tp);
1587 +static void tpmif_set_connected_state(struct tpm_private *tp,
1588 + u8 newstate);
1589 +static int tpm_xmit(struct tpm_private *tp,
1590 + const u8 * buf, size_t count, int userbuffer,
1591 + void *remember);
1592 +static void destroy_tpmring(struct tpm_private *tp);
1593 +void __exit tpmif_exit(void);
1594 +
1595 +#define DPRINTK(fmt, args...) \
1596 + pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
1597 +#define IPRINTK(fmt, args...) \
1598 + printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
1599 +#define WPRINTK(fmt, args...) \
1600 + printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
1601 +
1602 +#define GRANT_INVALID_REF 0
1603 +
1604 +
1605 +static inline int
1606 +tx_buffer_copy(struct tx_buffer *txb, const u8 *src, int len,
1607 + int isuserbuffer)
1608 +{
1609 + int copied = len;
1610 +
1611 + if (len > txb->size)
1612 + copied = txb->size;
1613 + if (isuserbuffer) {
1614 + if (copy_from_user(txb->data, src, copied))
1615 + return -EFAULT;
1616 + } else {
1617 + memcpy(txb->data, src, copied);
1618 + }
1619 + txb->len = len;
1620 + return copied;
1621 +}
1622 +
1623 +static inline struct tx_buffer *tx_buffer_alloc(void)
1624 +{
1625 + struct tx_buffer *txb;
1626 +
1627 + txb = kzalloc(sizeof(struct tx_buffer), GFP_KERNEL);
1628 + if (!txb)
1629 + return NULL;
1630 +
1631 + txb->len = 0;
1632 + txb->size = PAGE_SIZE;
1633 + txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
1634 + if (txb->data == NULL) {
1635 + kfree(txb);
1636 + txb = NULL;
1637 + }
1638 +
1639 + return txb;
1640 +}
1641 +
1642 +
1643 +static inline void tx_buffer_free(struct tx_buffer *txb)
1644 +{
1645 + if (txb) {
1646 + free_page((long)txb->data);
1647 + kfree(txb);
1648 + }
1649 +}
1650 +
1651 +/**************************************************************
1652 + Utility function for the tpm_private structure
1653 +**************************************************************/
1654 +static void tpm_private_init(struct tpm_private *tp)
1655 +{
1656 + spin_lock_init(&tp->tx_lock);
1657 + init_waitqueue_head(&tp->wait_q);
1658 + atomic_set(&tp->refcnt, 1);
1659 +}
1660 +
1661 +static void tpm_private_put(void)
1662 +{
1663 + if (!atomic_dec_and_test(&my_priv->refcnt))
1664 + return;
1665 +
1666 + tpmif_free_tx_buffers(my_priv);
1667 + kfree(my_priv);
1668 + my_priv = NULL;
1669 +}
1670 +
1671 +static struct tpm_private *tpm_private_get(void)
1672 +{
1673 + int err;
1674 +
1675 + if (my_priv) {
1676 + atomic_inc(&my_priv->refcnt);
1677 + return my_priv;
1678 + }
1679 +
1680 + my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
1681 + if (!my_priv)
1682 + return NULL;
1683 +
1684 + tpm_private_init(my_priv);
1685 + err = tpmif_allocate_tx_buffers(my_priv);
1686 + if (err < 0)
1687 + tpm_private_put();
1688 +
1689 + return my_priv;
1690 +}
1691 +
1692 +/**************************************************************
1693 +
1694 + The interface to let the tpm plugin register its callback
1695 + function and send data to another partition using this module
1696 +
1697 +**************************************************************/
1698 +
1699 +static DEFINE_MUTEX(suspend_lock);
1700 +/*
1701 + * Send data via this module by calling this function
1702 + */
1703 +int vtpm_vd_send(struct tpm_private *tp,
1704 + const u8 * buf, size_t count, void *ptr)
1705 +{
1706 + int sent;
1707 +
1708 + mutex_lock(&suspend_lock);
1709 + sent = tpm_xmit(tp, buf, count, 0, ptr);
1710 + mutex_unlock(&suspend_lock);
1711 +
1712 + return sent;
1713 +}
1714 +
1715 +/**************************************************************
1716 + XENBUS support code
1717 +**************************************************************/
1718 +
1719 +static int setup_tpmring(struct xenbus_device *dev,
1720 + struct tpm_private *tp)
1721 +{
1722 + tpmif_tx_interface_t *sring;
1723 + int err;
1724 +
1725 + tp->ring_ref = GRANT_INVALID_REF;
1726 +
1727 + sring = (void *)__get_free_page(GFP_KERNEL);
1728 + if (!sring) {
1729 + xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
1730 + return -ENOMEM;
1731 + }
1732 + tp->tx = sring;
1733 +
1734 + err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
1735 + if (err < 0) {
1736 + free_page((unsigned long)sring);
1737 + tp->tx = NULL;
1738 + xenbus_dev_fatal(dev, err, "allocating grant reference");
1739 + goto fail;
1740 + }
1741 + tp->ring_ref = err;
1742 +
1743 + err = tpmif_connect(dev, tp, dev->otherend_id);
1744 + if (err)
1745 + goto fail;
1746 +
1747 + return 0;
1748 +fail:
1749 + destroy_tpmring(tp);
1750 + return err;
1751 +}
1752 +
1753 +
1754 +static void destroy_tpmring(struct tpm_private *tp)
1755 +{
1756 + tpmif_set_connected_state(tp, 0);
1757 +
1758 + if (tp->ring_ref != GRANT_INVALID_REF) {
1759 + gnttab_end_foreign_access(tp->ring_ref, (unsigned long)tp->tx);
1760 + tp->ring_ref = GRANT_INVALID_REF;
1761 + tp->tx = NULL;
1762 + }
1763 +
1764 + if (tp->irq)
1765 + unbind_from_irqhandler(tp->irq, tp);
1766 +
1767 + tp->irq = 0;
1768 +}
1769 +
1770 +
1771 +static int talk_to_backend(struct xenbus_device *dev,
1772 + struct tpm_private *tp)
1773 +{
1774 + const char *message = NULL;
1775 + int err;
1776 + struct xenbus_transaction xbt;
1777 +
1778 + err = setup_tpmring(dev, tp);
1779 + if (err) {
1780 + xenbus_dev_fatal(dev, err, "setting up ring");
1781 + goto out;
1782 + }
1783 +
1784 +again:
1785 + err = xenbus_transaction_start(&xbt);
1786 + if (err) {
1787 + xenbus_dev_fatal(dev, err, "starting transaction");
1788 + goto destroy_tpmring;
1789 + }
1790 +
1791 + err = xenbus_printf(xbt, dev->nodename,
1792 + "ring-ref","%u", tp->ring_ref);
1793 + if (err) {
1794 + message = "writing ring-ref";
1795 + goto abort_transaction;
1796 + }
1797 +
1798 + err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
1799 + irq_to_evtchn_port(tp->irq));
1800 + if (err) {
1801 + message = "writing event-channel";
1802 + goto abort_transaction;
1803 + }
1804 +
1805 + err = xenbus_transaction_end(xbt, 0);
1806 + if (err == -EAGAIN)
1807 + goto again;
1808 + if (err) {
1809 + xenbus_dev_fatal(dev, err, "completing transaction");
1810 + goto destroy_tpmring;
1811 + }
1812 +
1813 + xenbus_switch_state(dev, XenbusStateConnected);
1814 +
1815 + return 0;
1816 +
1817 +abort_transaction:
1818 + xenbus_transaction_end(xbt, 1);
1819 + if (message)
1820 + xenbus_dev_error(dev, err, "%s", message);
1821 +destroy_tpmring:
1822 + destroy_tpmring(tp);
1823 +out:
1824 + return err;
1825 +}
1826 +
1827 +/**
1828 + * Callback received when the backend's state changes.
1829 + */
1830 +static void backend_changed(struct xenbus_device *dev,
1831 + enum xenbus_state backend_state)
1832 +{
1833 + struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
1834 + DPRINTK("\n");
1835 +
1836 + switch (backend_state) {
1837 + case XenbusStateInitialising:
1838 + case XenbusStateInitWait:
1839 + case XenbusStateInitialised:
1840 + case XenbusStateReconfiguring:
1841 + case XenbusStateReconfigured:
1842 + case XenbusStateUnknown:
1843 + break;
1844 +
1845 + case XenbusStateConnected:
1846 + tpmif_set_connected_state(tp, 1);
1847 + break;
1848 +
1849 + case XenbusStateClosing:
1850 + tpmif_set_connected_state(tp, 0);
1851 + xenbus_frontend_closed(dev);
1852 + break;
1853 +
1854 + case XenbusStateClosed:
1855 + tpmif_set_connected_state(tp, 0);
1856 + if (tp->is_suspended == 0)
1857 + device_unregister(&dev->dev);
1858 + xenbus_frontend_closed(dev);
1859 + break;
1860 + }
1861 +}
1862 +
1863 +static int tpmfront_probe(struct xenbus_device *dev,
1864 + const struct xenbus_device_id *id)
1865 +{
1866 + int err;
1867 + int handle;
1868 + struct tpm_private *tp = tpm_private_get();
1869 +
1870 + if (!tp)
1871 + return -ENOMEM;
1872 +
1873 + tp->chip = init_vtpm(&dev->dev, tp);
1874 + if (IS_ERR(tp->chip))
1875 + return PTR_ERR(tp->chip);
1876 +
1877 + err = xenbus_scanf(XBT_NIL, dev->nodename,
1878 + "handle", "%i", &handle);
1879 + if (XENBUS_EXIST_ERR(err))
1880 + return err;
1881 +
1882 + if (err < 0) {
1883 + xenbus_dev_fatal(dev, err, "reading handle");
1884 + return err;
1885 + }
1886 +
1887 + tp->dev = dev;
1888 +
1889 + err = talk_to_backend(dev, tp);
1890 + if (err) {
1891 + tpm_private_put();
1892 + return err;
1893 + }
1894 +
1895 + return 0;
1896 +}
1897 +
1898 +
1899 +static int tpmfront_remove(struct xenbus_device *dev)
1900 +{
1901 + struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
1902 + destroy_tpmring(tp);
1903 + cleanup_vtpm(&dev->dev);
1904 + return 0;
1905 +}
1906 +
1907 +static int tpmfront_suspend(struct xenbus_device *dev)
1908 +{
1909 + struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
1910 + u32 ctr;
1911 +
1912 + /* Take the lock, preventing any application from sending. */
1913 + mutex_lock(&suspend_lock);
1914 + tp->is_suspended = 1;
1915 +
1916 + for (ctr = 0; atomic_read(&tp->tx_busy); ctr++) {
1917 + if ((ctr % 10) == 0)
1918 + printk("TPM-FE [INFO]: Waiting for outstanding "
1919 + "request.\n");
1920 + /* Wait for a request to be responded to. */
1921 + interruptible_sleep_on_timeout(&tp->wait_q, 100);
1922 + }
1923 +
1924 + return 0;
1925 +}
1926 +
1927 +static int tpmfront_suspend_finish(struct tpm_private *tp)
1928 +{
1929 + tp->is_suspended = 0;
1930 + /* Allow applications to send again. */
1931 + mutex_unlock(&suspend_lock);
1932 + return 0;
1933 +}
1934 +
1935 +static int tpmfront_suspend_cancel(struct xenbus_device *dev)
1936 +{
1937 + struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
1938 + return tpmfront_suspend_finish(tp);
1939 +}
1940 +
1941 +static int tpmfront_resume(struct xenbus_device *dev)
1942 +{
1943 + struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
1944 + destroy_tpmring(tp);
1945 + return talk_to_backend(dev, tp);
1946 +}
1947 +
1948 +static int tpmif_connect(struct xenbus_device *dev,
1949 + struct tpm_private *tp,
1950 + domid_t domid)
1951 +{
1952 + int err;
1953 +
1954 + tp->backend_id = domid;
1955 +
1956 + err = bind_listening_port_to_irqhandler(
1957 + domid, tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
1958 + if (err <= 0) {
1959 + WPRINTK("bind_listening_port_to_irqhandler failed "
1960 + "(err=%d)\n", err);
1961 + return err;
1962 + }
1963 + tp->irq = err;
1964 +
1965 + return 0;
1966 +}
1967 +
1968 +static struct xenbus_device_id tpmfront_ids[] = {
1969 + { "vtpm" },
1970 + { "" }
1971 +};
1972 +
1973 +static struct xenbus_driver tpmfront = {
1974 + .name = "vtpm",
1975 + .owner = THIS_MODULE,
1976 + .ids = tpmfront_ids,
1977 + .probe = tpmfront_probe,
1978 + .remove = tpmfront_remove,
1979 + .resume = tpmfront_resume,
1980 + .otherend_changed = backend_changed,
1981 + .suspend = tpmfront_suspend,
1982 + .suspend_cancel = tpmfront_suspend_cancel,
1983 +};
1984 +
1985 +static void __init init_tpm_xenbus(void)
1986 +{
1987 + xenbus_register_frontend(&tpmfront);
1988 +}
1989 +
1990 +static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
1991 +{
1992 + unsigned int i;
1993 +
1994 + for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
1995 + tp->tx_buffers[i] = tx_buffer_alloc();
1996 + if (!tp->tx_buffers[i]) {
1997 + tpmif_free_tx_buffers(tp);
1998 + return -ENOMEM;
1999 + }
2000 + }
2001 + return 0;
2002 +}
2003 +
2004 +static void tpmif_free_tx_buffers(struct tpm_private *tp)
2005 +{
2006 + unsigned int i;
2007 +
2008 + for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
2009 + tx_buffer_free(tp->tx_buffers[i]);
2010 +}
2011 +
2012 +static void tpmif_rx_action(unsigned long priv)
2013 +{
2014 + struct tpm_private *tp = (struct tpm_private *)priv;
2015 + int i = 0;
2016 + unsigned int received;
2017 + unsigned int offset = 0;
2018 + u8 *buffer;
2019 + tpmif_tx_request_t *tx = &tp->tx->ring[i].req;
2020 +
2021 + atomic_set(&tp->tx_busy, 0);
2022 + wake_up_interruptible(&tp->wait_q);
2023 +
2024 + received = tx->size;
2025 +
2026 + buffer = kmalloc(received, GFP_ATOMIC);
2027 + if (!buffer)
2028 + return;
2029 +
2030 + for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
2031 + struct tx_buffer *txb = tp->tx_buffers[i];
2032 + tpmif_tx_request_t *tx;
2033 + unsigned int tocopy;
2034 +
2035 + tx = &tp->tx->ring[i].req;
2036 + tocopy = tx->size;
2037 + if (tocopy > PAGE_SIZE)
2038 + tocopy = PAGE_SIZE;
2039 +
2040 + memcpy(&buffer[offset], txb->data, tocopy);
2041 +
2042 + gnttab_release_grant_reference(&gref_head, tx->ref);
2043 +
2044 + offset += tocopy;
2045 + }
2046 +
2047 + vtpm_vd_recv(tp->chip, buffer, received, tp->tx_remember);
2048 + kfree(buffer);
2049 +}
2050 +
2051 +
2052 +static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
2053 +{
2054 + struct tpm_private *tp = tpm_priv;
2055 + unsigned long flags;
2056 +
2057 + spin_lock_irqsave(&tp->tx_lock, flags);
2058 + tpmif_rx_tasklet.data = (unsigned long)tp;
2059 + tasklet_schedule(&tpmif_rx_tasklet);
2060 + spin_unlock_irqrestore(&tp->tx_lock, flags);
2061 +
2062 + return IRQ_HANDLED;
2063 +}
2064 +
2065 +
2066 +static int tpm_xmit(struct tpm_private *tp,
2067 + const u8 * buf, size_t count, int isuserbuffer,
2068 + void *remember)
2069 +{
2070 + tpmif_tx_request_t *tx;
2071 + TPMIF_RING_IDX i;
2072 + unsigned int offset = 0;
2073 +
2074 + spin_lock_irq(&tp->tx_lock);
2075 +
2076 + if (unlikely(atomic_read(&tp->tx_busy))) {
2077 + printk("tpm_xmit: There's an outstanding request/response "
2078 + "on the way!\n");
2079 + spin_unlock_irq(&tp->tx_lock);
2080 + return -EBUSY;
2081 + }
2082 +
2083 + if (tp->is_connected != 1) {
2084 + spin_unlock_irq(&tp->tx_lock);
2085 + return -EIO;
2086 + }
2087 +
2088 + for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
2089 + struct tx_buffer *txb = tp->tx_buffers[i];
2090 + int copied;
2091 +
2092 + if (!txb) {
2093 + DPRINTK("txb (i=%d) is NULL. buffers initilized?\n"
2094 + "Not transmitting anything!\n", i);
2095 + spin_unlock_irq(&tp->tx_lock);
2096 + return -EFAULT;
2097 + }
2098 +
2099 + copied = tx_buffer_copy(txb, &buf[offset], count,
2100 + isuserbuffer);
2101 + if (copied < 0) {
2102 + /* An error occurred */
2103 + spin_unlock_irq(&tp->tx_lock);
2104 + return copied;
2105 + }
2106 + count -= copied;
2107 + offset += copied;
2108 +
2109 + tx = &tp->tx->ring[i].req;
2110 + tx->addr = virt_to_machine(txb->data);
2111 + tx->size = txb->len;
2112 + tx->unused = 0;
2113 +
2114 + DPRINTK("First 4 characters sent by TPM-FE are "
2115 + "0x%02x 0x%02x 0x%02x 0x%02x\n",
2116 + txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
2117 +
2118 + /* Get the granttable reference for this page. */
2119 + tx->ref = gnttab_claim_grant_reference(&gref_head);
2120 + if (tx->ref == -ENOSPC) {
2121 + spin_unlock_irq(&tp->tx_lock);
2122 + DPRINTK("Grant table claim reference failed in "
2123 + "func:%s line:%d file:%s\n",
2124 + __FUNCTION__, __LINE__, __FILE__);
2125 + return -ENOSPC;
2126 + }
2127 + gnttab_grant_foreign_access_ref(tx->ref,
2128 + tp->backend_id,
2129 + virt_to_mfn(txb->data),
2130 + 0 /*RW*/);
2131 + wmb();
2132 + }
2133 +
2134 + atomic_set(&tp->tx_busy, 1);
2135 + tp->tx_remember = remember;
2136 +
2137 + mb();
2138 +
2139 + notify_remote_via_irq(tp->irq);
2140 +
2141 + spin_unlock_irq(&tp->tx_lock);
2142 + return offset;
2143 +}
2144 +
2145 +
2146 +static void tpmif_notify_upperlayer(struct tpm_private *tp)
2147 +{
2148 + /* Notify upper layer about the state of the connection to the BE. */
2149 + vtpm_vd_status(tp->chip, (tp->is_connected
2150 + ? TPM_VD_STATUS_CONNECTED
2151 + : TPM_VD_STATUS_DISCONNECTED));
2152 +}
2153 +
2154 +
2155 +static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
2156 +{
2157 + /*
2158 + * Don't notify upper layer if we are in suspend mode and
2159 + * should disconnect - the assumption is that we will resume.
2160 + * The mutex keeps apps from sending.
2161 + */
2162 + if (is_connected == 0 && tp->is_suspended == 1)
2163 + return;
2164 +
2165 + /*
2166 + * Unlock the mutex if we are connected again
2167 + * after being suspended - now resuming.
2168 + * This also removes the suspend state.
2169 + */
2170 + if (is_connected == 1 && tp->is_suspended == 1)
2171 + tpmfront_suspend_finish(tp);
2172 +
2173 + if (is_connected != tp->is_connected) {
2174 + tp->is_connected = is_connected;
2175 + tpmif_notify_upperlayer(tp);
2176 + }
2177 +}
2178 +
2179 +
2180 +
2181 +/* =================================================================
2182 + * Initialization function.
2183 + * =================================================================
2184 + */
2185 +
2186 +
2187 +static int __init tpmif_init(void)
2188 +{
2189 + struct tpm_private *tp;
2190 +
2191 + if (is_initial_xendomain())
2192 + return -EPERM;
2193 +
2194 + tp = tpm_private_get();
2195 + if (!tp)
2196 + return -ENOMEM;
2197 +
2198 + IPRINTK("Initialising the vTPM driver.\n");
2199 + if (gnttab_alloc_grant_references(TPMIF_TX_RING_SIZE,
2200 + &gref_head) < 0) {
2201 + tpm_private_put();
2202 + return -EFAULT;
2203 + }
2204 +
2205 + init_tpm_xenbus();
2206 + return 0;
2207 +}
2208 +
2209 +
2210 +module_init(tpmif_init);
2211 +
2212 +MODULE_LICENSE("Dual BSD/GPL");
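
A note on tpmfront_probe() above: tpm_private_get() takes a reference, but the early error returns (init_vtpm() failing, xenbus_scanf() failing) never drop it. A minimal sketch of a balanced variant, assuming the helper names and semantics of the driver above; a complete version would also unwind init_vtpm():

static int tpmfront_probe_balanced(struct xenbus_device *dev,
                                   const struct xenbus_device_id *id)
{
        int err, handle;
        struct tpm_private *tp = tpm_private_get();

        if (!tp)
                return -ENOMEM;

        tp->chip = init_vtpm(&dev->dev, tp);
        if (IS_ERR(tp->chip)) {
                err = PTR_ERR(tp->chip);
                goto out_put;                   /* drop the reference we took */
        }

        err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%i", &handle);
        if (err < 0) {
                if (!XENBUS_EXIST_ERR(err))
                        xenbus_dev_fatal(dev, err, "reading handle");
                goto out_put;
        }

        tp->dev = dev;
        err = talk_to_backend(dev, tp);
        if (err)
                goto out_put;

        return 0;

out_put:
        tpm_private_put();
        return err;
}
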
2213 Index: linux-2.6.27/drivers/ide/ide-lib.c
2214 ===================================================================
2215 --- linux-2.6.27.orig/drivers/ide/ide-lib.c
2216 +++ linux-2.6.27/drivers/ide/ide-lib.c
2217 @@ -177,12 +177,12 @@ void ide_toggle_bounce(ide_drive_t *driv
2218 {
2219 u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */
2220
2221 - if (!PCI_DMA_BUS_IS_PHYS) {
2222 - addr = BLK_BOUNCE_ANY;
2223 - } else if (on && drive->media == ide_disk) {
2224 + if (on && drive->media == ide_disk) {
2225 struct device *dev = drive->hwif->dev;
2226
2227 - if (dev && dev->dma_mask)
2228 + if (!PCI_DMA_BUS_IS_PHYS)
2229 + addr = BLK_BOUNCE_ANY;
2230 + else if (dev && dev->dma_mask)
2231 addr = *dev->dma_mask;
2232 }
2233
2234 Index: linux-2.6.27/drivers/oprofile/buffer_sync.c
2235 ===================================================================
2236 --- linux-2.6.27.orig/drivers/oprofile/buffer_sync.c
2237 +++ linux-2.6.27/drivers/oprofile/buffer_sync.c
2238 @@ -6,6 +6,10 @@
2239 *
2240 * @author John Levon <levon@movementarian.org>
2241 *
2242 + * Modified by Aravind Menon for Xen
2243 + * These modifications are:
2244 + * Copyright (C) 2005 Hewlett-Packard Co.
2245 + *
2246 * This is the core of the buffer management. Each
2247 * CPU buffer is processed and entered into the
2248 * global event buffer. Such processing is necessary
2249 @@ -40,6 +44,7 @@ static cpumask_t marked_cpus = CPU_MASK_
2250 static DEFINE_SPINLOCK(task_mortuary);
2251 static void process_task_mortuary(void);
2252
2253 +static int cpu_current_domain[NR_CPUS];
2254
2255 /* Take ownership of the task struct and place it on the
2256 * list for processing. Only after two full buffer syncs
2257 @@ -148,6 +153,11 @@ static void end_sync(void)
2258 int sync_start(void)
2259 {
2260 int err;
2261 + int i;
2262 +
2263 + for (i = 0; i < NR_CPUS; i++) {
2264 + cpu_current_domain[i] = COORDINATOR_DOMAIN;
2265 + }
2266
2267 start_cpu_work();
2268
2269 @@ -274,15 +284,31 @@ static void add_cpu_switch(int i)
2270 last_cookie = INVALID_COOKIE;
2271 }
2272
2273 -static void add_kernel_ctx_switch(unsigned int in_kernel)
2274 +static void add_cpu_mode_switch(unsigned int cpu_mode)
2275 {
2276 add_event_entry(ESCAPE_CODE);
2277 - if (in_kernel)
2278 - add_event_entry(KERNEL_ENTER_SWITCH_CODE);
2279 - else
2280 - add_event_entry(KERNEL_EXIT_SWITCH_CODE);
2281 + switch (cpu_mode) {
2282 + case CPU_MODE_USER:
2283 + add_event_entry(USER_ENTER_SWITCH_CODE);
2284 + break;
2285 + case CPU_MODE_KERNEL:
2286 + add_event_entry(KERNEL_ENTER_SWITCH_CODE);
2287 + break;
2288 + case CPU_MODE_XEN:
2289 + add_event_entry(XEN_ENTER_SWITCH_CODE);
2290 + break;
2291 + default:
2292 + break;
2293 + }
2294 }
2295 -
2296 +
2297 +static void add_domain_switch(unsigned long domain_id)
2298 +{
2299 + add_event_entry(ESCAPE_CODE);
2300 + add_event_entry(DOMAIN_SWITCH_CODE);
2301 + add_event_entry(domain_id);
2302 +}
2303 +
2304 static void
2305 add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
2306 {
2307 @@ -347,9 +373,9 @@ static int add_us_sample(struct mm_struc
2308 * for later lookup from userspace.
2309 */
2310 static int
2311 -add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
2312 +add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
2313 {
2314 - if (in_kernel) {
2315 + if (cpu_mode >= CPU_MODE_KERNEL) {
2316 add_sample_entry(s->eip, s->event);
2317 return 1;
2318 } else if (mm) {
2319 @@ -495,15 +521,21 @@ void sync_buffer(int cpu)
2320 struct mm_struct *mm = NULL;
2321 struct task_struct * new;
2322 unsigned long cookie = 0;
2323 - int in_kernel = 1;
2324 + int cpu_mode = CPU_MODE_KERNEL;
2325 unsigned int i;
2326 sync_buffer_state state = sb_buffer_start;
2327 unsigned long available;
2328 + int domain_switch = 0;
2329
2330 mutex_lock(&buffer_mutex);
2331
2332 add_cpu_switch(cpu);
2333
2334 + /* Assign the first samples in this CPU buffer to the same
2335 + domain that we were processing when sync_buffer() last ran. */
2336 + if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
2337 + add_domain_switch(cpu_current_domain[cpu]);
2338 + }
2339 /* Remember, only we can modify tail_pos */
2340
2341 available = get_slots(cpu_buf);
2342 @@ -511,16 +543,18 @@ void sync_buffer(int cpu)
2343 for (i = 0; i < available; ++i) {
2344 struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
2345
2346 - if (is_code(s->eip)) {
2347 - if (s->event <= CPU_IS_KERNEL) {
2348 - /* kernel/userspace switch */
2349 - in_kernel = s->event;
2350 + if (is_code(s->eip) && !domain_switch) {
2351 + if (s->event <= CPU_MODE_XEN) {
2352 + /* xen/kernel/userspace switch */
2353 + cpu_mode = s->event;
2354 if (state == sb_buffer_start)
2355 state = sb_sample_start;
2356 - add_kernel_ctx_switch(s->event);
2357 + add_cpu_mode_switch(s->event);
2358 } else if (s->event == CPU_TRACE_BEGIN) {
2359 state = sb_bt_start;
2360 add_trace_begin();
2361 + } else if (s->event == CPU_DOMAIN_SWITCH) {
2362 + domain_switch = 1;
2363 } else {
2364 struct mm_struct * oldmm = mm;
2365
2366 @@ -534,11 +568,21 @@ void sync_buffer(int cpu)
2367 add_user_ctx_switch(new, cookie);
2368 }
2369 } else {
2370 - if (state >= sb_bt_start &&
2371 - !add_sample(mm, s, in_kernel)) {
2372 - if (state == sb_bt_start) {
2373 - state = sb_bt_ignore;
2374 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
2375 + if (domain_switch) {
2376 + cpu_current_domain[cpu] = s->eip;
2377 + add_domain_switch(s->eip);
2378 + domain_switch = 0;
2379 + } else {
2380 + if (cpu_current_domain[cpu] !=
2381 + COORDINATOR_DOMAIN) {
2382 + add_sample_entry(s->eip, s->event);
2383 + }
2384 + else if (state >= sb_bt_start &&
2385 + !add_sample(mm, s, cpu_mode)) {
2386 + if (state == sb_bt_start) {
2387 + state = sb_bt_ignore;
2388 + atomic_inc(&oprofile_stats.bt_lost_no_mapping);
2389 + }
2390 }
2391 }
2392 }
2393 @@ -547,6 +591,11 @@ void sync_buffer(int cpu)
2394 }
2395 release_mm(mm);
2396
2397 + /* We reset domain to COORDINATOR at each CPU switch */
2398 + if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
2399 + add_domain_switch(COORDINATOR_DOMAIN);
2400 + }
2401 +
2402 mark_done(cpu);
2403
2404 mutex_unlock(&buffer_mutex);
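
For orientation, the escaped records the Xen-aware sync_buffer() can now emit are sketched below; the encoding follows directly from add_cpu_mode_switch() and add_domain_switch() above.

/*
 * Event-buffer records produced for Xen domain accounting (sketch):
 *
 *   ESCAPE_CODE, KERNEL_ENTER_SWITCH_CODE         CPU entered kernel mode
 *   ESCAPE_CODE, USER_ENTER_SWITCH_CODE           CPU entered user mode
 *   ESCAPE_CODE, XEN_ENTER_SWITCH_CODE            CPU entered the hypervisor
 *   ESCAPE_CODE, DOMAIN_SWITCH_CODE, domain_id    samples that follow belong
 *                                                 to domain_id; emitting
 *                                                 COORDINATOR_DOMAIN (-1)
 *                                                 returns accounting to the
 *                                                 coordinator
 */
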
2405 Index: linux-2.6.27/drivers/oprofile/cpu_buffer.c
2406 ===================================================================
2407 --- linux-2.6.27.orig/drivers/oprofile/cpu_buffer.c
2408 +++ linux-2.6.27/drivers/oprofile/cpu_buffer.c
2409 @@ -6,6 +6,10 @@
2410 *
2411 * @author John Levon <levon@movementarian.org>
2412 *
2413 + * Modified by Aravind Menon for Xen
2414 + * These modifications are:
2415 + * Copyright (C) 2005 Hewlett-Packard Co.
2416 + *
2417 * Each CPU has a local buffer that stores PC value/event
2418 * pairs. We also log context switches when we notice them.
2419 * Eventually each CPU's buffer is processed into the global
2420 @@ -34,6 +38,8 @@ static void wq_sync_buffer(struct work_s
2421 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
2422 static int work_enabled;
2423
2424 +static int32_t current_domain = COORDINATOR_DOMAIN;
2425 +
2426 void free_cpu_buffers(void)
2427 {
2428 int i;
2429 @@ -72,7 +78,7 @@ int alloc_cpu_buffers(void)
2430 goto fail;
2431
2432 b->last_task = NULL;
2433 - b->last_is_kernel = -1;
2434 + b->last_cpu_mode = -1;
2435 b->tracing = 0;
2436 b->buffer_size = buffer_size;
2437 b->tail_pos = 0;
2438 @@ -130,7 +136,7 @@ void cpu_buffer_reset(struct oprofile_cp
2439 * collected will populate the buffer with proper
2440 * values to initialize the buffer
2441 */
2442 - cpu_buf->last_is_kernel = -1;
2443 + cpu_buf->last_cpu_mode = -1;
2444 cpu_buf->last_task = NULL;
2445 }
2446
2447 @@ -180,13 +186,13 @@ add_code(struct oprofile_cpu_buffer * bu
2448 * because of the head/tail separation of the writer and reader
2449 * of the CPU buffer.
2450 *
2451 - * is_kernel is needed because on some architectures you cannot
2452 + * cpu_mode is needed because on some architectures you cannot
2453 * tell if you are in kernel or user space simply by looking at
2454 - * pc. We tag this in the buffer by generating kernel enter/exit
2455 - * events whenever is_kernel changes
2456 + * pc. We tag this in the buffer by generating kernel/user (and xen)
2457 + * enter events whenever cpu_mode changes
2458 */
2459 static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
2460 - int is_kernel, unsigned long event)
2461 + int cpu_mode, unsigned long event)
2462 {
2463 struct task_struct * task;
2464
2465 @@ -202,18 +208,18 @@ static int log_sample(struct oprofile_cp
2466 return 0;
2467 }
2468
2469 - is_kernel = !!is_kernel;
2470 -
2471 task = current;
2472
2473 /* notice a switch from user->kernel or vice versa */
2474 - if (cpu_buf->last_is_kernel != is_kernel) {
2475 - cpu_buf->last_is_kernel = is_kernel;
2476 - add_code(cpu_buf, is_kernel);
2477 + if (cpu_buf->last_cpu_mode != cpu_mode) {
2478 + cpu_buf->last_cpu_mode = cpu_mode;
2479 + add_code(cpu_buf, cpu_mode);
2480 }
2481 -
2482 +
2483 /* notice a task switch */
2484 - if (cpu_buf->last_task != task) {
2485 + /* if not processing other domain samples */
2486 + if ((cpu_buf->last_task != task) &&
2487 + (current_domain == COORDINATOR_DOMAIN)) {
2488 cpu_buf->last_task = task;
2489 add_code(cpu_buf, (unsigned long)task);
2490 }
2491 @@ -297,6 +303,25 @@ void oprofile_add_trace(unsigned long pc
2492 add_sample(cpu_buf, pc, 0);
2493 }
2494
2495 +int oprofile_add_domain_switch(int32_t domain_id)
2496 +{
2497 + struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
2498 +
2499 + /* Should have space for switching into and out of a domain
2500 + * (2 slots each), plus one sample and one CPU mode switch. */
2501 + if (((nr_available_slots(cpu_buf) < 6) &&
2502 + (domain_id != COORDINATOR_DOMAIN)) ||
2503 + (nr_available_slots(cpu_buf) < 2))
2504 + return 0;
2505 +
2506 + add_code(cpu_buf, CPU_DOMAIN_SWITCH);
2507 + add_sample(cpu_buf, domain_id, 0);
2508 +
2509 + current_domain = domain_id;
2510 +
2511 + return 1;
2512 +}
2513 +
2514 /*
2515 * This serves to avoid cpu buffer overflow, and makes sure
2516 * the task mortuary progresses
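
A minimal usage sketch for the new oprofile_add_domain_switch() hook, as a Xen oprofile driver might call it when logging samples on behalf of another domain; the function below and its sample source are hypothetical:

/* Sketch only: log one sample attributed to another domain. */
static void xenoprof_log_passive_sample(int32_t dom, unsigned long eip,
                                        int mode, unsigned long event)
{
        /* Tag the CPU buffer first; the call returns 0 when the buffer
         * lacks the six slots it needs, in which case we drop the sample. */
        if (!oprofile_add_domain_switch(dom))
                return;

        oprofile_add_pc(eip, mode, event);

        /* Hand accounting back to the coordinator afterwards. */
        oprofile_add_domain_switch(COORDINATOR_DOMAIN);
}
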
2517 Index: linux-2.6.27/drivers/oprofile/cpu_buffer.h
2518 ===================================================================
2519 --- linux-2.6.27.orig/drivers/oprofile/cpu_buffer.h
2520 +++ linux-2.6.27/drivers/oprofile/cpu_buffer.h
2521 @@ -37,7 +37,7 @@ struct oprofile_cpu_buffer {
2522 volatile unsigned long tail_pos;
2523 unsigned long buffer_size;
2524 struct task_struct * last_task;
2525 - int last_is_kernel;
2526 + int last_cpu_mode;
2527 int tracing;
2528 struct op_sample * buffer;
2529 unsigned long sample_received;
2530 @@ -53,7 +53,10 @@ DECLARE_PER_CPU(struct oprofile_cpu_buff
2531 void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
2532
2533 /* transient events for the CPU buffer -> event buffer */
2534 -#define CPU_IS_KERNEL 1
2535 -#define CPU_TRACE_BEGIN 2
2536 +#define CPU_MODE_USER 0
2537 +#define CPU_MODE_KERNEL 1
2538 +#define CPU_MODE_XEN 2
2539 +#define CPU_TRACE_BEGIN 3
2540 +#define CPU_DOMAIN_SWITCH 4
2541
2542 #endif /* OPROFILE_CPU_BUFFER_H */
2543 Index: linux-2.6.27/drivers/oprofile/event_buffer.h
2544 ===================================================================
2545 --- linux-2.6.27.orig/drivers/oprofile/event_buffer.h
2546 +++ linux-2.6.27/drivers/oprofile/event_buffer.h
2547 @@ -30,6 +30,9 @@ void wake_up_buffer_waiter(void);
2548 #define INVALID_COOKIE ~0UL
2549 #define NO_COOKIE 0UL
2550
2551 +/* Constant used to refer to coordinator domain (Xen) */
2552 +#define COORDINATOR_DOMAIN -1
2553 +
2554 extern const struct file_operations event_buffer_fops;
2555
2556 /* mutex between sync_cpu_buffers() and the
2557 Index: linux-2.6.27/drivers/oprofile/oprof.c
2558 ===================================================================
2559 --- linux-2.6.27.orig/drivers/oprofile/oprof.c
2560 +++ linux-2.6.27/drivers/oprofile/oprof.c
2561 @@ -5,6 +5,10 @@
2562 * @remark Read the file COPYING
2563 *
2564 * @author John Levon <levon@movementarian.org>
2565 + *
2566 + * Modified by Aravind Menon for Xen
2567 + * These modifications are:
2568 + * Copyright (C) 2005 Hewlett-Packard Co.
2569 */
2570
2571 #include <linux/kernel.h>
2572 @@ -33,6 +37,32 @@ static DEFINE_MUTEX(start_mutex);
2573 */
2574 static int timer = 0;
2575
2576 +int oprofile_set_active(int active_domains[], unsigned int adomains)
2577 +{
2578 + int err;
2579 +
2580 + if (!oprofile_ops.set_active)
2581 + return -EINVAL;
2582 +
2583 + mutex_lock(&start_mutex);
2584 + err = oprofile_ops.set_active(active_domains, adomains);
2585 + mutex_unlock(&start_mutex);
2586 + return err;
2587 +}
2588 +
2589 +int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
2590 +{
2591 + int err;
2592 +
2593 + if (!oprofile_ops.set_passive)
2594 + return -EINVAL;
2595 +
2596 + mutex_lock(&start_mutex);
2597 + err = oprofile_ops.set_passive(passive_domains, pdomains);
2598 + mutex_unlock(&start_mutex);
2599 + return err;
2600 +}
2601 +
2602 int oprofile_setup(void)
2603 {
2604 int err;
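
oprofile_set_active() and oprofile_set_passive() only forward to oprofile_ops under start_mutex, so domain selection is driver-opt-in and userspace sees -EINVAL when a driver has not filled the callbacks in. A wiring sketch with hypothetical Xen-side backends:

/* Hypothetical backends, for illustration only. */
extern int xenoprof_set_active(int *active_domains, unsigned int adomains);
extern int xenoprof_set_passive(int *passive_domains, unsigned int pdomains);

static int xenoprofile_init(struct oprofile_operations *ops)
{
        ops->set_active  = xenoprof_set_active;
        ops->set_passive = xenoprof_set_passive;
        return 0;
}
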
2605 Index: linux-2.6.27/drivers/oprofile/oprof.h
2606 ===================================================================
2607 --- linux-2.6.27.orig/drivers/oprofile/oprof.h
2608 +++ linux-2.6.27/drivers/oprofile/oprof.h
2609 @@ -35,5 +35,8 @@ void oprofile_create_files(struct super_
2610 void oprofile_timer_init(struct oprofile_operations * ops);
2611
2612 int oprofile_set_backtrace(unsigned long depth);
2613 +
2614 +int oprofile_set_active(int active_domains[], unsigned int adomains);
2615 +int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
2616
2617 #endif /* OPROF_H */
2618 Index: linux-2.6.27/drivers/oprofile/oprofile_files.c
2619 ===================================================================
2620 --- linux-2.6.27.orig/drivers/oprofile/oprofile_files.c
2621 +++ linux-2.6.27/drivers/oprofile/oprofile_files.c
2622 @@ -5,15 +5,21 @@
2623 * @remark Read the file COPYING
2624 *
2625 * @author John Levon <levon@movementarian.org>
2626 + *
2627 + * Modified by Aravind Menon for Xen
2628 + * These modifications are:
2629 + * Copyright (C) 2005 Hewlett-Packard Co.
2630 */
2631
2632 #include <linux/fs.h>
2633 #include <linux/oprofile.h>
2634 +#include <asm/uaccess.h>
2635 +#include <linux/ctype.h>
2636
2637 #include "event_buffer.h"
2638 #include "oprofile_stats.h"
2639 #include "oprof.h"
2640 -
2641 +
2642 unsigned long fs_buffer_size = 131072;
2643 unsigned long fs_cpu_buffer_size = 8192;
2644 unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
2645 @@ -117,11 +123,202 @@ static ssize_t dump_write(struct file *
2646 static const struct file_operations dump_fops = {
2647 .write = dump_write,
2648 };
2649 -
2650 +
2651 +#define TMPBUFSIZE 512
2652 +
2653 +static unsigned int adomains = 0;
2654 +static int active_domains[MAX_OPROF_DOMAINS + 1];
2655 +static DEFINE_MUTEX(adom_mutex);
2656 +
2657 +static ssize_t adomain_write(struct file * file, char const __user * buf,
2658 + size_t count, loff_t * offset)
2659 +{
2660 + char *tmpbuf;
2661 + char *startp, *endp;
2662 + int i;
2663 + unsigned long val;
2664 + ssize_t retval = count;
2665 +
2666 + if (*offset)
2667 + return -EINVAL;
2668 + if (count > TMPBUFSIZE - 1)
2669 + return -EINVAL;
2670 +
2671 + if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
2672 + return -ENOMEM;
2673 +
2674 + if (copy_from_user(tmpbuf, buf, count)) {
2675 + kfree(tmpbuf);
2676 + return -EFAULT;
2677 + }
2678 + tmpbuf[count] = 0;
2679 +
2680 + mutex_lock(&adom_mutex);
2681 +
2682 + startp = tmpbuf;
2683 + /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
2684 + for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
2685 + val = simple_strtoul(startp, &endp, 0);
2686 + if (endp == startp)
2687 + break;
2688 + while (ispunct(*endp) || isspace(*endp))
2689 + endp++;
2690 + active_domains[i] = val;
2691 + if (active_domains[i] != val)
2692 + /* Overflow, force error below */
2693 + i = MAX_OPROF_DOMAINS + 1;
2694 + startp = endp;
2695 + }
2696 + /* Force error on trailing junk */
2697 + adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
2698 +
2699 + kfree(tmpbuf);
2700 +
2701 + if (adomains > MAX_OPROF_DOMAINS
2702 + || oprofile_set_active(active_domains, adomains)) {
2703 + adomains = 0;
2704 + retval = -EINVAL;
2705 + }
2706 +
2707 + mutex_unlock(&adom_mutex);
2708 + return retval;
2709 +}
2710 +
2711 +static ssize_t adomain_read(struct file * file, char __user * buf,
2712 + size_t count, loff_t * offset)
2713 +{
2714 + char * tmpbuf;
2715 + size_t len;
2716 + int i;
2717 + ssize_t retval;
2718 +
2719 + if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
2720 + return -ENOMEM;
2721 +
2722 + mutex_lock(&adom_mutex);
2723 +
2724 + len = 0;
2725 + for (i = 0; i < adomains; i++)
2726 + len += snprintf(tmpbuf + len,
2727 + len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
2728 + "%u ", active_domains[i]);
2729 + WARN_ON(len > TMPBUFSIZE);
2730 + if (len != 0 && len <= TMPBUFSIZE)
2731 + tmpbuf[len-1] = '\n';
2732 +
2733 + mutex_unlock(&adom_mutex);
2734 +
2735 + retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
2736 +
2737 + kfree(tmpbuf);
2738 + return retval;
2739 +}
2740 +
2741 +
2742 +static struct file_operations active_domain_ops = {
2743 + .read = adomain_read,
2744 + .write = adomain_write,
2745 +};
2746 +
2747 +static unsigned int pdomains = 0;
2748 +static int passive_domains[MAX_OPROF_DOMAINS];
2749 +static DEFINE_MUTEX(pdom_mutex);
2750 +
2751 +static ssize_t pdomain_write(struct file * file, char const __user * buf,
2752 + size_t count, loff_t * offset)
2753 +{
2754 + char *tmpbuf;
2755 + char *startp, *endp;
2756 + int i;
2757 + unsigned long val;
2758 + ssize_t retval = count;
2759 +
2760 + if (*offset)
2761 + return -EINVAL;
2762 + if (count > TMPBUFSIZE - 1)
2763 + return -EINVAL;
2764 +
2765 + if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
2766 + return -ENOMEM;
2767 +
2768 + if (copy_from_user(tmpbuf, buf, count)) {
2769 + kfree(tmpbuf);
2770 + return -EFAULT;
2771 + }
2772 + tmpbuf[count] = 0;
2773 +
2774 + mutex_lock(&pdom_mutex);
2775 +
2776 + startp = tmpbuf;
2777 + /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
2778 + for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
2779 + val = simple_strtoul(startp, &endp, 0);
2780 + if (endp == startp)
2781 + break;
2782 + while (ispunct(*endp) || isspace(*endp))
2783 + endp++;
2784 + passive_domains[i] = val;
2785 + if (passive_domains[i] != val)
2786 + /* Overflow, force error below */
2787 + i = MAX_OPROF_DOMAINS + 1;
2788 + startp = endp;
2789 + }
2790 + /* Force error on trailing junk */
2791 + pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
2792 +
2793 + kfree(tmpbuf);
2794 +
2795 + if (pdomains > MAX_OPROF_DOMAINS
2796 + || oprofile_set_passive(passive_domains, pdomains)) {
2797 + pdomains = 0;
2798 + retval = -EINVAL;
2799 + }
2800 +
2801 + mutex_unlock(&pdom_mutex);
2802 + return retval;
2803 +}
2804 +
2805 +static ssize_t pdomain_read(struct file * file, char __user * buf,
2806 + size_t count, loff_t * offset)
2807 +{
2808 + char * tmpbuf;
2809 + size_t len;
2810 + int i;
2811 + ssize_t retval;
2812 +
2813 + if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
2814 + return -ENOMEM;
2815 +
2816 + mutex_lock(&pdom_mutex);
2817 +
2818 + len = 0;
2819 + for (i = 0; i < pdomains; i++)
2820 + len += snprintf(tmpbuf + len,
2821 + len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
2822 + "%u ", passive_domains[i]);
2823 + WARN_ON(len > TMPBUFSIZE);
2824 + if (len != 0 && len <= TMPBUFSIZE)
2825 + tmpbuf[len-1] = '\n';
2826 +
2827 + mutex_unlock(&pdom_mutex);
2828 +
2829 + retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
2830 +
2831 + kfree(tmpbuf);
2832 + return retval;
2833 +}
2834 +
2835 +static struct file_operations passive_domain_ops = {
2836 + .read = pdomain_read,
2837 + .write = pdomain_write,
2838 +};
2839 +
2840 void oprofile_create_files(struct super_block * sb, struct dentry * root)
2841 {
2842 oprofilefs_create_file(sb, root, "enable", &enable_fops);
2843 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
2844 + oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
2845 + oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
2846 oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
2847 oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
2848 oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
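
From userspace, active_domains and passive_domains accept up to MAX_OPROF_DOMAINS ids separated by whitespace or punctuation; trailing junk makes the write fail with -EINVAL. A small usage sketch, assuming oprofilefs is mounted at the conventional /dev/oprofile:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Sketch: select which Xen domains to profile actively, e.g. "0 1 7". */
static int set_active_domains(const char *list)
{
        ssize_t n;
        int fd = open("/dev/oprofile/active_domains", O_WRONLY);

        if (fd < 0)
                return -1;
        n = write(fd, list, strlen(list));
        close(fd);
        return n == (ssize_t)strlen(list) ? 0 : -1;
}
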
2849 Index: linux-2.6.27/fs/aio.c
2850 ===================================================================
2851 --- linux-2.6.27.orig/fs/aio.c
2852 +++ linux-2.6.27/fs/aio.c
2853 @@ -36,6 +36,11 @@
2854 #include <asm/uaccess.h>
2855 #include <asm/mmu_context.h>
2856
2857 +#ifdef CONFIG_EPOLL
2858 +#include <linux/poll.h>
2859 +#include <linux/eventpoll.h>
2860 +#endif
2861 +
2862 #if DEBUG > 1
2863 #define dprintk printk
2864 #else
2865 @@ -1026,6 +1031,11 @@ put_rq:
2866 if (waitqueue_active(&ctx->wait))
2867 wake_up(&ctx->wait);
2868
2869 +#ifdef CONFIG_EPOLL
2870 + if (ctx->file && waitqueue_active(&ctx->poll_wait))
2871 + wake_up(&ctx->poll_wait);
2872 +#endif
2873 +
2874 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
2875 return ret;
2876 }
2877 @@ -1033,6 +1043,8 @@ put_rq:
2878 /* aio_read_evt
2879 * Pull an event off of the ioctx's event ring. Returns the number of
2880 * events fetched (0 or 1 ;-)
2881 + * If the ent parameter is NULL, just return the number of events that
2882 + * would be fetched, without consuming them.
2883 * FIXME: make this use cmpxchg.
2884 * TODO: make the ringbuffer user mmap()able (requires FIXME).
2885 */
2886 @@ -1055,13 +1067,18 @@ static int aio_read_evt(struct kioctx *i
2887
2888 head = ring->head % info->nr;
2889 if (head != ring->tail) {
2890 - struct io_event *evp = aio_ring_event(info, head, KM_USER1);
2891 - *ent = *evp;
2892 - head = (head + 1) % info->nr;
2893 - smp_mb(); /* finish reading the event before updatng the head */
2894 - ring->head = head;
2895 - ret = 1;
2896 - put_aio_ring_event(evp, KM_USER1);
2897 + if (ent) { /* event requested */
2898 + struct io_event *evp =
2899 + aio_ring_event(info, head, KM_USER1);
2900 + *ent = *evp;
2901 + head = (head + 1) % info->nr;
2902 + /* finish reading the event before updating the head */
2903 + smp_mb();
2904 + ring->head = head;
2905 + ret = 1;
2906 + put_aio_ring_event(evp, KM_USER1);
2907 + } else /* only need to know availability */
2908 + ret = 1;
2909 }
2910 spin_unlock(&info->ring_lock);
2911
2912 @@ -1251,6 +1268,13 @@ static void io_destroy(struct kioctx *io
2913
2914 aio_cancel_all(ioctx);
2915 wait_for_all_aios(ioctx);
2916 +#ifdef CONFIG_EPOLL
2917 + /* forget the poll file, but it's up to the user to close it */
2918 + if (ioctx->file) {
2919 + ioctx->file->private_data = 0;
2920 + ioctx->file = 0;
2921 + }
2922 +#endif
2923
2924 /*
2925 * Wake up any waiters. The setting of ctx->dead must be seen
2926 @@ -1261,6 +1285,67 @@ static void io_destroy(struct kioctx *io
2927 put_ioctx(ioctx); /* once for the lookup */
2928 }
2929
2930 +#ifdef CONFIG_EPOLL
2931 +
2932 +static int aio_queue_fd_close(struct inode *inode, struct file *file)
2933 +{
2934 + struct kioctx *ioctx = file->private_data;
2935 + if (ioctx) {
2936 + file->private_data = 0;
2937 + spin_lock_irq(&ioctx->ctx_lock);
2938 + ioctx->file = 0;
2939 + spin_unlock_irq(&ioctx->ctx_lock);
2940 + }
2941 + return 0;
2942 +}
2943 +
2944 +static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
2945 +{ unsigned int pollflags = 0;
2946 + struct kioctx *ioctx = file->private_data;
2947 +
2948 + if (ioctx) {
2949 +
2950 + spin_lock_irq(&ioctx->ctx_lock);
2951 + /* Insert inside our poll wait queue */
2952 + poll_wait(file, &ioctx->poll_wait, wait);
2953 +
2954 + /* Check our condition */
2955 + if (aio_read_evt(ioctx, 0))
2956 + pollflags = POLLIN | POLLRDNORM;
2957 + spin_unlock_irq(&ioctx->ctx_lock);
2958 + }
2959 +
2960 + return pollflags;
2961 +}
2962 +
2963 +static const struct file_operations aioq_fops = {
2964 + .release = aio_queue_fd_close,
2965 + .poll = aio_queue_fd_poll
2966 +};
2967 +
2968 +/* make_aio_fd:
2969 + * Create a file descriptor that can be used to poll the event queue.
2970 + * Based and piggybacked on the excellent epoll code.
2971 + */
2972 +
2973 +static int make_aio_fd(struct kioctx *ioctx)
2974 +{
2975 + int error, fd;
2976 + struct inode *inode;
2977 + struct file *file;
2978 +
2979 + error = ep_getfd(&fd, &inode, &file, NULL, &aioq_fops);
2980 + if (error)
2981 + return error;
2982 +
2983 + /* associate the file with the IO context */
2984 + file->private_data = ioctx;
2985 + ioctx->file = file;
2986 + init_waitqueue_head(&ioctx->poll_wait);
2987 + return fd;
2988 +}
2989 +#endif
2990 +
2991 /* sys_io_setup:
2992 * Create an aio_context capable of receiving at least nr_events.
2993 * ctxp must not point to an aio_context that already exists, and
2994 @@ -1273,18 +1358,30 @@ static void io_destroy(struct kioctx *io
2995 * resources are available. May fail with -EFAULT if an invalid
2996 * pointer is passed for ctxp. Will fail with -ENOSYS if not
2997 * implemented.
2998 + *
2999 + * To request a selectable fd, the user context has to be initialized
3000 + * to 1, instead of 0, and the return value is the fd.
3001 + * This keeps the system call compatible, since a non-zero value
3002 + * was not allowed so far.
3003 */
3004 SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
3005 {
3006 struct kioctx *ioctx = NULL;
3007 unsigned long ctx;
3008 long ret;
3009 + int make_fd = 0;
3010
3011 ret = get_user(ctx, ctxp);
3012 if (unlikely(ret))
3013 goto out;
3014
3015 ret = -EINVAL;
3016 +#ifdef CONFIG_EPOLL
3017 + if (ctx == 1) {
3018 + make_fd = 1;
3019 + ctx = 0;
3020 + }
3021 +#endif
3022 if (unlikely(ctx || nr_events == 0)) {
3023 pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
3024 ctx, nr_events);
3025 @@ -1295,8 +1392,12 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_e
3026 ret = PTR_ERR(ioctx);
3027 if (!IS_ERR(ioctx)) {
3028 ret = put_user(ioctx->user_id, ctxp);
3029 - if (!ret)
3030 - return 0;
3031 +#ifdef CONFIG_EPOLL
3032 + if (make_fd && ret >= 0)
3033 + ret = make_aio_fd(ioctx);
3034 +#endif
3035 + if (ret >= 0)
3036 + return ret;
3037
3038 get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
3039 io_destroy(ioctx);
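
The userspace view of the CONFIG_EPOLL extension above: initializing the context handle to 1 asks the patched sys_io_setup() for a pollable file descriptor, while *ctxp still receives the real context id. A sketch using the raw syscall (glibc provides no io_setup wrapper); unpatched kernels reject a non-zero *ctx with -EINVAL:

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/aio_abi.h>

/* Sketch: returns an fd that polls readable when io_getevents()
 * would not block, or a negative value on failure. */
static int aio_setup_pollable(unsigned nr_events, aio_context_t *ctx)
{
        int fd;

        *ctx = 1;                       /* magic value: request a selectable fd */
        fd = syscall(SYS_io_setup, nr_events, ctx);
        /* on success, *ctx now holds the real context id and fd >= 0 */
        return fd;
}
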
3040 Index: linux-2.6.27/fs/compat_ioctl.c
3041 ===================================================================
3042 --- linux-2.6.27.orig/fs/compat_ioctl.c
3043 +++ linux-2.6.27/fs/compat_ioctl.c
3044 @@ -114,6 +114,13 @@
3045 #include <asm/fbio.h>
3046 #endif
3047
3048 +#ifdef CONFIG_XEN
3049 +#include <xen/interface/xen.h>
3050 +#include <xen/public/evtchn.h>
3051 +#include <xen/public/privcmd.h>
3052 +#include <xen/compat_ioctl.h>
3053 +#endif
3054 +
3055 static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd,
3056 unsigned long arg, struct file *f)
3057 {
3058 @@ -2736,6 +2743,18 @@ IGNORE_IOCTL(FBIOGETCMAP32)
3059 IGNORE_IOCTL(FBIOSCURSOR32)
3060 IGNORE_IOCTL(FBIOGCURSOR32)
3061 #endif
3062 +
3063 +#ifdef CONFIG_XEN
3064 +HANDLE_IOCTL(IOCTL_PRIVCMD_MMAP_32, privcmd_ioctl_32)
3065 +HANDLE_IOCTL(IOCTL_PRIVCMD_MMAPBATCH_32, privcmd_ioctl_32)
3066 +COMPATIBLE_IOCTL(IOCTL_PRIVCMD_HYPERCALL)
3067 +COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_VIRQ)
3068 +COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_INTERDOMAIN)
3069 +COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_UNBOUND_PORT)
3070 +COMPATIBLE_IOCTL(IOCTL_EVTCHN_UNBIND)
3071 +COMPATIBLE_IOCTL(IOCTL_EVTCHN_NOTIFY)
3072 +COMPATIBLE_IOCTL(IOCTL_EVTCHN_RESET)
3073 +#endif
3074 };
3075
3076 #define IOCTL_HASHSIZE 256
3077 Index: linux-2.6.27/include/acpi/processor.h
3078 ===================================================================
3079 --- linux-2.6.27.orig/include/acpi/processor.h
3080 +++ linux-2.6.27/include/acpi/processor.h
3081 @@ -17,6 +17,12 @@
3082 #define ACPI_PROCESSOR_MAX_THROTTLE 250 /* 25% */
3083 #define ACPI_PROCESSOR_MAX_DUTY_WIDTH 4
3084
3085 +#ifdef CONFIG_XEN
3086 +#define NR_ACPI_CPUS (NR_CPUS < 256 ? 256 : NR_CPUS)
3087 +#else
3088 +#define NR_ACPI_CPUS NR_CPUS
3089 +#endif /* CONFIG_XEN */
3090 +
3091 #define ACPI_PDC_REVISION_ID 0x1
3092
3093 #define ACPI_PSD_REV0_REVISION 0 /* Support for _PSD as in ACPI 3.0 */
3094 @@ -42,6 +48,17 @@
3095
3096 struct acpi_processor_cx;
3097
3098 +#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
3099 +struct acpi_csd_package {
3100 + acpi_integer num_entries;
3101 + acpi_integer revision;
3102 + acpi_integer domain;
3103 + acpi_integer coord_type;
3104 + acpi_integer num_processors;
3105 + acpi_integer index;
3106 +} __attribute__ ((packed));
3107 +#endif
3108 +
3109 struct acpi_power_register {
3110 u8 descriptor;
3111 u16 length;
3112 @@ -74,6 +91,12 @@ struct acpi_processor_cx {
3113 u32 power;
3114 u32 usage;
3115 u64 time;
3116 +#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
3117 + /* Require raw information for external control logic */
3118 + struct acpi_power_register reg;
3119 + u32 csd_count;
3120 + struct acpi_csd_package *domain_info;
3121 +#endif
3122 struct acpi_processor_cx_policy promotion;
3123 struct acpi_processor_cx_policy demotion;
3124 char desc[ACPI_CX_DESC_LEN];
3125 @@ -304,6 +327,9 @@ static inline void acpi_processor_ppc_ex
3126 {
3127 return;
3128 }
3129 +#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
3130 +int acpi_processor_ppc_has_changed(struct acpi_processor *pr);
3131 +#else
3132 static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
3133 {
3134 static unsigned int printout = 1;
3135 @@ -316,6 +342,7 @@ static inline int acpi_processor_ppc_has
3136 }
3137 return 0;
3138 }
3139 +#endif /* CONFIG_PROCESSOR_EXTERNAL_CONTROL */
3140 #endif /* CONFIG_CPU_FREQ */
3141
3142 /* in processor_throttling.c */
3143 @@ -352,4 +379,120 @@ static inline void acpi_thermal_cpufreq_
3144 }
3145 #endif
3146
3147 +/*
3148 + * The following interfaces are geared toward external processor PM
3149 + * control logic such as a VMM.
3150 + */
3151 +/* Events notified to external control logic */
3152 +#define PROCESSOR_PM_INIT 1
3153 +#define PROCESSOR_PM_CHANGE 2
3154 +#define PROCESSOR_HOTPLUG 3
3155 +
3156 +/* Objects for the PM events */
3157 +#define PM_TYPE_IDLE 0
3158 +#define PM_TYPE_PERF 1
3159 +#define PM_TYPE_THR 2
3160 +#define PM_TYPE_MAX 3
3161 +
3162 +/* Processor hotplug events */
3163 +#define HOTPLUG_TYPE_ADD 0
3164 +#define HOTPLUG_TYPE_REMOVE 1
3165 +
3166 +#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
3167 +struct processor_extcntl_ops {
3168 + /* Transfer processor PM events to external control logic */
3169 + int (*pm_ops[PM_TYPE_MAX])(struct acpi_processor *pr, int event);
3170 + /* Notify physical processor status to external control logic */
3171 + int (*hotplug)(struct acpi_processor *pr, int type);
3172 +};
3173 +extern const struct processor_extcntl_ops *processor_extcntl_ops;
3174 +
3175 +static inline int processor_cntl_external(void)
3176 +{
3177 + return (processor_extcntl_ops != NULL);
3178 +}
3179 +
3180 +static inline int processor_pm_external(void)
3181 +{
3182 + return processor_cntl_external() &&
3183 + (processor_extcntl_ops->pm_ops[PM_TYPE_IDLE] != NULL);
3184 +}
3185 +
3186 +static inline int processor_pmperf_external(void)
3187 +{
3188 + return processor_cntl_external() &&
3189 + (processor_extcntl_ops->pm_ops[PM_TYPE_PERF] != NULL);
3190 +}
3191 +
3192 +static inline int processor_pmthr_external(void)
3193 +{
3194 + return processor_cntl_external() &&
3195 + (processor_extcntl_ops->pm_ops[PM_TYPE_THR] != NULL);
3196 +}
3197 +
3198 +extern int processor_notify_external(struct acpi_processor *pr,
3199 + int event, int type);
3200 +extern void processor_extcntl_init(void);
3201 +extern int processor_extcntl_prepare(struct acpi_processor *pr);
3202 +extern int acpi_processor_get_performance_info(struct acpi_processor *pr);
3203 +extern int acpi_processor_get_psd(struct acpi_processor *pr);
3204 +void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **);
3205 +#else
3206 +static inline int processor_cntl_external(void) {return 0;}
3207 +static inline int processor_pm_external(void) {return 0;}
3208 +static inline int processor_pmperf_external(void) {return 0;}
3209 +static inline int processor_pmthr_external(void) {return 0;}
3210 +static inline int processor_notify_external(struct acpi_processor *pr,
3211 + int event, int type)
3212 +{
3213 + return 0;
3214 +}
3215 +static inline void processor_extcntl_init(void) {}
3216 +static inline int processor_extcntl_prepare(struct acpi_processor *pr)
3217 +{
3218 + return 0;
3219 +}
3220 +#endif /* CONFIG_PROCESSOR_EXTERNAL_CONTROL */
3221 +
3222 +#ifdef CONFIG_XEN
3223 +static inline void xen_convert_pct_reg(struct xen_pct_register *xpct,
3224 + struct acpi_pct_register *apct)
3225 +{
3226 + xpct->descriptor = apct->descriptor;
3227 + xpct->length = apct->length;
3228 + xpct->space_id = apct->space_id;
3229 + xpct->bit_width = apct->bit_width;
3230 + xpct->bit_offset = apct->bit_offset;
3231 + xpct->reserved = apct->reserved;
3232 + xpct->address = apct->address;
3233 +}
3234 +
3235 +static inline void xen_convert_pss_states(struct xen_processor_px *xpss,
3236 + struct acpi_processor_px *apss, int state_count)
3237 +{
3238 + int i;
3239 + for (i = 0; i < state_count; i++) {
3240 + xpss->core_frequency = apss->core_frequency;
3241 + xpss->power = apss->power;
3242 + xpss->transition_latency = apss->transition_latency;
3243 + xpss->bus_master_latency = apss->bus_master_latency;
3244 + xpss->control = apss->control;
3245 + xpss->status = apss->status;
3246 + xpss++;
3247 + apss++;
3248 + }
3249 +}
3250 +
3251 +static inline void xen_convert_psd_pack(struct xen_psd_package *xpsd,
3252 + struct acpi_psd_package *apsd)
3253 +{
3254 + xpsd->num_entries = apsd->num_entries;
3255 + xpsd->revision = apsd->revision;
3256 + xpsd->domain = apsd->domain;
3257 + xpsd->coord_type = apsd->coord_type;
3258 + xpsd->num_processors = apsd->num_processors;
3259 +}
3260 +
3261 +#endif /* CONFIG_XEN */
3262 +
3263 #endif
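
Intended consumption of the hooks above: an external PM component fills a processor_extcntl_ops and hands it back through arch_acpi_processor_init_extcntl(), after which processor_cntl_external() and friends report true. A sketch with hypothetical Xen-side callbacks:

/* Hypothetical callbacks, for illustration only. */
extern int xen_pm_event(struct acpi_processor *pr, int event);
extern int xen_hotplug_event(struct acpi_processor *pr, int type);

static const struct processor_extcntl_ops xen_extcntl_ops = {
        .pm_ops[PM_TYPE_IDLE] = xen_pm_event,
        .pm_ops[PM_TYPE_PERF] = xen_pm_event,
        .hotplug              = xen_hotplug_event,
};

void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops)
{
        *ops = &xen_extcntl_ops;
}
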
3264 Index: linux-2.6.27/include/asm-generic/pci.h
3265 ===================================================================
3266 --- linux-2.6.27.orig/include/asm-generic/pci.h
3267 +++ linux-2.6.27/include/asm-generic/pci.h
3268 @@ -43,7 +43,9 @@ pcibios_select_root(struct pci_dev *pdev
3269 return root;
3270 }
3271
3272 +#ifndef pcibios_scan_all_fns
3273 #define pcibios_scan_all_fns(a, b) 0
3274 +#endif
3275
3276 #ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
3277 static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
3278 Index: linux-2.6.27/include/asm-generic/pgtable.h
3279 ===================================================================
3280 --- linux-2.6.27.orig/include/asm-generic/pgtable.h
3281 +++ linux-2.6.27/include/asm-generic/pgtable.h
3282 @@ -99,6 +99,10 @@ static inline void ptep_set_wrprotect(st
3283 }
3284 #endif
3285
3286 +#ifndef arch_change_pte_range
3287 +#define arch_change_pte_range(mm, pmd, addr, end, newprot) 0
3288 +#endif
3289 +
3290 #ifndef __HAVE_ARCH_PTE_SAME
3291 #define pte_same(A,B) (pte_val(A) == pte_val(B))
3292 #endif
3293 Index: linux-2.6.27/include/linux/aio.h
3294 ===================================================================
3295 --- linux-2.6.27.orig/include/linux/aio.h
3296 +++ linux-2.6.27/include/linux/aio.h
3297 @@ -199,6 +199,11 @@ struct kioctx {
3298 struct aio_ring_info ring_info;
3299
3300 struct delayed_work wq;
3301 +#ifdef CONFIG_EPOLL
3302 + // poll integration
3303 + wait_queue_head_t poll_wait;
3304 + struct file *file;
3305 +#endif
3306 };
3307
3308 /* prototypes */
3309 Index: linux-2.6.27/include/linux/highmem.h
3310 ===================================================================
3311 --- linux-2.6.27.orig/include/linux/highmem.h
3312 +++ linux-2.6.27/include/linux/highmem.h
3313 @@ -62,6 +62,7 @@ static inline void *kmap_atomic(struct p
3314
3315 #endif /* CONFIG_HIGHMEM */
3316
3317 +#ifndef __HAVE_ARCH_CLEAR_USER_HIGHPAGE
3318 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
3319 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
3320 {
3321 @@ -69,6 +70,7 @@ static inline void clear_user_highpage(s
3322 clear_user_page(addr, vaddr, page);
3323 kunmap_atomic(addr, KM_USER0);
3324 }
3325 +#endif
3326
3327 #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
3328 /**
3329 @@ -115,12 +117,14 @@ alloc_zeroed_user_highpage_movable(struc
3330 return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
3331 }
3332
3333 +#ifndef __HAVE_ARCH_CLEAR_HIGHPAGE
3334 static inline void clear_highpage(struct page *page)
3335 {
3336 void *kaddr = kmap_atomic(page, KM_USER0);
3337 clear_page(kaddr);
3338 kunmap_atomic(kaddr, KM_USER0);
3339 }
3340 +#endif
3341
3342 static inline void zero_user_segments(struct page *page,
3343 unsigned start1, unsigned end1,
3344 @@ -174,6 +178,8 @@ static inline void copy_user_highpage(st
3345
3346 #endif
3347
3348 +#ifndef __HAVE_ARCH_COPY_HIGHPAGE
3349 +
3350 static inline void copy_highpage(struct page *to, struct page *from)
3351 {
3352 char *vfrom, *vto;
3353 @@ -185,4 +191,6 @@ static inline void copy_highpage(struct
3354 kunmap_atomic(vto, KM_USER1);
3355 }
3356
3357 +#endif
3358 +
3359 #endif /* _LINUX_HIGHMEM_H */
3360 Index: linux-2.6.27/include/linux/interrupt.h
3361 ===================================================================
3362 --- linux-2.6.27.orig/include/linux/interrupt.h
3363 +++ linux-2.6.27/include/linux/interrupt.h
3364 @@ -218,6 +218,12 @@ static inline int disable_irq_wake(unsig
3365 }
3366 #endif /* CONFIG_GENERIC_HARDIRQS */
3367
3368 +#ifdef CONFIG_HAVE_IRQ_IGNORE_UNHANDLED
3369 +int irq_ignore_unhandled(unsigned int irq);
3370 +#else
3371 +#define irq_ignore_unhandled(irq) 0
3372 +#endif
3373 +
3374 #ifndef __ARCH_SET_SOFTIRQ_PENDING
3375 #define set_softirq_pending(x) (local_softirq_pending() = (x))
3376 #define or_softirq_pending(x) (local_softirq_pending() |= (x))
3377 Index: linux-2.6.27/include/linux/kexec.h
3378 ===================================================================
3379 --- linux-2.6.27.orig/include/linux/kexec.h
3380 +++ linux-2.6.27/include/linux/kexec.h
3381 @@ -46,6 +46,13 @@
3382 KEXEC_CORE_NOTE_NAME_BYTES + \
3383 KEXEC_CORE_NOTE_DESC_BYTES )
3384
3385 +#ifndef KEXEC_ARCH_HAS_PAGE_MACROS
3386 +#define kexec_page_to_pfn(page) page_to_pfn(page)
3387 +#define kexec_pfn_to_page(pfn) pfn_to_page(pfn)
3388 +#define kexec_virt_to_phys(addr) virt_to_phys(addr)
3389 +#define kexec_phys_to_virt(addr) phys_to_virt(addr)
3390 +#endif
3391 +
3392 /*
3393 * This structure is used to hold the arguments that are used when loading
3394 * kernel binaries.
3395 @@ -108,6 +115,12 @@ struct kimage {
3396 extern void machine_kexec(struct kimage *image);
3397 extern int machine_kexec_prepare(struct kimage *image);
3398 extern void machine_kexec_cleanup(struct kimage *image);
3399 +#ifdef CONFIG_XEN
3400 +extern int xen_machine_kexec_load(struct kimage *image);
3401 +extern void xen_machine_kexec_unload(struct kimage *image);
3402 +extern void xen_machine_kexec_setup_resources(void);
3403 +extern void xen_machine_kexec_register_resources(struct resource *res);
3404 +#endif
3405 extern asmlinkage long sys_kexec_load(unsigned long entry,
3406 unsigned long nr_segments,
3407 struct kexec_segment __user *segments,
3408 Index: linux-2.6.27/include/linux/mm.h
3409 ===================================================================
3410 --- linux-2.6.27.orig/include/linux/mm.h
3411 +++ linux-2.6.27/include/linux/mm.h
3412 @@ -113,6 +113,9 @@ extern unsigned int kobjsize(const void
3413 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
3414 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
3415 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
3416 +#ifdef CONFIG_XEN
3417 +#define VM_FOREIGN 0x40000000 /* Has pages belonging to another VM */
3418 +#endif
3419 #define VM_PAGE_MKWRITE2 0x80000000 /* Uses page_mkwrite2 rather than page_mkwrite */
3420
3421 #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
3422 @@ -194,6 +197,11 @@ struct vm_operations_struct {
3423 */
3424 int (*access)(struct vm_area_struct *vma, unsigned long addr,
3425 void *buf, int len, int write);
3426 +
3427 + /* Area-specific function for clearing the PTE at @ptep. Returns the
3428 + * original value of @ptep. */
3429 + pte_t (*zap_pte)(struct vm_area_struct *vma,
3430 + unsigned long addr, pte_t *ptep, int is_fullmm);
3431 #ifdef CONFIG_NUMA
3432 /*
3433 * set_policy() op must add a reference to any non-NULL @new mempolicy
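
The new zap_pte hook lets a driver that maps foreign pages (see VM_FOREIGN above) tear down its own PTEs instead of letting zap_pte_range() treat them as ordinary memory. A minimal sketch; the driver name is hypothetical, and a real implementation would end the grant mapping rather than merely clear the entry:

static pte_t gntdev_zap_pte(struct vm_area_struct *vma, unsigned long addr,
                            pte_t *ptep, int is_fullmm)
{
        pte_t old = *ptep;

        pte_clear(vma->vm_mm, addr, ptep);
        return old;                     /* caller gets the original PTE */
}

static struct vm_operations_struct gntdev_vm_ops = {
        .zap_pte = gntdev_zap_pte,
};
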
3434 Index: linux-2.6.27/include/linux/oprofile.h
3435 ===================================================================
3436 --- linux-2.6.27.orig/include/linux/oprofile.h
3437 +++ linux-2.6.27/include/linux/oprofile.h
3438 @@ -16,6 +16,8 @@
3439 #include <linux/types.h>
3440 #include <linux/spinlock.h>
3441 #include <asm/atomic.h>
3442 +
3443 +#include <xen/interface/xenoprof.h>
3444
3445 /* Each escaped entry is prefixed by ESCAPE_CODE
3446 * then one of the following codes, then the
3447 @@ -28,7 +30,7 @@
3448 #define CPU_SWITCH_CODE 2
3449 #define COOKIE_SWITCH_CODE 3
3450 #define KERNEL_ENTER_SWITCH_CODE 4
3451 -#define KERNEL_EXIT_SWITCH_CODE 5
3452 +#define USER_ENTER_SWITCH_CODE 5
3453 #define MODULE_LOADED_CODE 6
3454 #define CTX_TGID_CODE 7
3455 #define TRACE_BEGIN_CODE 8
3456 @@ -36,6 +38,7 @@
3457 #define XEN_ENTER_SWITCH_CODE 10
3458 #define SPU_PROFILING_CODE 11
3459 #define SPU_CTX_SWITCH_CODE 12
3460 +#define DOMAIN_SWITCH_CODE 13
3461
3462 struct super_block;
3463 struct dentry;
3464 @@ -47,6 +50,11 @@ struct oprofile_operations {
3465 /* create any necessary configuration files in the oprofile fs.
3466 * Optional. */
3467 int (*create_files)(struct super_block * sb, struct dentry * root);
3468 + /* setup active domains with Xen */
3469 + int (*set_active)(int *active_domains, unsigned int adomains);
3470 + /* setup passive domains with Xen */
3471 + int (*set_passive)(int *passive_domains, unsigned int pdomains);
3472 +
3473 /* Do any necessary interrupt setup. Optional. */
3474 int (*setup)(void);
3475 /* Do any necessary interrupt shutdown. Optional. */
3476 @@ -106,6 +114,8 @@ void oprofile_add_pc(unsigned long pc, i
3477 /* add a backtrace entry, to be called from the ->backtrace callback */
3478 void oprofile_add_trace(unsigned long eip);
3479
3480 +/* add a domain switch entry */
3481 +int oprofile_add_domain_switch(int32_t domain_id);
3482
3483 /**
3484 * Create a file of the given name as a child of the given root, with
3485 Index: linux-2.6.27/include/linux/page-flags.h
3486 ===================================================================
3487 --- linux-2.6.27.orig/include/linux/page-flags.h
3488 +++ linux-2.6.27/include/linux/page-flags.h
3489 @@ -98,6 +98,9 @@ enum pageflags {
3490 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
3491 PG_uncached, /* Page has been mapped as uncached */
3492 #endif
3493 +#ifdef CONFIG_XEN
3494 + PG_foreign, /* Page is owned by foreign allocator. */
3495 +#endif
3496 __NR_PAGEFLAGS,
3497
3498 /* Filesystems */
3499 @@ -271,6 +274,19 @@ static inline void SetPageUptodate(struc
3500
3501 CLEARPAGEFLAG(Uptodate, uptodate)
3502
3503 +#define PageForeign(page) test_bit(PG_foreign, &(page)->flags)
3504 +#define SetPageForeign(_page, dtor) do { \
3505 + set_bit(PG_foreign, &(_page)->flags); \
3506 + BUG_ON((dtor) == (void (*)(struct page *))0); \
3507 + (_page)->index = (long)(dtor); \
3508 +} while (0)
3509 +#define ClearPageForeign(page) do { \
3510 + clear_bit(PG_foreign, &(page)->flags); \
3511 + (page)->index = 0; \
3512 +} while (0)
3513 +#define PageForeignDestructor(_page) \
3514 + ((void (*)(struct page *))(_page)->index)(_page)
3515 +
3516 extern void cancel_dirty_page(struct page *page, unsigned int account_size);
3517
3518 int test_clear_page_writeback(struct page *page);
3519 @@ -341,9 +357,18 @@ PAGEFLAG(MemError, memerror)
3520 PAGEFLAG_FALSE(MemError)
3521 #endif
3522
3523 +#if !defined(CONFIG_XEN)
3524 +# define PAGE_FLAGS_XEN 0
3525 +#elif defined(CONFIG_X86)
3526 +# define PAGE_FLAGS_XEN ((1 << PG_pinned) | (1 << PG_foreign))
3527 +#else
3528 +# define PAGE_FLAGS_XEN (1 << PG_foreign)
3529 +#endif
3530 +
3531 #define PAGE_FLAGS (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \
3532 1 << PG_buddy | 1 << PG_writeback | 1 << PG_waiters | \
3533 - 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active)
3534 + 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
3535 + PAGE_FLAGS_XEN)
3536
3537 /*
3538 * Flags checked in bad_page(). Pages on the free list should not have
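
The intended use of the PageForeign machinery: a free path checks the flag and, instead of freeing the page, invokes the destructor stashed in page->index, handing the page back to its foreign owner (presumably what the mm/page_alloc.c hunk in this patch's diffstat wires up). A sketch of the dispatch:

/* Sketch: give a foreign page back to its owner instead of freeing it. */
static inline int try_free_foreign_page(struct page *page)
{
        if (!PageForeign(page))
                return 0;
        PageForeignDestructor(page);    /* calls the dtor stored in ->index */
        return 1;                       /* owner reclaimed it; skip freeing */
}
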
3539 Index: linux-2.6.27/include/linux/pci.h
3540 ===================================================================
3541 --- linux-2.6.27.orig/include/linux/pci.h
3542 +++ linux-2.6.27/include/linux/pci.h
3543 @@ -211,6 +211,9 @@ struct pci_dev {
3544 * directly, use the values stored here. They might be different!
3545 */
3546 unsigned int irq;
3547 +#ifdef CONFIG_XEN
3548 + unsigned int irq_old;
3549 +#endif
3550 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
3551
3552 /* These fields are used by common fixups */
3553 @@ -772,6 +775,11 @@ static inline int pci_msi_enabled(void)
3554 {
3555 return 0;
3556 }
3557 +
3558 +#ifdef CONFIG_XEN
3559 +#define register_msi_get_owner(func) 0
3560 +#define unregister_msi_get_owner(func) 0
3561 +#endif
3562 #else
3563 extern int pci_enable_msi(struct pci_dev *dev);
3564 extern void pci_msi_shutdown(struct pci_dev *dev);
3565 @@ -784,6 +792,10 @@ extern void msi_remove_pci_irq_vectors(s
3566 extern void pci_restore_msi_state(struct pci_dev *dev);
3567 extern int pci_msi_enabled(void);
3568
3569 +#ifdef CONFIG_XEN
3570 +extern int register_msi_get_owner(int (*func)(struct pci_dev *dev));
3571 +extern int unregister_msi_get_owner(int (*func)(struct pci_dev *dev));
3572 +#endif
3573 #endif
3574
3575 #ifndef CONFIG_PCIEASPM
3576 Index: linux-2.6.27/include/linux/skbuff.h
3577 ===================================================================
3578 --- linux-2.6.27.orig/include/linux/skbuff.h
3579 +++ linux-2.6.27/include/linux/skbuff.h
3580 @@ -217,6 +217,8 @@ typedef unsigned char *sk_buff_data_t;
3581 * @local_df: allow local fragmentation
3582 * @cloned: Head may be cloned (check refcnt to be sure)
3583 * @nohdr: Payload reference only, must not modify header
3584 + * @proto_data_valid: Protocol data validated since arriving at localhost
3585 + * @proto_csum_blank: Protocol csum must be added before leaving localhost
3586 * @pkt_type: Packet class
3587 * @fclone: skbuff clone status
3588 * @ip_summed: Driver fed us an IP checksum
3589 @@ -323,7 +325,11 @@ struct sk_buff {
3590 #ifdef CONFIG_NETVM
3591 __u8 emergency:1;
3592 #endif
3593 - /* 12-16 bit hole */
3594 +#ifdef CONFIG_XEN
3595 + __u8 proto_data_valid:1,
3596 + proto_csum_blank:1;
3597 +#endif
3598 + /* 10-16 bit hole */
3599
3600 #ifdef CONFIG_NET_DMA
3601 dma_cookie_t dma_cookie;
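
Note: the two new bits carry checksum state across the Xen frontend/backend
boundary: proto_csum_blank marks a TCP/UDP checksum that still has to be
filled in before the packet leaves the host, proto_data_valid marks one
that has already been verified. A sketch of how an inter-domain receive
path might set them; the function is invented, the fields are from the hunk:

    static void tag_guest_skb(struct sk_buff *skb, bool csum_deferred)
    {
            if (csum_deferred)
                    skb->proto_csum_blank = 1;  /* fixed up later by
                                                 * skb_checksum_setup() */
            else
                    skb->proto_data_valid = 1;  /* already validated */
    }
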
3602 Index: linux-2.6.27/include/linux/vermagic.h
3603 ===================================================================
3604 --- linux-2.6.27.orig/include/linux/vermagic.h
3605 +++ linux-2.6.27/include/linux/vermagic.h
3606 @@ -22,6 +22,11 @@
3607 #else
3608 #define MODULE_VERMAGIC_MODVERSIONS ""
3609 #endif
3610 +#ifdef CONFIG_XEN
3611 +#define MODULE_VERMAGIC_XEN "Xen "
3612 +#else
3613 +#define MODULE_VERMAGIC_XEN
3614 +#endif
3615 #ifndef MODULE_ARCH_VERMAGIC
3616 #define MODULE_ARCH_VERMAGIC ""
3617 #endif
3618 @@ -30,5 +35,5 @@
3619 UTS_RELEASE " " \
3620 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
3621 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
3622 - MODULE_ARCH_VERMAGIC
3623 + MODULE_VERMAGIC_XEN MODULE_ARCH_VERMAGIC
3624
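
Note: adding "Xen " to the vermagic string makes module loading fail across
the Xen/non-Xen boundary, since vermagic must match exactly at insmod time.
Roughly, depending on the rest of the configuration:

    /* Illustrative vermagic strings (exact contents are config-dependent):
     *   without CONFIG_XEN: "2.6.27.25 SMP mod_unload 586 "
     *   with CONFIG_XEN:    "2.6.27.25 SMP mod_unload Xen 586 "
     */
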
3625 Index: linux-2.6.27/kernel/irq/spurious.c
3626 ===================================================================
3627 --- linux-2.6.27.orig/kernel/irq/spurious.c
3628 +++ linux-2.6.27/kernel/irq/spurious.c
3629 @@ -193,7 +193,7 @@ void note_interrupt(unsigned int irq, st
3630 */
3631 if (time_after(jiffies, desc->last_unhandled + HZ/10))
3632 desc->irqs_unhandled = 1;
3633 - else
3634 + else if (!irq_ignore_unhandled(irq))
3635 desc->irqs_unhandled++;
3636 desc->last_unhandled = jiffies;
3637 if (unlikely(action_ret != IRQ_NONE))
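
Note: irq_ignore_unhandled() is not defined in this hunk; it comes from the
Xen interrupt headers (include/linux/interrupt.h is also touched by this
patch). The point is that an IRQ handed to another domain can legitimately
look unhandled in this kernel, so it must not count towards the
spurious-IRQ disable threshold. A plausible non-Xen fallback, stated here
only as an assumption:

    /* Assumed fallback when no Xen definition is present: never ignore. */
    #ifndef irq_ignore_unhandled
    #define irq_ignore_unhandled(irq) 0
    #endif
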
3638 Index: linux-2.6.27/kernel/kexec.c
3639 ===================================================================
3640 --- linux-2.6.27.orig/kernel/kexec.c
3641 +++ linux-2.6.27/kernel/kexec.c
3642 @@ -359,13 +359,26 @@ static int kimage_is_destination_range(s
3643 return 0;
3644 }
3645
3646 -static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
3647 +static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order, unsigned long limit)
3648 {
3649 struct page *pages;
3650
3651 pages = alloc_pages(gfp_mask, order);
3652 if (pages) {
3653 unsigned int count, i;
3654 +#ifdef CONFIG_XEN
3655 + int address_bits;
3656 +
3657 + if (limit == ~0UL)
3658 + address_bits = BITS_PER_LONG;
3659 + else
3660 + address_bits = long_log2(limit);
3661 +
3662 + if (xen_limit_pages_to_max_mfn(pages, order, address_bits) < 0) {
3663 + __free_pages(pages, order);
3664 + return NULL;
3665 + }
3666 +#endif
3667 pages->mapping = NULL;
3668 set_page_private(pages, order);
3669 count = 1 << order;
3670 @@ -384,6 +397,9 @@ static void kimage_free_pages(struct pag
3671 count = 1 << order;
3672 for (i = 0; i < count; i++)
3673 ClearPageReserved(page + i);
3674 +#ifdef CONFIG_XEN
3675 + xen_destroy_contiguous_region((unsigned long)page_address(page), order);
3676 +#endif
3677 __free_pages(page, order);
3678 }
3679
3680 @@ -429,10 +445,10 @@ static struct page *kimage_alloc_normal_
3681 do {
3682 unsigned long pfn, epfn, addr, eaddr;
3683
3684 - pages = kimage_alloc_pages(GFP_KERNEL, order);
3685 + pages = kimage_alloc_pages(GFP_KERNEL, order, KEXEC_CONTROL_MEMORY_LIMIT);
3686 if (!pages)
3687 break;
3688 - pfn = page_to_pfn(pages);
3689 + pfn = kexec_page_to_pfn(pages);
3690 epfn = pfn + count;
3691 addr = pfn << PAGE_SHIFT;
3692 eaddr = epfn << PAGE_SHIFT;
3693 @@ -466,6 +482,7 @@ static struct page *kimage_alloc_normal_
3694 return pages;
3695 }
3696
3697 +#ifndef CONFIG_XEN
3698 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
3699 unsigned int order)
3700 {
3701 @@ -519,7 +536,7 @@ static struct page *kimage_alloc_crash_c
3702 }
3703 /* If I don't overlap any segments I have found my hole! */
3704 if (i == image->nr_segments) {
3705 - pages = pfn_to_page(hole_start >> PAGE_SHIFT);
3706 + pages = kexec_pfn_to_page(hole_start >> PAGE_SHIFT);
3707 break;
3708 }
3709 }
3710 @@ -546,6 +563,13 @@ struct page *kimage_alloc_control_pages(
3711
3712 return pages;
3713 }
3714 +#else /* !CONFIG_XEN */
3715 +struct page *kimage_alloc_control_pages(struct kimage *image,
3716 + unsigned int order)
3717 +{
3718 + return kimage_alloc_normal_control_pages(image, order);
3719 +}
3720 +#endif
3721
3722 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
3723 {
3724 @@ -561,7 +585,7 @@ static int kimage_add_entry(struct kimag
3725 return -ENOMEM;
3726
3727 ind_page = page_address(page);
3728 - *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
3729 + *image->entry = kexec_virt_to_phys(ind_page) | IND_INDIRECTION;
3730 image->entry = ind_page;
3731 image->last_entry = ind_page +
3732 ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
3733 @@ -620,13 +644,13 @@ static void kimage_terminate(struct kima
3734 #define for_each_kimage_entry(image, ptr, entry) \
3735 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
3736 ptr = (entry & IND_INDIRECTION)? \
3737 - phys_to_virt((entry & PAGE_MASK)): ptr +1)
3738 + kexec_phys_to_virt((entry & PAGE_MASK)): ptr +1)
3739
3740 static void kimage_free_entry(kimage_entry_t entry)
3741 {
3742 struct page *page;
3743
3744 - page = pfn_to_page(entry >> PAGE_SHIFT);
3745 + page = kexec_pfn_to_page(entry >> PAGE_SHIFT);
3746 kimage_free_pages(page);
3747 }
3748
3749 @@ -638,6 +662,10 @@ static void kimage_free(struct kimage *i
3750 if (!image)
3751 return;
3752
3753 +#ifdef CONFIG_XEN
3754 + xen_machine_kexec_unload(image);
3755 +#endif
3756 +
3757 kimage_free_extra_pages(image);
3758 for_each_kimage_entry(image, ptr, entry) {
3759 if (entry & IND_INDIRECTION) {
3760 @@ -713,7 +741,7 @@ static struct page *kimage_alloc_page(st
3761 * have a match.
3762 */
3763 list_for_each_entry(page, &image->dest_pages, lru) {
3764 - addr = page_to_pfn(page) << PAGE_SHIFT;
3765 + addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
3766 if (addr == destination) {
3767 list_del(&page->lru);
3768 return page;
3769 @@ -724,16 +752,16 @@ static struct page *kimage_alloc_page(st
3770 kimage_entry_t *old;
3771
3772 /* Allocate a page, if we run out of memory give up */
3773 - page = kimage_alloc_pages(gfp_mask, 0);
3774 + page = kimage_alloc_pages(gfp_mask, 0, KEXEC_SOURCE_MEMORY_LIMIT);
3775 if (!page)
3776 return NULL;
3777 /* If the page cannot be used file it away */
3778 - if (page_to_pfn(page) >
3779 + if (kexec_page_to_pfn(page) >
3780 (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
3781 list_add(&page->lru, &image->unuseable_pages);
3782 continue;
3783 }
3784 - addr = page_to_pfn(page) << PAGE_SHIFT;
3785 + addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
3786
3787 /* If it is the destination page we want use it */
3788 if (addr == destination)
3789 @@ -756,7 +784,7 @@ static struct page *kimage_alloc_page(st
3790 struct page *old_page;
3791
3792 old_addr = *old & PAGE_MASK;
3793 - old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
3794 + old_page = kexec_pfn_to_page(old_addr >> PAGE_SHIFT);
3795 copy_highpage(page, old_page);
3796 *old = addr | (*old & ~PAGE_MASK);
3797
3798 @@ -812,7 +840,7 @@ static int kimage_load_normal_segment(st
3799 result = -ENOMEM;
3800 goto out;
3801 }
3802 - result = kimage_add_page(image, page_to_pfn(page)
3803 + result = kimage_add_page(image, kexec_page_to_pfn(page)
3804 << PAGE_SHIFT);
3805 if (result < 0)
3806 goto out;
3807 @@ -844,6 +872,7 @@ out:
3808 return result;
3809 }
3810
3811 +#ifndef CONFIG_XEN
3812 static int kimage_load_crash_segment(struct kimage *image,
3813 struct kexec_segment *segment)
3814 {
3815 @@ -866,7 +895,7 @@ static int kimage_load_crash_segment(str
3816 char *ptr;
3817 size_t uchunk, mchunk;
3818
3819 - page = pfn_to_page(maddr >> PAGE_SHIFT);
3820 + page = kexec_pfn_to_page(maddr >> PAGE_SHIFT);
3821 if (!page) {
3822 result = -ENOMEM;
3823 goto out;
3824 @@ -915,6 +944,13 @@ static int kimage_load_segment(struct ki
3825
3826 return result;
3827 }
3828 +#else /* CONFIG_XEN */
3829 +static int kimage_load_segment(struct kimage *image,
3830 + struct kexec_segment *segment)
3831 +{
3832 + return kimage_load_normal_segment(image, segment);
3833 +}
3834 +#endif
3835
3836 /*
3837 * Exec Kernel system call: for obvious reasons only root may call it.
3838 @@ -1018,6 +1054,13 @@ SYSCALL_DEFINE4(kexec_load, unsigned lon
3839 }
3840 kimage_terminate(image);
3841 }
3842 +#ifdef CONFIG_XEN
3843 + if (image) {
3844 + result = xen_machine_kexec_load(image);
3845 + if (result)
3846 + goto out;
3847 + }
3848 +#endif
3849 /* Install the new kernel, and Uninstall the old */
3850 image = xchg(dest_image, image);
3851
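
Note: the pattern throughout the kexec.c changes is page_to_pfn() and
friends replaced by kexec_* wrappers, because under Xen the image must be
assembled using machine frame numbers rather than pseudo-physical ones, and
control pages must sit below a machine-address limit (hence the extra limit
argument and xen_limit_pages_to_max_mfn()). On non-Xen builds the wrappers
are expected to be plain aliases, along these lines (an assumption; the
actual fallbacks would live in the include/linux/kexec.h part of this
patch):

    /* Assumed identity fallbacks for !CONFIG_XEN builds: */
    #ifndef kexec_page_to_pfn
    # define kexec_page_to_pfn(page)   page_to_pfn(page)
    # define kexec_pfn_to_page(pfn)    pfn_to_page(pfn)
    # define kexec_virt_to_phys(addr)  virt_to_phys(addr)
    # define kexec_phys_to_virt(addr)  phys_to_virt(addr)
    #endif
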
3852 Index: linux-2.6.27/kernel/sysctl.c
3853 ===================================================================
3854 --- linux-2.6.27.orig/kernel/sysctl.c
3855 +++ linux-2.6.27/kernel/sysctl.c
3856 @@ -751,7 +751,7 @@ static struct ctl_table kern_table[] = {
3857 .proc_handler = &proc_dointvec,
3858 },
3859 #endif
3860 -#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86)
3861 +#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86) && !defined(CONFIG_ACPI_PV_SLEEP)
3862 {
3863 .procname = "acpi_video_flags",
3864 .data = &acpi_realmode_flags,
3865 Index: linux-2.6.27/mm/memory.c
3866 ===================================================================
3867 --- linux-2.6.27.orig/mm/memory.c
3868 +++ linux-2.6.27/mm/memory.c
3869 @@ -446,6 +446,12 @@ struct page *vm_normal_page(struct vm_ar
3870 {
3871 unsigned long pfn;
3872
3873 +#if defined(CONFIG_XEN) && defined(CONFIG_X86)
3874 + /* XEN: Covers user-space grant mappings (even of local pages). */
3875 + if (unlikely(vma->vm_flags & VM_FOREIGN))
3876 + return NULL;
3877 +#endif
3878 +
3879 if (HAVE_PTE_SPECIAL) {
3880 if (likely(!pte_special(pte))) {
3881 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
3882 @@ -474,7 +480,14 @@ struct page *vm_normal_page(struct vm_ar
3883 }
3884 }
3885
3886 +#ifndef CONFIG_XEN
3887 VM_BUG_ON(!pfn_valid(pfn));
3888 +#else
3889 + if (unlikely(!pfn_valid(pfn))) {
3890 + VM_BUG_ON(!(vma->vm_flags & VM_RESERVED));
3891 + return NULL;
3892 + }
3893 +#endif
3894
3895 /*
3896 * NOTE! We still have PageReserved() pages in the page tables.
3897 @@ -745,8 +758,12 @@ static unsigned long zap_pte_range(struc
3898 page->index > details->last_index))
3899 continue;
3900 }
3901 - ptent = ptep_get_and_clear_full(mm, addr, pte,
3902 - tlb->fullmm);
3903 + if (unlikely(vma->vm_ops && vma->vm_ops->zap_pte))
3904 + ptent = vma->vm_ops->zap_pte(vma, addr, pte,
3905 + tlb->fullmm);
3906 + else
3907 + ptent = ptep_get_and_clear_full(mm, addr, pte,
3908 + tlb->fullmm);
3909 tlb_remove_tlb_entry(tlb, pte, addr);
3910 if (unlikely(!page))
3911 continue;
3912 @@ -996,6 +1013,7 @@ unsigned long zap_page_range(struct vm_a
3913 tlb_finish_mmu(tlb, address, end);
3914 return end;
3915 }
3916 +EXPORT_SYMBOL(zap_page_range);
3917
3918 /**
3919 * zap_vma_ptes - remove ptes mapping the vma
3920 @@ -1193,6 +1211,26 @@ int get_user_pages(struct task_struct *t
3921 continue;
3922 }
3923
3924 +#ifdef CONFIG_XEN
3925 + if (vma && (vma->vm_flags & VM_FOREIGN)) {
3926 + struct page **map = vma->vm_private_data;
3927 + int offset = (start - vma->vm_start) >> PAGE_SHIFT;
3928 + if (map[offset] != NULL) {
3929 + if (pages) {
3930 + struct page *page = map[offset];
3931 +
3932 + pages[i] = page;
3933 + get_page(page);
3934 + }
3935 + if (vmas)
3936 + vmas[i] = vma;
3937 + i++;
3938 + start += PAGE_SIZE;
3939 + len--;
3940 + continue;
3941 + }
3942 + }
3943 +#endif
3944 if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
3945 || !(vm_flags & vma->vm_flags))
3946 return i ? : -EFAULT;
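
Note: two Xen hooks appear in mm/memory.c. First, VMAs flagged VM_FOREIGN
keep their page array in vm_private_data, so get_user_pages() can resolve
grant-mapped user pages without walking page tables that may not contain
ordinary PTEs. Second, vm_ops->zap_pte lets the owner of such a mapping
intercept PTE teardown in zap_pte_range(); the operation only exists with
this patch applied (see the include/linux/mm.h entry in the diffstat). A
sketch of a driver using it; the handler body is illustrative:

    /* Signature as called from zap_pte_range() above. */
    static pte_t example_zap_pte(struct vm_area_struct *vma,
                                 unsigned long addr, pte_t *ptep,
                                 int is_fullmm)
    {
            /* A real handler would unmap the grant here instead of
             * (or before) the plain PTE clear. */
            return ptep_get_and_clear_full(vma->vm_mm, addr, ptep,
                                           is_fullmm);
    }

    static struct vm_operations_struct example_vm_ops = {
            .zap_pte = example_zap_pte,
    };
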
3947 Index: linux-2.6.27/mm/mprotect.c
3948 ===================================================================
3949 --- linux-2.6.27.orig/mm/mprotect.c
3950 +++ linux-2.6.27/mm/mprotect.c
3951 @@ -92,6 +92,8 @@ static inline void change_pmd_range(stru
3952 next = pmd_addr_end(addr, end);
3953 if (pmd_none_or_clear_bad(pmd))
3954 continue;
3955 + if (arch_change_pte_range(mm, pmd, addr, next, newprot))
3956 + continue;
3957 change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
3958 } while (pmd++, addr = next, addr != end);
3959 }
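
Note: arch_change_pte_range() gives the architecture a chance to handle a
whole PMD during mprotect(); a non-zero return means "done, skip the
generic change_pte_range()". For architectures without a special version
one would expect a trivial fallback, presumably in the
include/asm-generic/pgtable.h hunk of this patch (an assumption):

    /* Assumed generic no-op: always fall through to change_pte_range(). */
    #ifndef arch_change_pte_range
    #define arch_change_pte_range(mm, pmd, addr, end, newprot) 0
    #endif
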
3960 Index: linux-2.6.27/mm/page_alloc.c
3961 ===================================================================
3962 --- linux-2.6.27.orig/mm/page_alloc.c
3963 +++ linux-2.6.27/mm/page_alloc.c
3964 @@ -533,6 +533,12 @@ static void __free_pages_ok(struct page
3965 int i;
3966 int reserved = 0;
3967
3968 +#ifdef CONFIG_XEN
3969 + if (PageForeign(page)) {
3970 + PageForeignDestructor(page);
3971 + return;
3972 + }
3973 +#endif
3974 trace_page_free(page, order);
3975
3976 for (i = 0 ; i < (1 << order) ; ++i)
3977 @@ -995,6 +1001,12 @@ static void free_hot_cold_page(struct pa
3978 struct per_cpu_pages *pcp;
3979 unsigned long flags;
3980
3981 +#ifdef CONFIG_XEN
3982 + if (PageForeign(page)) {
3983 + PageForeignDestructor(page);
3984 + return;
3985 + }
3986 +#endif
3987 trace_page_free(page, 0);
3988
3989 if (PageAnon(page))
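
Note: both allocator free paths now divert foreign pages before any
free-page accounting runs: a frame owned by a foreign allocator is handed
back through the destructor stored in page->index instead of entering the
buddy lists. Tying this to the page-flags.h sketch earlier, the observable
behaviour is:

    /* Sketch: freeing a PG_foreign page calls the owner's destructor.
     * example_dtor is the hypothetical callback from the earlier note. */
    static void demo_free(struct page *page)
    {
            SetPageForeign(page, example_dtor);
            /* Intercepted: free_hot_cold_page() sees PG_foreign and
             * calls example_dtor(page) instead of freeing the page. */
            __free_page(page);
    }
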
3990 Index: linux-2.6.27/net/core/dev.c
3991 ===================================================================
3992 --- linux-2.6.27.orig/net/core/dev.c
3993 +++ linux-2.6.27/net/core/dev.c
3994 @@ -131,6 +131,12 @@
3995
3996 #include "net-sysfs.h"
3997
3998 +#ifdef CONFIG_XEN
3999 +#include <net/ip.h>
4000 +#include <linux/tcp.h>
4001 +#include <linux/udp.h>
4002 +#endif
4003 +
4004 /*
4005 * The list of packet types we will receive (as opposed to discard)
4006 * and the routines to invoke.
4007 @@ -1734,6 +1740,42 @@ static struct netdev_queue *dev_pick_tx(
4008 return netdev_get_tx_queue(dev, queue_index);
4009 }
4010
4011 +#ifdef CONFIG_XEN
4012 +inline int skb_checksum_setup(struct sk_buff *skb)
4013 +{
4014 + if (skb->proto_csum_blank) {
4015 + if (skb->protocol != htons(ETH_P_IP))
4016 + goto out;
4017 + skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
4018 + if (skb->h.raw >= skb->tail)
4019 + goto out;
4020 + switch (skb->nh.iph->protocol) {
4021 + case IPPROTO_TCP:
4022 + skb->csum = offsetof(struct tcphdr, check);
4023 + break;
4024 + case IPPROTO_UDP:
4025 + skb->csum = offsetof(struct udphdr, check);
4026 + break;
4027 + default:
4028 + if (net_ratelimit())
4029 + printk(KERN_ERR "Attempting to checksum a non-"
4030 + "TCP/UDP packet, dropping a protocol"
4031 + " %d packet", skb->nh.iph->protocol);
4032 + goto out;
4033 + }
4034 + if ((skb->h.raw + skb->csum + 2) > skb->tail)
4035 + goto out;
4036 + skb->ip_summed = CHECKSUM_HW;
4037 + skb->proto_csum_blank = 0;
4038 + }
4039 + return 0;
4040 +out:
4041 + return -EPROTO;
4042 +}
4043 +#else
4044 +inline int skb_checksum_setup(struct sk_buff *skb) { return 0; }
4045 +#endif
4046 +
4047 /**
4048 * dev_queue_xmit - transmit a buffer
4049 * @skb: buffer to transmit
4050 @@ -1766,6 +1808,12 @@ int dev_queue_xmit(struct sk_buff *skb)
4051 struct Qdisc *q;
4052 int rc = -ENOMEM;
4053
4054 + /* If a checksum-deferred packet is forwarded to a device that needs a
4055 + * checksum, correct the pointers and force checksumming.
4056 + */
4057 + if (skb_checksum_setup(skb))
4058 + goto out_kfree_skb;
4059 +
4060 /* GSO will handle the following emulations directly. */
4061 if (netif_needs_gso(dev, skb))
4062 goto gso;
4063 @@ -2274,6 +2322,19 @@ int netif_receive_skb(struct sk_buff *sk
4064 }
4065 #endif
4066
4067 +#ifdef CONFIG_XEN
4068 + switch (skb->ip_summed) {
4069 + case CHECKSUM_UNNECESSARY:
4070 + skb->proto_data_valid = 1;
4071 + break;
4072 + case CHECKSUM_HW:
4073 + /* XXX Implement me. */
4074 + default:
4075 + skb->proto_data_valid = 0;
4076 + break;
4077 + }
4078 +#endif
4079 +
4080 if (skb_emergency(skb))
4081 goto skip_taps;
4082
4083 @@ -4928,6 +4989,7 @@ EXPORT_SYMBOL(unregister_netdevice_notif
4084 EXPORT_SYMBOL(net_enable_timestamp);
4085 EXPORT_SYMBOL(net_disable_timestamp);
4086 EXPORT_SYMBOL(dev_get_flags);
4087 +EXPORT_SYMBOL(skb_checksum_setup);
4088
4089 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4090 EXPORT_SYMBOL(br_handle_frame_hook);
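
Note: skb_checksum_setup() turns a proto_csum_blank packet into ordinary
hardware-checksum state: h.raw is pointed at the transport header and
skb->csum is set to the offset of the checksum field inside it, which is
what the CHECKSUM_HW transmit path expects; anything that is not plain IPv4
TCP/UDP gets -EPROTO. The hunk still uses the pre-2.6.22 field names
(skb->nh, skb->h, CHECKSUM_HW) because this file is auto-generated from the
linux-2.6.18-xen.hg tree; compile fixups are left to later patches in the
series. The calling convention, as also used in the netfilter hunks below:

    /* Usage pattern: normalise checksum state before editing transport
     * headers; treat a non-zero return as an undeliverable packet. */
    static int example_deliver(struct sk_buff *skb)
    {
            if (skb_checksum_setup(skb)) {
                    kfree_skb(skb);
                    return -EPROTO;
            }
            /* ... safe to rewrite ports and update checksums here ... */
            return 0;
    }
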
4091 Index: linux-2.6.27/net/core/skbuff.c
4092 ===================================================================
4093 --- linux-2.6.27.orig/net/core/skbuff.c
4094 +++ linux-2.6.27/net/core/skbuff.c
4095 @@ -555,6 +555,10 @@ static struct sk_buff *__skb_clone(struc
4096 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
4097 n->cloned = 1;
4098 n->nohdr = 0;
4099 +#ifdef CONFIG_XEN
4100 + C(proto_data_valid);
4101 + C(proto_csum_blank);
4102 +#endif
4103 n->destructor = NULL;
4104 C(iif);
4105 C(tail);
4106 Index: linux-2.6.27/net/ipv4/netfilter/nf_nat_proto_tcp.c
4107 ===================================================================
4108 --- linux-2.6.27.orig/net/ipv4/netfilter/nf_nat_proto_tcp.c
4109 +++ linux-2.6.27/net/ipv4/netfilter/nf_nat_proto_tcp.c
4110 @@ -75,6 +75,9 @@ tcp_manip_pkt(struct sk_buff *skb,
4111 if (hdrsize < sizeof(*hdr))
4112 return true;
4113
4114 + if (skb_checksum_setup(skb))
4115 + return false;
4116 +
4117 inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
4118 inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
4119 return true;
4120 Index: linux-2.6.27/net/ipv4/netfilter/nf_nat_proto_udp.c
4121 ===================================================================
4122 --- linux-2.6.27.orig/net/ipv4/netfilter/nf_nat_proto_udp.c
4123 +++ linux-2.6.27/net/ipv4/netfilter/nf_nat_proto_udp.c
4124 @@ -60,6 +60,10 @@ udp_manip_pkt(struct sk_buff *skb,
4125 newport = tuple->dst.u.udp.port;
4126 portptr = &hdr->dest;
4127 }
4128 +
4129 + if (skb_checksum_setup(skb))
4130 + return false;
4131 +
4132 if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) {
4133 inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
4134 inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
4135 Index: linux-2.6.27/net/ipv4/xfrm4_output.c
4136 ===================================================================
4137 --- linux-2.6.27.orig/net/ipv4/xfrm4_output.c
4138 +++ linux-2.6.27/net/ipv4/xfrm4_output.c
4139 @@ -81,7 +81,7 @@ static int xfrm4_output_finish(struct sk
4140 #endif
4141
4142 skb->protocol = htons(ETH_P_IP);
4143 - return xfrm_output(skb);
4144 + return skb_checksum_setup(skb) ?: xfrm_output(skb);
4145 }
4146
4147 int xfrm4_output(struct sk_buff *skb)
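
Note: the "?:" used here is GCC's two-operand conditional; a ?: b evaluates
a once and yields it when non-zero, otherwise b. The return statement thus
propagates a skb_checksum_setup() error and only calls xfrm_output() when
the checksum state was fixed up successfully. Equivalent standard C:

    static int example_output_finish(struct sk_buff *skb)
    {
            int err = skb_checksum_setup(skb);

            return err ? err : xfrm_output(skb);
    }
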
4148 Index: linux-2.6.27/scripts/Makefile.build
4149 ===================================================================
4150 --- linux-2.6.27.orig/scripts/Makefile.build
4151 +++ linux-2.6.27/scripts/Makefile.build
4152 @@ -73,6 +73,20 @@ ifndef obj
4153 $(warning kbuild: Makefile.build is included improperly)
4154 endif
4155
4156 +ifeq ($(CONFIG_XEN),y)
4157 +$(objtree)/scripts/Makefile.xen: $(srctree)/scripts/Makefile.xen.awk $(srctree)/scripts/Makefile.build
4158 + @echo ' Updating $@'
4159 + $(if $(shell echo a | $(AWK) '{ print gensub(/a/, "AA", "g"); }'),\
4160 + ,$(error 'Your awk program does not define gensub. Use gawk or another awk with gensub'))
4161 + @$(AWK) -f $< $(filter-out $<,$^) >$@
4162 +
4163 +xen-src-single-used-m := $(patsubst $(srctree)/%,%,$(wildcard $(addprefix $(srctree)/,$(single-used-m:.o=-xen.c))))
4164 +xen-single-used-m := $(xen-src-single-used-m:-xen.c=.o)
4165 +single-used-m := $(filter-out $(xen-single-used-m),$(single-used-m))
4166 +
4167 +-include $(objtree)/scripts/Makefile.xen
4168 +endif
4169 +
4170 # ===========================================================================
4171
4172 ifneq ($(strip $(lib-y) $(lib-m) $(lib-n) $(lib-)),)
4173 Index: linux-2.6.27/scripts/Makefile.lib
4174 ===================================================================
4175 --- linux-2.6.27.orig/scripts/Makefile.lib
4176 +++ linux-2.6.27/scripts/Makefile.lib
4177 @@ -17,6 +17,12 @@ obj-m := $(filter-out $(obj-y),$(obj-m))
4178
4179 lib-y := $(filter-out $(obj-y), $(sort $(lib-y) $(lib-m)))
4180
4181 +# Remove objects forcibly disabled
4182 +
4183 +obj-y := $(filter-out $(disabled-obj-y),$(obj-y))
4184 +obj-m := $(filter-out $(disabled-obj-y),$(obj-m))
4185 +lib-y := $(filter-out $(disabled-obj-y),$(lib-y))
4186 +
4187
4188 # Handle objects in subdirs
4189 # ---------------------------------------------------------------------------