Subject: Fix xen build.
From: jbeulich@novell.com
Patch-mainline: obsolete

---
 drivers/acpi/hardware/hwsleep.c   |    2 ++
 drivers/ide/ide-lib.c             |   11 +++++++++++
 drivers/oprofile/buffer_sync.c    |   35 ++++++++++++++++++++++++-----------
 drivers/oprofile/cpu_buffer.c     |    6 ++++++
 drivers/oprofile/oprof.c          |    2 ++
 drivers/oprofile/oprofile_files.c |    6 ++++++
 include/linux/mm.h                |    2 ++
 include/linux/oprofile.h          |    6 ++++--
 mm/memory.c                       |    2 ++
 9 files changed, 59 insertions(+), 13 deletions(-)

--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -430,6 +430,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_stat
  * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
  *
  ******************************************************************************/
+#ifndef CONFIG_XEN
 acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
 {
 	u32 in_value;
@@ -479,6 +480,7 @@ acpi_status asmlinkage acpi_enter_sleep_
 }
 
 ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
+#endif
 
 /*******************************************************************************
  *
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -177,6 +177,16 @@ void ide_toggle_bounce(ide_drive_t *driv
 {
 	u64 addr = BLK_BOUNCE_HIGH;	/* dma64_addr_t */
 
+#ifndef CONFIG_XEN
+	if (!PCI_DMA_BUS_IS_PHYS) {
+		addr = BLK_BOUNCE_ANY;
+	} else if (on && drive->media == ide_disk) {
+		struct device *dev = drive->hwif->dev;
+
+		if (dev && dev->dma_mask)
+			addr = *dev->dma_mask;
+	}
+#else
 	if (on && drive->media == ide_disk) {
 		struct device *dev = drive->hwif->dev;
 
@@ -185,6 +195,7 @@ void ide_toggle_bounce(ide_drive_t *driv
 		else if (dev && dev->dma_mask)
 			addr = *dev->dma_mask;
 	}
+#endif
 
 	if (drive->queue)
 		blk_queue_bounce_limit(drive->queue, addr);
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -44,7 +44,9 @@ static cpumask_t marked_cpus = CPU_MASK_
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
 
+#ifdef CONFIG_XEN
 static int cpu_current_domain[NR_CPUS];
+#endif
 
 /* Take ownership of the task struct and place it on the
  * list for processing. Only after two full buffer syncs
@@ -153,11 +155,13 @@ static void end_sync(void)
 int sync_start(void)
 {
 	int err;
+#ifdef CONFIG_XEN
 	int i;
 
 	for (i = 0; i < NR_CPUS; i++) {
 		cpu_current_domain[i] = COORDINATOR_DOMAIN;
 	}
+#endif
 
 	start_cpu_work();
 
@@ -302,12 +306,14 @@ static void add_cpu_mode_switch(unsigned
 	}
 }
 
+#ifdef CONFIG_XEN
 static void add_domain_switch(unsigned long domain_id)
 {
 	add_event_entry(ESCAPE_CODE);
 	add_event_entry(DOMAIN_SWITCH_CODE);
 	add_event_entry(domain_id);
 }
+#endif
 
 static void
 add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
@@ -531,11 +537,14 @@ void sync_buffer(int cpu)
 
 	add_cpu_switch(cpu);
 
+#ifdef CONFIG_XEN
 	/* We need to assign the first samples in this CPU buffer to the
 	   same domain that we were processing at the last sync_buffer */
 	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
 		add_domain_switch(cpu_current_domain[cpu]);
 	}
+#endif
+
 	/* Remember, only we can modify tail_pos */
 
 	available = get_slots(cpu_buf);
@@ -553,8 +562,10 @@ void sync_buffer(int cpu)
 		} else if (s->event == CPU_TRACE_BEGIN) {
 			state = sb_bt_start;
 			add_trace_begin();
+#ifdef CONFIG_XEN
 		} else if (s->event == CPU_DOMAIN_SWITCH) {
-			domain_switch = 1;
+			domain_switch = 1;
+#endif
 		} else {
 			struct mm_struct * oldmm = mm;
 
@@ -568,21 +579,21 @@ void sync_buffer(int cpu)
 				add_user_ctx_switch(new, cookie);
 			}
 		} else {
+#ifdef CONFIG_XEN
 			if (domain_switch) {
 				cpu_current_domain[cpu] = s->eip;
 				add_domain_switch(s->eip);
 				domain_switch = 0;
-			} else {
-				if (cpu_current_domain[cpu] !=
+			} else if (cpu_current_domain[cpu] !=
 				   COORDINATOR_DOMAIN) {
-					add_sample_entry(s->eip, s->event);
-				}
-				else if (state >= sb_bt_start &&
-					!add_sample(mm, s, cpu_mode)) {
-					if (state == sb_bt_start) {
-						state = sb_bt_ignore;
-						atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-					}
+				add_sample_entry(s->eip, s->event);
+			} else
+#endif
+			if (state >= sb_bt_start &&
+			    !add_sample(mm, s, cpu_mode)) {
+				if (state == sb_bt_start) {
+					state = sb_bt_ignore;
+					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
 				}
 			}
 		}
@@ -591,10 +602,12 @@ void sync_buffer(int cpu)
 	}
 	release_mm(mm);
 
+#ifdef CONFIG_XEN
 	/* We reset domain to COORDINATOR at each CPU switch */
 	if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
 		add_domain_switch(COORDINATOR_DOMAIN);
 	}
+#endif
 
 	mark_done(cpu);
 
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -38,7 +38,11 @@ static void wq_sync_buffer(struct work_s
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
 static int work_enabled;
 
+#ifndef CONFIG_XEN
+#define current_domain COORDINATOR_DOMAIN
+#else
 static int32_t current_domain = COORDINATOR_DOMAIN;
+#endif
 
 void free_cpu_buffers(void)
 {
@@ -303,6 +307,7 @@ void oprofile_add_trace(unsigned long pc
 	add_sample(cpu_buf, pc, 0);
 }
 
+#ifdef CONFIG_XEN
 int oprofile_add_domain_switch(int32_t domain_id)
 {
 	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
@@ -321,6 +326,7 @@ int oprofile_add_domain_switch(int32_t d
 
 	return 1;
 }
+#endif
 
 /*
  * This serves to avoid cpu buffer overflow, and makes sure
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -37,6 +37,7 @@ static DEFINE_MUTEX(start_mutex);
  */
 static int timer = 0;
 
+#ifdef CONFIG_XEN
 int oprofile_set_active(int active_domains[], unsigned int adomains)
 {
 	int err;
@@ -62,6 +63,7 @@ int oprofile_set_passive(int passive_dom
 	mutex_unlock(&start_mutex);
 	return err;
 }
+#endif
 
 int oprofile_setup(void)
 {
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -124,6 +124,8 @@ static const struct file_operations dump
 	.write		= dump_write,
 };
 
+#ifdef CONFIG_XEN
+
 #define TMPBUFSIZE 512
 
 static unsigned int adomains = 0;
@@ -313,12 +315,16 @@ static struct file_operations passive_do
 	.write		= pdomain_write,
 };
 
+#endif /* CONFIG_XEN */
+
 void oprofile_create_files(struct super_block * sb, struct dentry * root)
 {
 	oprofilefs_create_file(sb, root, "enable", &enable_fops);
 	oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
+#ifdef CONFIG_XEN
 	oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
 	oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
+#endif
 	oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
 	oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
 	oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -187,10 +187,12 @@ struct vm_operations_struct {
 	int (*access)(struct vm_area_struct *vma, unsigned long addr,
 		      void *buf, int len, int write);
 
+#ifdef CONFIG_XEN
 	/* Area-specific function for clearing the PTE at @ptep. Returns the
 	 * original value of @ptep. */
 	pte_t (*zap_pte)(struct vm_area_struct *vma,
 			 unsigned long addr, pte_t *ptep, int is_fullmm);
+#endif
 #ifdef CONFIG_NUMA
 	/*
 	 * set_policy() op must add a reference to any non-NULL @new mempolicy
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -16,8 +16,9 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
-
+#ifdef CONFIG_XEN
 #include <xen/interface/xenoprof.h>
+#endif
 
 /* Each escaped entry is prefixed by ESCAPE_CODE
  * then one of the following codes, then the
@@ -50,11 +51,12 @@ struct oprofile_operations {
 	/* create any necessary configuration files in the oprofile fs.
 	 * Optional. */
 	int (*create_files)(struct super_block * sb, struct dentry * root);
+#ifdef CONFIG_XEN
 	/* setup active domains with Xen */
 	int (*set_active)(int *active_domains, unsigned int adomains);
 	/* setup passive domains with Xen */
 	int (*set_passive)(int *passive_domains, unsigned int pdomains);
-
+#endif
 	/* Do any necessary interrupt setup. Optional. */
 	int (*setup)(void);
 	/* Do any necessary interrupt shutdown. Optional. */
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -758,10 +758,12 @@ static unsigned long zap_pte_range(struc
 				     page->index > details->last_index))
 					continue;
 			}
+#ifdef CONFIG_XEN
 			if (unlikely(vma->vm_ops && vma->vm_ops->zap_pte))
 				ptent = vma->vm_ops->zap_pte(vma, addr, pte,
 							     tlb->fullmm);
 			else
+#endif
 				ptent = ptep_get_and_clear_full(mm, addr, pte,
 								tlb->fullmm);
 			tlb_remove_tlb_entry(tlb, pte, addr);