Subject: be more aggressive about de-activating mm-s under destruction
From: jbeulich@novell.com
Patch-mainline: obsolete

... by not only handling the current task on the CPU arch_exit_mmap()
gets executed on, but also forcing remote CPUs to do so.

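For reference, a minimal sketch of the cross-CPU call pattern the final hunk
relies on (not part of the patch; remote_mm_callback() and run_on_mm_cpus()
are hypothetical names used only for illustration). smp_call_function_mask()
IPIs the other CPUs in the given mask and, with wait=1, returns only once all
callbacks have finished; the callback runs in interrupt context, so it must
not sleep, which is presumably why _leave_active_mm() below only trylocks the
task's alloc_lock.

	#include <linux/smp.h>
	#include <linux/sched.h>
	#include <linux/preempt.h>

	/* Runs on each targeted CPU, in IPI context: no sleeping, trylock only. */
	static void remote_mm_callback(void *info)
	{
		struct mm_struct *mm = info;

		if (current->active_mm == mm)
			pr_debug("cpu%d still had the defunct mm active\n",
				 smp_processor_id());
	}

	static void run_on_mm_cpus(struct mm_struct *mm)
	{
		preempt_disable();	/* keep cpu_vm_mask and this CPU stable */
		smp_call_function_mask(mm->cpu_vm_mask, remote_mm_callback, mm, 1);
		preempt_enable();
	}
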
Index: head-2008-12-01/arch/x86/mm/pgtable-xen.c
===================================================================
--- head-2008-12-01.orig/arch/x86/mm/pgtable-xen.c	2008-12-01 12:13:06.000000000 +0100
+++ head-2008-12-01/arch/x86/mm/pgtable-xen.c	2008-12-01 12:13:16.000000000 +0100
@@ -418,27 +418,44 @@ void arch_dup_mmap(struct mm_struct *old
 	mm_pin(mm);
 }
 
-void arch_exit_mmap(struct mm_struct *mm)
+/*
+ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas() *much*
+ * faster this way, as no hypercalls are needed for the page table updates.
+ */
+static void leave_active_mm(struct task_struct *tsk, struct mm_struct *mm)
+	__releases(tsk->alloc_lock)
 {
-	struct task_struct *tsk = current;
-
-	task_lock(tsk);
-
-	/*
-	 * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
-	 * *much* faster this way, as no tlb flushes means bigger wrpt batches.
-	 */
 	if (tsk->active_mm == mm) {
 		tsk->active_mm = &init_mm;
 		atomic_inc(&init_mm.mm_count);
 
 		switch_mm(mm, &init_mm, tsk);
 
-		atomic_dec(&mm->mm_count);
-		BUG_ON(atomic_read(&mm->mm_count) == 0);
+		if (atomic_dec_and_test(&mm->mm_count))
+			BUG();
 	}
 
 	task_unlock(tsk);
+}
+
+static void _leave_active_mm(void *mm)
+{
+	struct task_struct *tsk = current;
+
+	if (spin_trylock(&tsk->alloc_lock))
+		leave_active_mm(tsk, mm);
+}
+
+void arch_exit_mmap(struct mm_struct *mm)
+{
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	leave_active_mm(tsk, mm);
+
+	preempt_disable();
+	smp_call_function_mask(mm->cpu_vm_mask, _leave_active_mm, mm, 1);
+	preempt_enable();
 
 	if (PagePinned(virt_to_page(mm->pgd))
 	    && atomic_read(&mm->mm_count) == 1