Subject: be more aggressive about de-activating mm-s under destruction
From: jbeulich@novell.com
Patch-mainline: obsolete
... by not only handling the current task on the CPU on which arch_exit_mmap()
gets executed, but also forcing remote CPUs to do so.
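
In outline, the reworked code behaves as sketched below (a simplified sketch
of the hunk that follows; the preemption handling around the cross-CPU call
and the pgd unpinning tail are omitted, and the comments are explanatory only):

static void leave_active_mm(struct task_struct *tsk, struct mm_struct *mm)
	__releases(tsk->alloc_lock)
{
	/* If tsk still (lazily) runs on the dying mm, switch it over to
	 * init_mm so cr3 no longer references the defunct pgd. */
	if (tsk->active_mm == mm) {
		tsk->active_mm = &init_mm;
		atomic_inc(&init_mm.mm_count);
		switch_mm(mm, &init_mm, tsk);
		if (atomic_dec_and_test(&mm->mm_count))
			BUG();	/* must not drop the last reference here */
	}
	task_unlock(tsk);
}

static void _leave_active_mm(void *mm)
{
	struct task_struct *tsk = current;

	/* Runs in IPI context on remote CPUs: only trylock, since the
	 * interrupted code on this CPU may already hold the task lock. */
	if (spin_trylock(&tsk->alloc_lock))
		leave_active_mm(tsk, mm);
}

void arch_exit_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);
	leave_active_mm(tsk, mm);		/* this CPU */
	smp_call_function_mask(mm->cpu_vm_mask,	/* CPUs that may still run mm */
			       _leave_active_mm, mm, 1);
	/* ... unpin mm->pgd if nothing references it anymore ... */
}
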
Index: head-2008-12-01/arch/x86/mm/pgtable-xen.c
===================================================================
--- head-2008-12-01.orig/arch/x86/mm/pgtable-xen.c	2008-12-01 12:13:06.000000000 +0100
+++ head-2008-12-01/arch/x86/mm/pgtable-xen.c	2008-12-01 12:13:16.000000000 +0100
@@ -418,27 +418,44 @@ void arch_dup_mmap(struct mm_struct *old
 		mm_pin(mm);
 }
 
-void arch_exit_mmap(struct mm_struct *mm)
+/*
+ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas() *much*
+ * faster this way, as no hypercalls are needed for the page table updates.
+ */
+static void leave_active_mm(struct task_struct *tsk, struct mm_struct *mm)
+	__releases(tsk->alloc_lock)
 {
-	struct task_struct *tsk = current;
-
-	task_lock(tsk);
-
-	/*
-	 * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
-	 * *much* faster this way, as no tlb flushes means bigger wrpt batches.
-	 */
 	if (tsk->active_mm == mm) {
 		tsk->active_mm = &init_mm;
 		atomic_inc(&init_mm.mm_count);
 
 		switch_mm(mm, &init_mm, tsk);
 
-		atomic_dec(&mm->mm_count);
-		BUG_ON(atomic_read(&mm->mm_count) == 0);
+		if (atomic_dec_and_test(&mm->mm_count))
+			BUG();
 	}
 
 	task_unlock(tsk);
+}
+
+static void _leave_active_mm(void *mm)
+{
+	struct task_struct *tsk = current;
+
+	if (spin_trylock(&tsk->alloc_lock))
+		leave_active_mm(tsk, mm);
+}
+
+void arch_exit_mmap(struct mm_struct *mm)
+{
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	leave_active_mm(tsk, mm);
+
+	preempt_disable();
+	smp_call_function_mask(mm->cpu_vm_mask, _leave_active_mm, mm, 1);
+	preempt_enable();
 
 	if (PagePinned(virt_to_page(mm->pgd))
 	    && atomic_read(&mm->mm_count) == 1