return temp_state;
}
+__ro_after_init struct mm_struct *poking_mm;
+__ro_after_init unsigned long poking_addr;
+
static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
lockdep_assert_irqs_disabled();
+
switch_mm_irqs_off(NULL, prev_state.mm, current);
+ /* Clear the cpumask, to indicate no TLB flushing is needed anywhere */
+ cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(poking_mm));
+
	/*
	 * Restore the breakpoints if they were disabled before the temporary mm
	 * was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}
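/*
 * A minimal userspace sketch (not kernel code) of the lazy mm_cpumask
 * scheme introduced by this patch: a context switch away from an mm leaves
 * the CPU's bit set, and a later flush IPI that finds the mm no longer
 * loaded clears the bit and skips the flush. All names here (fake_mm,
 * fake_switch, fake_flush_ipi) are hypothetical.
 */
#include <stdio.h>

#define NR_CPUS 4

struct fake_mm {
	unsigned long cpumask;	/* bit n set: CPU n may cache this mm's translations */
};

static struct fake_mm *loaded_mm[NR_CPUS];

/* Switch 'cpu' to 'next': set next's bit if needed, leave prev's bit alone. */
static void fake_switch(int cpu, struct fake_mm *next)
{
	if (!(next->cpumask & (1UL << cpu)))
		next->cpumask |= 1UL << cpu;	/* an atomic RMW in the kernel */
	loaded_mm[cpu] = next;
	/* The previous mm's bit stays set; fake_flush_ipi() clears it lazily. */
}

/* Flush IPI handler: a CPU no longer running 'mm' clears its own stale bit. */
static void fake_flush_ipi(int cpu, struct fake_mm *mm)
{
	if (loaded_mm[cpu] != mm) {
		mm->cpumask &= ~(1UL << cpu);	/* lazy removal */
		return;
	}
	printf("CPU%d: flushing TLB\n", cpu);
}

int main(void)
{
	struct fake_mm a = { 0 }, b = { 0 };

	fake_switch(0, &a);	/* CPU0 runs a; bit 0 set in a.cpumask */
	fake_switch(0, &b);	/* CPU0 runs b; a's bit 0 is now stale */
	fake_flush_ipi(0, &a);	/* spurious IPI clears a's stale bit */
	printf("a.cpumask=%#lx b.cpumask=%#lx\n", a.cpumask, b.cpumask);
	return 0;
}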
-__ro_after_init struct mm_struct *poking_mm;
-__ro_after_init unsigned long poking_addr;
-
static void text_poke_memcpy(void *dst, const void *src, size_t len)
{
memcpy(dst, src, len);
cond_mitigation(tsk);
/*
- * Stop remote flushes for the previous mm.
- * Skip kernel threads; we never send init_mm TLB flushing IPIs,
- * but the bitmap manipulation can cause cache line contention.
+ * Leave this CPU in prev's mm_cpumask. Atomic writes to
+ * mm_cpumask can be expensive under contention. The CPU
+ * will be removed lazily at TLB flush time.
*/
- if (prev != &init_mm) {
- VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
- mm_cpumask(prev)));
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
- }
+ VM_WARN_ON_ONCE(prev != &init_mm && !cpumask_test_cpu(cpu,
+ mm_cpumask(prev)));
/* Start receiving IPIs and then read tlb_gen (and LAM below) */
- if (next != &init_mm)
+ if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))
cpumask_set_cpu(cpu, mm_cpumask(next));
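	/*
	 * Note: testing the bit before cpumask_set_cpu() above skips the
	 * atomic RMW, and the cache-line dirtying it implies, in what is
	 * now the common case: with lazy clearing, the bit often survives
	 * from an earlier stint of this mm on this CPU.
	 */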
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
/* Can only happen on remote CPUs */
- if (f->mm && f->mm != loaded_mm)
+ if (f->mm && f->mm != loaded_mm) {
+ cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(f->mm));
return;
+ }
}
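	/*
	 * The cpumask_clear_cpu() above is the lazy-removal point: a bit in
	 * mm_cpumask() may be stale-set, but it is only cleared once this CPU
	 * confirms the mm is not loaded here, so a needed flush is never
	 * skipped; a stale bit costs at most one spurious flush IPI per CPU.
	 */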
if (unlikely(loaded_mm == &init_mm))