x86/tlb: Move __flush_tlb() out of line
author     Thomas Gleixner <tglx@linutronix.de>
           Tue, 21 Apr 2020 09:20:32 +0000 (11:20 +0200)
committer  Borislav Petkov <bp@suse.de>
           Sun, 26 Apr 2020 09:00:05 +0000 (11:00 +0200)
cpu_tlbstate is exported because various TLB-related functions need
access to it, but cpu_tlbstate is sensitive information which should
only be accessed by well-contained kernel functions and not be directly
exposed to modules.

As a first step, move __flush_tlb() out of line and hide the native
function. The latter can be static when CONFIG_PARAVIRT is disabled.

Consolidate the namespace while at it and remove the pointless extra
wrapper in the paravirt code.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200421092559.246130908@linutronix.de
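
The core of the change, sketched as a tiny standalone C program (illustrative only: the PARAVIRT define, the function-pointer stand-in, puts() and main() are assumptions for the demo, not kernel code). The native function is declared STATIC_NOPV, so it becomes static when no paravirt layer exists, and the only symbol the rest of the tree, including modules, ever calls is the out-of-line flush_tlb_local() wrapper:

#include <stdio.h>

#ifdef PARAVIRT
# define STATIC_NOPV
#else
# define STATIC_NOPV            static
# define __flush_tlb_local      native_flush_tlb_local
#endif

/* Static (hence hidden) when PARAVIRT is off, global when it is on. */
STATIC_NOPV void native_flush_tlb_local(void)
{
        puts("flush this CPU's TLB (a CR3 reload in the real kernel)");
}

#ifdef PARAVIRT
/* Crude stand-in for the pv indirection behind PVOP_VCALL0(). */
static void (*pv_flush_tlb_user)(void) = native_flush_tlb_local;
# define __flush_tlb_local()    pv_flush_tlb_user()
#endif

/* The single out-of-line entry point everyone else links against. */
void flush_tlb_local(void)
{
        __flush_tlb_local();
}

int main(void)
{
        flush_tlb_local();      /* same behaviour with or without -DPARAVIRT */
        return 0;
}

Built with or without -DPARAVIRT, both variants print the same line; only the linkage of native_flush_tlb_local() differs, which is exactly the point of the patch.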
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/tlbflush.h
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/paravirt.c
arch/x86/mm/mem_encrypt.c
arch/x86/mm/tlb.c
arch/x86/platform/uv/tlb_uv.c

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 694d8daf498376ef7e91a1a3026638e275378cab..f412450668d8f8898428a6e6900282cd7f00acec 100644
@@ -47,7 +47,9 @@ static inline void slow_down_io(void)
 #endif
 }
 
-static inline void __flush_tlb(void)
+void native_flush_tlb_local(void);
+
+static inline void __flush_tlb_local(void)
 {
        PVOP_VCALL0(mmu.flush_tlb_user);
 }
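
Conceptually, PVOP_VCALL0(mmu.flush_tlb_user) dispatches through the pv_ops function table (the real macro emits a patchable call site rather than a plain pointer dereference). A toy user-space model of that dispatch, with pv_ops_model and main() made up for the demo:

#include <stdio.h>

struct mmu_ops {
        void (*flush_tlb_user)(void);
};

struct pv_ops_model {
        struct mmu_ops mmu;
};

static void native_flush_tlb_local(void)
{
        puts("native local TLB flush");
}

/* The patch points .mmu.flush_tlb_user straight at the native function,
 * removing the pointless native_flush_tlb() wrapper. */
static struct pv_ops_model pv_ops = {
        .mmu.flush_tlb_user = native_flush_tlb_local,
};

/* Rough equivalent of PVOP_VCALL0(mmu.flush_tlb_user). */
static void __flush_tlb_local(void)
{
        pv_ops.mmu.flush_tlb_user();
}

int main(void)
{
        __flush_tlb_local();
        return 0;
}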
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d804030079da14a178882cdcd4692d1f227fe425..fe1fd02904ba2c660c48bb0934bf665db30185d1 100644
@@ -140,12 +140,13 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
        return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
 }
 
+void flush_tlb_local(void);
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
-#define __flush_tlb() __native_flush_tlb()
-#define __flush_tlb_global() __native_flush_tlb_global()
-#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
+#define __flush_tlb_global()           __native_flush_tlb_global()
+#define __flush_tlb_one_user(addr)     __native_flush_tlb_one_user(addr)
 #endif
 
 struct tlb_context {
@@ -370,24 +371,6 @@ static inline void invalidate_user_asid(u16 asid)
                  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
 }
 
-/*
- * flush the entire current user mapping
- */
-static inline void __native_flush_tlb(void)
-{
-       /*
-        * Preemption or interrupts must be disabled to protect the access
-        * to the per CPU variable and to prevent being preempted between
-        * read_cr3() and write_cr3().
-        */
-       WARN_ON_ONCE(preemptible());
-
-       invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
-
-       /* If current->mm == NULL then the read_cr3() "borrows" an mm */
-       native_write_cr3(__native_read_cr3());
-}
-
 /*
  * flush everything
  */
@@ -461,7 +444,7 @@ static inline void __flush_tlb_all(void)
                /*
                 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
                 */
-               __flush_tlb();
+               flush_tlb_local();
        }
 }
 
@@ -537,8 +520,6 @@ struct flush_tlb_info {
        bool                    freed_tables;
 };
 
-#define local_flush_tlb() __flush_tlb()
-
 #define flush_tlb_mm(mm)                                               \
                flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
 
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 51b9190c628b220b77f5ab386e6b62677ffeb185..23ad8e953dfb1502a0dea9bdbc02a507c378bf8a 100644
@@ -761,7 +761,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 
        /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-       __flush_tlb();
+       flush_tlb_local();
 
        /* Save MTRR state */
        rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
@@ -778,7 +778,7 @@ static void post_set(void) __releases(set_atomicity_lock)
 {
        /* Flush TLBs (no need to flush caches - they are disabled) */
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-       __flush_tlb();
+       flush_tlb_local();
 
        /* Intel (P6) standard MTRRs */
        mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c131ba4e70ef8229d2c9f1722f88d100f942489e..4cb3d822ea09cd2583e0e9a0031fdcb75bd2ada9 100644
@@ -160,11 +160,6 @@ unsigned paravirt_patch_insns(void *insn_buff, unsigned len,
        return insn_len;
 }
 
-static void native_flush_tlb(void)
-{
-       __native_flush_tlb();
-}
-
 /*
  * Global pages have to be flushed a bit differently. Not a real
  * performance problem because this does not happen often.
@@ -359,7 +354,7 @@ struct paravirt_patch_template pv_ops = {
 #endif /* CONFIG_PARAVIRT_XXL */
 
        /* Mmu ops. */
-       .mmu.flush_tlb_user     = native_flush_tlb,
+       .mmu.flush_tlb_user     = native_flush_tlb_local,
        .mmu.flush_tlb_kernel   = native_flush_tlb_global,
        .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
        .mmu.flush_tlb_others   = native_flush_tlb_others,
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index a03614bd3e1a26045c99ded2d20cd490657a72fa..4a781cf99e92954951a01463f88b915407420b57 100644
@@ -134,7 +134,7 @@ static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
                size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
        } while (size);
 
-       __native_flush_tlb();
+       flush_tlb_local();
 }
 
 void __init sme_unmap_bootdata(char *real_mode_data)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3d9d81951962d0de2e8517047d2f44bbb7280ecb..06116480c34300af47b6bc99295655547ec9c452 100644
@@ -18,6 +18,13 @@
 
 #include "mm_internal.h"
 
+#ifdef CONFIG_PARAVIRT
+# define STATIC_NOPV
+#else
+# define STATIC_NOPV                   static
+# define __flush_tlb_local             native_flush_tlb_local
+#endif
+
 /*
  *     TLB flushing, formerly SMP-only
  *             c/o Linus Torvalds.
@@ -645,7 +652,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
                trace_tlb_flush(reason, nr_invalidate);
        } else {
                /* Full flush. */
-               local_flush_tlb();
+               flush_tlb_local();
                if (local)
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                trace_tlb_flush(reason, TLB_FLUSH_ALL);
@@ -883,6 +890,30 @@ unsigned long __get_current_cr3_fast(void)
 }
 EXPORT_SYMBOL_GPL(__get_current_cr3_fast);
 
+/*
+ * Flush the entire current user mapping
+ */
+STATIC_NOPV void native_flush_tlb_local(void)
+{
+       /*
+        * Preemption or interrupts must be disabled to protect the access
+        * to the per CPU variable and to prevent being preempted between
+        * read_cr3() and write_cr3().
+        */
+       WARN_ON_ONCE(preemptible());
+
+       invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+
+       /* If current->mm == NULL then the read_cr3() "borrows" an mm */
+       native_write_cr3(__native_read_cr3());
+}
+
+void flush_tlb_local(void)
+{
+       __flush_tlb_local();
+}
+EXPORT_SYMBOL_GPL(flush_tlb_local);
+
 /*
  * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
  * This means that the 'struct flush_tlb_info' that describes which mappings to
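
The WARN_ON_ONCE(preemptible()) above encodes the calling convention: the per-CPU cpu_tlbstate read and the CR3 write must happen on the same CPU. A hypothetical caller sketch (example_flush_this_cpu() is illustrative, not part of the patch) for a context that may be preemptible:

#include <linux/preempt.h>
#include <asm/tlbflush.h>

/* Illustrative only: pin to the current CPU so the asid read and the
 * CR3 write in flush_tlb_local() cannot be separated by a migration. */
static void example_flush_this_cpu(void)
{
        preempt_disable();
        flush_tlb_local();
        preempt_enable();
}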
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 1fd321f37f1b154a5179cfbfe82a136359e56b3e..6af766c47dd26bca1a9260e0babf2585bb076d13 100644
@@ -293,7 +293,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
         * This must be a normal message, or retry of a normal message
         */
        if (msg->address == TLB_FLUSH_ALL) {
-               local_flush_tlb();
+               flush_tlb_local();
                stat->d_alltlb++;
        } else {
                __flush_tlb_one_user(msg->address);