git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
x86/mm: Build arch/x86/mm/tlb.c even on !SMP
author Andy Lutomirski <luto@kernel.org>
Tue, 26 Apr 2016 16:39:07 +0000 (09:39 -0700)
committer Ben Hutchings <ben@decadent.org.uk>
Sun, 7 Jan 2018 01:46:47 +0000 (01:46 +0000)
commit e1074888c326038340a1ada9129d679e661f2ea6 upstream.

Currently all of the functions that live in tlb.c are inlined on
!SMP builds.  One can debate whether this is a good idea (in many
respects the code in tlb.c is better than the inlined UP code).

Regardless, I want to add code that needs to be built on UP and SMP
kernels and relates to tlb flushing, so arrange for tlb.c to be
compiled unconditionally.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/f0d778f0d828fc46e5d1946bca80f0aaf9abf032.1461688545.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
arch/x86/mm/Makefile
arch/x86/mm/tlb.c

index 3d11327c9ab4630cd0aa687dcf7118bc3efb4d25..cf2a84031dfd25172e637556fbfe25e622918804 100644 (file)
@@ -1,5 +1,5 @@
 obj-y  :=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-           pat.o pgtable.o physaddr.o gup.o setup_nx.o
+           pat.o pgtable.o physaddr.o gup.o setup_nx.o tlb.o
 
 # Make sure __phys_addr has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)
@@ -7,7 +7,6 @@ CFLAGS_physaddr.o               := $(nostackp)
 CFLAGS_setup_nx.o              := $(nostackp)
 
 obj-$(CONFIG_X86_PAT)          += pat_rbtree.o
-obj-$(CONFIG_SMP)              += tlb.o
 
 obj-$(CONFIG_X86_32)           += pgtable_32.o iomap_32.o
 
index 55034a15f13cc5d825a8dd8a292858fc06d27e77..8b731b6b461344b77084463c7043dcbb7439b3ac 100644 (file)
@@ -38,6 +38,8 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
  *     fixed, at the cost of triggering multiple IPIs in some cases.
  */
 
+#ifdef CONFIG_SMP
+
 union smp_flush_state {
        struct {
                struct mm_struct *flush_mm;
@@ -350,3 +352,5 @@ void flush_tlb_all(void)
 {
        on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
+
+#endif /* CONFIG_SMP */