From 42f3bdc5dd962a5958bc024c1e1444248a6b8b4a Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 4 Jan 2018 18:07:12 +0100
Subject: x86/events/intel/ds: Use the proper cache flush method for mapping ds buffers

From: Peter Zijlstra <peterz@infradead.org>

commit 42f3bdc5dd962a5958bc024c1e1444248a6b8b4a upstream.

Thomas reported the following warning:

 BUG: using smp_processor_id() in preemptible [00000000] code: ovsdb-server/4498
 caller is native_flush_tlb_single+0x57/0xc0
 native_flush_tlb_single+0x57/0xc0
 __set_pte_vaddr+0x2d/0x40
 set_pte_vaddr+0x2f/0x40
 cea_set_pte+0x30/0x40
 ds_update_cea.constprop.4+0x4d/0x70
 reserve_ds_buffers+0x159/0x410
 x86_reserve_hardware+0x150/0x160
 x86_pmu_event_init+0x3e/0x1f0
 perf_try_init_event+0x69/0x80
 perf_event_alloc+0x652/0x740
 SyS_perf_event_open+0x3f6/0xd60
 do_syscall_64+0x5c/0x190

set_pte_vaddr is used to map the ds buffers into the cpu entry area, but
there are two problems with that:

 1) The resulting flush is not supposed to be called in preemptible context

 2) The cpu entry area is supposed to be per CPU, but the debug store
    buffers are mapped for all CPUs so these mappings need to be flushed
    globally.

Add the necessary preemption protection across the mapping code and flush
TLBs globally.

Fixes: c1961a4631da ("x86/events/intel/ds: Map debug buffers in cpu_entry_area")
Reported-by: Thomas Zeitlhofer <thomas.zeitlhofer+lkml@ze-it.at>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Thomas Zeitlhofer <thomas.zeitlhofer+lkml@ze-it.at>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hugh Dickins <hughd@google.com>
Link: https://lkml.kernel.org/r/20180104170712.GB3040@hirez.programming.kicks-ass.net
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/events/intel/ds.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -5,6 +5,7 @@
 
 #include <asm/cpu_entry_area.h>
 #include <asm/perf_event.h>
+#include <asm/tlbflush.h>
 #include <asm/insn.h>
 
 #include "../perf_event.h"
@@ -283,20 +284,35 @@ static DEFINE_PER_CPU(void *, insn_buffe
 
 static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
 {
+	unsigned long start = (unsigned long)cea;
 	phys_addr_t pa;
 	size_t msz = 0;
 
 	pa = virt_to_phys(addr);
+
+	preempt_disable();
 	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
 		cea_set_pte(cea, pa, prot);
+
+	/*
+	 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
+	 * all TLB entries for it.
+	 */
+	flush_tlb_kernel_range(start, start + size);
+	preempt_enable();
 }
 
 static void ds_clear_cea(void *cea, size_t size)
 {
+	unsigned long start = (unsigned long)cea;
 	size_t msz = 0;
 
+	preempt_disable();
 	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
 		cea_set_pte(cea, 0, PAGE_NONE);
+
+	flush_tlb_kernel_range(start, start + size);
+	preempt_enable();
 }
 
 static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)