5.19-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 3 Oct 2022 06:24:24 +0000 (08:24 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 3 Oct 2022 06:24:24 +0000 (08:24 +0200)
added patches:
damon-sysfs-fix-possible-memleak-on-damon_sysfs_add_target.patch
x86-alternative-fix-race-in-try_get_desc.patch
x86-cacheinfo-add-a-cpu_llc_shared_mask-up-variant.patch

queue-5.19/damon-sysfs-fix-possible-memleak-on-damon_sysfs_add_target.patch [new file with mode: 0644]
queue-5.19/series
queue-5.19/x86-alternative-fix-race-in-try_get_desc.patch [new file with mode: 0644]
queue-5.19/x86-cacheinfo-add-a-cpu_llc_shared_mask-up-variant.patch [new file with mode: 0644]

diff --git a/queue-5.19/damon-sysfs-fix-possible-memleak-on-damon_sysfs_add_target.patch b/queue-5.19/damon-sysfs-fix-possible-memleak-on-damon_sysfs_add_target.patch
new file mode 100644
index 0000000..959c4a2
--- /dev/null
+++ b/queue-5.19/damon-sysfs-fix-possible-memleak-on-damon_sysfs_add_target.patch
@@ -0,0 +1,45 @@
+From 1c8e2349f2d033f634d046063b704b2ca6c46972 Mon Sep 17 00:00:00 2001
+From: Levi Yun <ppbuk5246@gmail.com>
+Date: Mon, 26 Sep 2022 16:06:11 +0000
+Subject: damon/sysfs: fix possible memleak on damon_sysfs_add_target
+
+From: Levi Yun <ppbuk5246@gmail.com>
+
+commit 1c8e2349f2d033f634d046063b704b2ca6c46972 upstream.
+
+When damon_sysfs_add_target couldn't find a proper task, the newly
+allocated damon_target structure wasn't registered yet, so
+damon_sysfs_destroy_targets had no way to free it.
+
+Fix this possible memory leak by calling damon_add_target as soon as
+the new target is allocated.
+
+Link: https://lkml.kernel.org/r/20220926160611.48536-1-sj@kernel.org
+Fixes: a61ea561c871 ("mm/damon/sysfs: link DAMON for virtual address spaces monitoring")
+Signed-off-by: Levi Yun <ppbuk5246@gmail.com>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org>   [5.17.x]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/sysfs.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/damon/sysfs.c
++++ b/mm/damon/sysfs.c
+@@ -2181,13 +2181,13 @@ static int damon_sysfs_add_target(struct
+       if (!t)
+               return -ENOMEM;
++      damon_add_target(ctx, t);
+       if (ctx->ops.id == DAMON_OPS_VADDR ||
+                       ctx->ops.id == DAMON_OPS_FVADDR) {
+               t->pid = find_get_pid(sys_target->pid);
+               if (!t->pid)
+                       goto destroy_targets_out;
+       }
+-      damon_add_target(ctx, t);
+       err = damon_sysfs_set_regions(t, sys_target->regions);
+       if (err)
+               goto destroy_targets_out;
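
The pattern behind the leak is general: an object that is freed only by
walking a registration list leaks on any error path taken before it is
registered. A minimal user-space sketch of the pattern and of the fix's
"register first" ordering (simplified, hypothetical names; not the
kernel code):

    #include <stdlib.h>

    struct target { struct target *next; int pid; };
    static struct target *targets;          /* list of registered targets */

    /* The error path can only free what is already on the list. */
    static void destroy_targets(void)
    {
            while (targets) {
                    struct target *t = targets;
                    targets = t->next;
                    free(t);
            }
    }

    static int add_target(int pid)
    {
            struct target *t = malloc(sizeof(*t));
            if (!t)
                    return -1;
            /* Register immediately, as the fix does ... */
            t->next = targets;
            targets = t;
            if (pid < 0) {
                    /* ... so a later failure can still free t. */
                    destroy_targets();
                    return -1;
            }
            t->pid = pid;
            return 0;
    }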
diff --git a/queue-5.19/series b/queue-5.19/series
index 1cd733f2fc173b4ac1d9ea2ea618cc4eec248d19..399475893306de8f0e50eaa26f4c0155a6df7de4 100644
--- a/queue-5.19/series
+++ b/queue-5.19/series
@@ -96,3 +96,6 @@ net-ethernet-mtk_eth_soc-fix-mask-of-rx_dma_get_spor.patch
 perf-test-fix-test-case-87-perf-record-tests-for-hyb.patch
 perf-tests-record-fail-the-test-if-the-errs-counter-.patch
 kvm-x86-hide-ia32_platform_dca_cap-31-0-from-the-gue.patch
+x86-cacheinfo-add-a-cpu_llc_shared_mask-up-variant.patch
+x86-alternative-fix-race-in-try_get_desc.patch
+damon-sysfs-fix-possible-memleak-on-damon_sysfs_add_target.patch
diff --git a/queue-5.19/x86-alternative-fix-race-in-try_get_desc.patch b/queue-5.19/x86-alternative-fix-race-in-try_get_desc.patch
new file mode 100644
index 0000000..2d5a695
--- /dev/null
+++ b/queue-5.19/x86-alternative-fix-race-in-try_get_desc.patch
@@ -0,0 +1,167 @@
+From efd608fa7403ba106412b437f873929e2c862e28 Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Wed, 21 Sep 2022 18:09:32 +0000
+Subject: x86/alternative: Fix race in try_get_desc()
+
+From: Nadav Amit <namit@vmware.com>
+
+commit efd608fa7403ba106412b437f873929e2c862e28 upstream.
+
+I encountered occasional crashes of poke_int3_handler() while it
+accessed desc->vec, when kprobes were set.
+
+The text poke mechanism claims to have RCU-like behavior, but there
+does not appear to be any quiescent state to ensure that nobody holds
+a reference to desc. As a result, the following race appears to be
+possible, and can lead to memory corruption.
+
+  CPU0                                 CPU1
+  ----                                 ----
+  text_poke_bp_batch()
+  -> smp_store_release(&bp_desc, &desc)
+
+  [ notice that desc is on
+    the stack                  ]
+
+                                       poke_int3_handler()
+
+                                       [ int3 might be a kprobe's,
+                                         so sync events do not
+                                         help ]
+
+                                       -> try_get_desc(descp=&bp_desc)
+                                          desc = __READ_ONCE(bp_desc)
+
+                                          if (!desc) [false, success]
+  WRITE_ONCE(bp_desc, NULL);
+  atomic_dec_and_test(&desc.refs)
+
+  [ success, desc space on the stack
+    is being reused and might hold a
+    non-zero value. ]
+                                       arch_atomic_inc_not_zero(&desc->refs)
+
+                                       [ might succeed since desc points to
+                                         stack memory that was freed and might
+                                         be reused. ]
+
+Fix this issue with a small, backportable patch. Instead of trying to
+implement RCU-like behavior for bp_desc, just eliminate the
+unnecessary level of indirection of bp_desc and hold the whole
+descriptor as a global. There is only a single descriptor at any
+given moment anyway.
+
+Fixes: 1f676247f36a4 ("x86/alternatives: Implement a better poke_int3_handler() completion scheme")
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@kernel.org
+Link: https://lkml.kernel.org/r/20220920224743.3089-1-namit@vmware.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/alternative.c |   45 +++++++++++++++++++++---------------------
+ 1 file changed, 23 insertions(+), 22 deletions(-)
+
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -1319,22 +1319,23 @@ struct bp_patching_desc {
+       atomic_t refs;
+ };
+-static struct bp_patching_desc *bp_desc;
++static struct bp_patching_desc bp_desc;
+ static __always_inline
+-struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
++struct bp_patching_desc *try_get_desc(void)
+ {
+-      /* rcu_dereference */
+-      struct bp_patching_desc *desc = __READ_ONCE(*descp);
++      struct bp_patching_desc *desc = &bp_desc;
+-      if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
++      if (!arch_atomic_inc_not_zero(&desc->refs))
+               return NULL;
+       return desc;
+ }
+-static __always_inline void put_desc(struct bp_patching_desc *desc)
++static __always_inline void put_desc(void)
+ {
++      struct bp_patching_desc *desc = &bp_desc;
++
+       smp_mb__before_atomic();
+       arch_atomic_dec(&desc->refs);
+ }
+@@ -1367,15 +1368,15 @@ noinstr int poke_int3_handler(struct pt_
+       /*
+        * Having observed our INT3 instruction, we now must observe
+-       * bp_desc:
++       * bp_desc with non-zero refcount:
+        *
+-       *      bp_desc = desc                  INT3
++       *      bp_desc.refs = 1                INT3
+        *      WMB                             RMB
+-       *      write INT3                      if (desc)
++       *      write INT3                      if (bp_desc.refs != 0)
+        */
+       smp_rmb();
+-      desc = try_get_desc(&bp_desc);
++      desc = try_get_desc();
+       if (!desc)
+               return 0;
+@@ -1429,7 +1430,7 @@ noinstr int poke_int3_handler(struct pt_
+       ret = 1;
+ out_put:
+-      put_desc(desc);
++      put_desc();
+       return ret;
+ }
+@@ -1460,18 +1461,20 @@ static int tp_vec_nr;
+  */
+ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
+ {
+-      struct bp_patching_desc desc = {
+-              .vec = tp,
+-              .nr_entries = nr_entries,
+-              .refs = ATOMIC_INIT(1),
+-      };
+       unsigned char int3 = INT3_INSN_OPCODE;
+       unsigned int i;
+       int do_sync;
+       lockdep_assert_held(&text_mutex);
+-      smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */
++      bp_desc.vec = tp;
++      bp_desc.nr_entries = nr_entries;
++
++      /*
++       * Corresponds to the implicit memory barrier in try_get_desc() to
++       * ensure reading a non-zero refcount provides up to date bp_desc data.
++       */
++      atomic_set_release(&bp_desc.refs, 1);
+       /*
+        * Corresponding read barrier in int3 notifier for making sure the
+@@ -1559,12 +1562,10 @@ static void text_poke_bp_batch(struct te
+               text_poke_sync();
+       /*
+-       * Remove and synchronize_rcu(), except we have a very primitive
+-       * refcount based completion.
++       * Remove and wait for refs to be zero.
+        */
+-      WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
+-      if (!atomic_dec_and_test(&desc.refs))
+-              atomic_cond_read_acquire(&desc.refs, !VAL);
++      if (!atomic_dec_and_test(&bp_desc.refs))
++              atomic_cond_read_acquire(&bp_desc.refs, !VAL);
+ }
+ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
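
The scheme the patch settles on is a refcount used as a completion on a
single global descriptor: readers pin it only while its refcount is
non-zero, and the writer publishes with a release store and then waits
for the count to drain. A user-space analogue using C11 atomics in
place of the kernel's arch_atomic_*/smp_* primitives (illustrative
names, not the kernel implementation):

    #include <stdatomic.h>
    #include <stddef.h>

    struct patch_desc { void *vec; int nr_entries; atomic_int refs; };
    static struct patch_desc desc;      /* one global descriptor, as in the fix */

    /* Reader side (poke_int3_handler's role): pin only if refs != 0. */
    static struct patch_desc *try_get_desc(void)
    {
            int refs = atomic_load_explicit(&desc.refs, memory_order_acquire);

            do {
                    if (refs == 0)
                            return NULL;  /* no patching in progress */
            } while (!atomic_compare_exchange_weak(&desc.refs, &refs, refs + 1));
            return &desc;
    }

    static void put_desc(void)
    {
            atomic_fetch_sub_explicit(&desc.refs, 1, memory_order_release);
    }

    /* Writer side (text_poke_bp_batch's role). */
    static void publish_patch_and_wait(void *vec, int nr)
    {
            desc.vec = vec;
            desc.nr_entries = nr;
            /* Release store: a reader seeing refs != 0 also sees vec/nr. */
            atomic_store_explicit(&desc.refs, 1, memory_order_release);

            /* ... install breakpoints, patch text, sync cores ... */

            /* Drop our ref; if a reader still holds one, wait it out. */
            if (atomic_fetch_sub(&desc.refs, 1) != 1)
                    while (atomic_load_explicit(&desc.refs, memory_order_acquire))
                            ;
    }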
diff --git a/queue-5.19/x86-cacheinfo-add-a-cpu_llc_shared_mask-up-variant.patch b/queue-5.19/x86-cacheinfo-add-a-cpu_llc_shared_mask-up-variant.patch
new file mode 100644
index 0000000..1fc0ce7
--- /dev/null
+++ b/queue-5.19/x86-cacheinfo-add-a-cpu_llc_shared_mask-up-variant.patch
@@ -0,0 +1,76 @@
+From df5b035b5683d6a25f077af889fb88e09827f8bc Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 19 Aug 2022 19:47:44 +0200
+Subject: x86/cacheinfo: Add a cpu_llc_shared_mask() UP variant
+
+From: Borislav Petkov <bp@suse.de>
+
+commit df5b035b5683d6a25f077af889fb88e09827f8bc upstream.
+
+On a CONFIG_SMP=n kernel, the LLC shared mask is 0, which prevents
+__cache_amd_cpumap_setup() from doing the L3 masks setup, and more
+specifically from setting up the shared_cpu_map and shared_cpu_list
+files in sysfs, leading to lscpu from util-linux getting confused and
+segfaulting.
+
+Add a cpu_llc_shared_mask() UP variant which returns a mask with a
+single bit set, i.e., for CPU0.
+
+Fixes: 2b83809a5e6d ("x86/cpu/amd: Derive L3 shared_cpu_map from cpu_llc_shared_mask")
+Reported-by: Saurabh Sengar <ssengar@linux.microsoft.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/1660148115-302-1-git-send-email-ssengar@linux.microsoft.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/smp.h |   25 +++++++++++++++----------
+ 1 file changed, 15 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -21,16 +21,6 @@ DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc
+ DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id);
+ DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+-static inline struct cpumask *cpu_llc_shared_mask(int cpu)
+-{
+-      return per_cpu(cpu_llc_shared_map, cpu);
+-}
+-
+-static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
+-{
+-      return per_cpu(cpu_l2c_shared_map, cpu);
+-}
+-
+ DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
+ DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
+ DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
+@@ -172,6 +162,16 @@ extern int safe_smp_processor_id(void);
+ # define safe_smp_processor_id()      smp_processor_id()
+ #endif
++static inline struct cpumask *cpu_llc_shared_mask(int cpu)
++{
++      return per_cpu(cpu_llc_shared_map, cpu);
++}
++
++static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
++{
++      return per_cpu(cpu_l2c_shared_map, cpu);
++}
++
+ #else /* !CONFIG_SMP */
+ #define wbinvd_on_cpu(cpu)     wbinvd()
+ static inline int wbinvd_on_all_cpus(void)
+@@ -179,6 +179,11 @@ static inline int wbinvd_on_all_cpus(voi
+       wbinvd();
+       return 0;
+ }
++
++static inline struct cpumask *cpu_llc_shared_mask(int cpu)
++{
++      return (struct cpumask *)cpumask_of(0);
++}
+ #endif /* CONFIG_SMP */
+ extern unsigned disabled_cpus;
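
The failure mode here is generic: code that iterates over a CPU mask to
populate per-CPU state silently does nothing when the mask is empty,
which is why the sysfs files never appeared and lscpu crashed. A toy
sketch of the empty-mask pitfall and the single-bit UP fallback
(illustrative, not the kernel implementation):

    #include <stdio.h>

    #define NR_CPUS 4
    typedef unsigned long cpumask_t;       /* toy mask: one bit per CPU */

    static cpumask_t llc_shared_mask;      /* filled by topology code on SMP */

    static cpumask_t cpu_llc_shared_mask(int smp)
    {
            if (smp)
                    return llc_shared_mask;
            return 1UL << 0;               /* UP variant: just CPU0 */
    }

    int main(void)
    {
            /* With a zero mask, the loop body never runs, so nothing
             * like a "shared_cpu_map" entry would ever be created. */
            cpumask_t mask = cpu_llc_shared_mask(/* smp = */ 0);

            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    if (mask & (1UL << cpu))
                            printf("cpu%d shares the LLC\n", cpu);
            return 0;
    }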