From cad42366d1838d5df4206faebf86fcb258dc98a0 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 8 May 2023 10:11:24 +0200 Subject: [PATCH] 5.10-stable patches added patches: debugobject-ensure-pool-refill-again.patch --- ...debugobject-ensure-pool-refill-again.patch | 80 +++++++++++++++++++ queue-5.10/series | 1 + 2 files changed, 81 insertions(+) create mode 100644 queue-5.10/debugobject-ensure-pool-refill-again.patch diff --git a/queue-5.10/debugobject-ensure-pool-refill-again.patch b/queue-5.10/debugobject-ensure-pool-refill-again.patch new file mode 100644 index 00000000000..e630e032563 --- /dev/null +++ b/queue-5.10/debugobject-ensure-pool-refill-again.patch @@ -0,0 +1,80 @@ +From 0af462f19e635ad522f28981238334620881badc Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Mon, 1 May 2023 17:42:06 +0200 +Subject: debugobject: Ensure pool refill (again) + +From: Thomas Gleixner + +commit 0af462f19e635ad522f28981238334620881badc upstream. + +The recent fix to ensure atomicity of lookup and allocation inadvertently +broke the pool refill mechanism. + +Prior to that change debug_object_activate() and debug_object_assert_init() +invoked debug_object_init() to set up the tracking object for statically +initialized objects. That's no longer the case and debug_object_init() is +now the only place which does pool refills. + +Depending on the number of statically initialized objects this can be +enough to actually deplete the pool, which was observed by Ido via a +debugobjects OOM warning. + +Restore the old behaviour by adding explicit refill opportunities to +debug_object_activate() and debug_object_assert_init(). 
+ +Fixes: 63a759694eed ("debugobject: Prevent init race with static objects") +Reported-by: Ido Schimmel +Signed-off-by: Thomas Gleixner +Tested-by: Ido Schimmel +Link: https://lore.kernel.org/r/871qk05a9d.ffs@tglx +Signed-off-by: Greg Kroah-Hartman +--- + lib/debugobjects.c | 16 +++++++++++++++- + 1 file changed, 15 insertions(+), 1 deletion(-) + +--- a/lib/debugobjects.c ++++ b/lib/debugobjects.c +@@ -590,6 +590,16 @@ static struct debug_obj *lookup_object_o + return NULL; + } + ++static void debug_objects_fill_pool(void) ++{ ++ /* ++ * On RT enabled kernels the pool refill must happen in preemptible ++ * context: ++ */ ++ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) ++ fill_pool(); ++} ++ + static void + __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack) + { +@@ -598,7 +608,7 @@ __debug_object_init(void *addr, const st + struct debug_obj *obj; + unsigned long flags; + +- fill_pool(); ++ debug_objects_fill_pool(); + + db = get_bucket((unsigned long) addr); + +@@ -683,6 +693,8 @@ int debug_object_activate(void *addr, co + if (!debug_objects_enabled) + return 0; + ++ debug_objects_fill_pool(); ++ + db = get_bucket((unsigned long) addr); + + raw_spin_lock_irqsave(&db->lock, flags); +@@ -892,6 +904,8 @@ void debug_object_assert_init(void *addr + if (!debug_objects_enabled) + return; + ++ debug_objects_fill_pool(); ++ + db = get_bucket((unsigned long) addr); + + raw_spin_lock_irqsave(&db->lock, flags); diff --git a/queue-5.10/series b/queue-5.10/series index 059e9447810..b372e28cbf1 100644 --- a/queue-5.10/series +++ b/queue-5.10/series @@ -289,3 +289,4 @@ perf-auxtrace-fix-address-filter-entire-kernel-size.patch perf-intel-pt-fix-cyc-timestamps-after-standalone-cbr.patch arm64-always-load-shadow-stack-pointer-directly-from-the-task-struct.patch arm64-stash-shadow-stack-pointer-in-the-task-struct-on-interrupt.patch +debugobject-ensure-pool-refill-again.patch -- 2.47.3