]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
x86/split_lock: Simplify reenabling
authorMaksim Davydov <davydov-max@yandex-team.ru>
Tue, 25 Mar 2025 08:58:07 +0000 (11:58 +0300)
committerIngo Molnar <mingo@kernel.org>
Tue, 25 Mar 2025 09:16:50 +0000 (10:16 +0100)
When split_lock_mitigate is disabled, each CPU needs its own delayed_work
structure. These are used to reenable split lock detection after it has
been disabled. However, a delayed_work structure must be correctly
initialized after its allocation.

The current implementation uses deferred initialization, which makes the
split lock handler code unclear. The code can be simplified a bit
if the initialization is moved to an appropriate initcall.

sld_setup() is called before setup_per_cpu_areas(), thus it can't be used
for this purpose, so introduce an independent initcall for
the initialization.

[ mingo: Simplified the 'work' assignment line a bit more. ]

Signed-off-by: Maksim Davydov <davydov-max@yandex-team.ru>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20250325085807.171885-1-davydov-max@yandex-team.ru
arch/x86/kernel/cpu/bus_lock.c

index 97222efb4d2a6c4c29d33c4b596686d9b6837d2f..237faf7e700cb4a97a59a94374367c3e9390577b 100644 (file)
@@ -200,6 +200,26 @@ static void __split_lock_reenable(struct work_struct *work)
  */
 static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
 
+/*
+ * Per-CPU delayed_work can't be statically initialized properly because
+ * the struct address is unknown. Thus per-CPU delayed_work structures
+ * have to be initialized during kernel initialization and after calling
+ * setup_per_cpu_areas().
+ */
+static int __init setup_split_lock_delayed_work(void)
+{
+       unsigned int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct delayed_work *work = per_cpu_ptr(&sl_reenable, cpu);
+
+               INIT_DELAYED_WORK(work, __split_lock_reenable);
+       }
+
+       return 0;
+}
+pure_initcall(setup_split_lock_delayed_work);
+
 /*
  * If a CPU goes offline with pending delayed work to re-enable split lock
  * detection then the delayed work will be executed on some other CPU. That
@@ -219,15 +239,16 @@ static int splitlock_cpu_offline(unsigned int cpu)
 
 static void split_lock_warn(unsigned long ip)
 {
-       struct delayed_work *work = NULL;
+       struct delayed_work *work;
        int cpu;
+       unsigned int saved_sld_mitigate = READ_ONCE(sysctl_sld_mitigate);
 
        if (!current->reported_split_lock)
                pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
                                    current->comm, current->pid, ip);
        current->reported_split_lock = 1;
 
-       if (sysctl_sld_mitigate) {
+       if (saved_sld_mitigate) {
                /*
                 * misery factor #1:
                 * sleep 10ms before trying to execute split lock.
@@ -240,18 +261,10 @@ static void split_lock_warn(unsigned long ip)
                 */
                if (down_interruptible(&buslock_sem) == -EINTR)
                        return;
-               work = &sl_reenable_unlock;
        }
 
        cpu = get_cpu();
-
-       if (!work) {
-               work = this_cpu_ptr(&sl_reenable);
-               /* Deferred initialization of per-CPU struct */
-               if (!work->work.func)
-                       INIT_DELAYED_WORK(work, __split_lock_reenable);
-       }
-
+       work = saved_sld_mitigate ? &sl_reenable_unlock : per_cpu_ptr(&sl_reenable, cpu);
        schedule_delayed_work_on(cpu, work, 2);
 
        /* Disable split lock detection on this CPU to make progress */