workqueue: Avoid premature init of wq->node_nr_active[].max
author		Tejun Heo <tj@kernel.org>
		Wed, 31 Jan 2024 05:06:43 +0000 (19:06 -1000)
committer	Tejun Heo <tj@kernel.org>
		Wed, 31 Jan 2024 05:17:00 +0000 (19:17 -1000)
System workqueues are allocated early during boot from
workqueue_init_early(). While allocating unbound workqueues,
wq_update_node_max_active() is invoked from apply_workqueue_attrs() and
accesses NUMA topology to initialize wq->node_nr_active[].max.

However, topology information may not be set up at this point.
wq_update_node_max_active() is explicitly invoked from
workqueue_init_topology() later when topology information is known to be
available.
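
For context, workqueue bring-up is staged roughly like this (workqueue_init()
is the intermediate stage and is untouched by this patch; only the last stage
has topology information):

  workqueue_init_early()     /* stage 1: system wqs allocated, topology unknown */
  workqueue_init()           /* stage 2: worker pools become operational */
  workqueue_init_topology()  /* stage 3: pod types initialized, topology known */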

This doesn't seem to crash anything, but it does useless work on dubious
data. Let's skip the premature and duplicate node_max_active updates by
initializing the field to WQ_DFL_MIN_ACTIVE on allocation and making
wq_update_node_max_active() a no-op until workqueue_init_topology() has run.
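
The same pattern as a minimal userspace sketch (hypothetical names, not
kernel code; the real change is the patch below). A safe default is set at
allocation time and the updater stays a no-op until late init flips a flag:

    #include <stdbool.h>
    #include <stdio.h>

    #define DFL_MIN_ACTIVE 8  /* stand-in for WQ_DFL_MIN_ACTIVE */

    static bool topo_initialized;                 /* set by late topology init */
    static int node_max_active = DFL_MIN_ACTIVE;  /* safe default at allocation */

    static void update_node_max_active(int max_active, int node_cpus, int total_cpus)
    {
            if (!topo_initialized)  /* premature call: keep the default */
                    return;
            /* roughly: give the node a share of max_active proportional to its CPUs */
            node_max_active = max_active * node_cpus / total_cpus;
    }

    int main(void)
    {
            update_node_max_active(256, 8, 32);  /* too early: ignored */
            printf("before topology init: %d\n", node_max_active);  /* prints 8 */

            topo_initialized = true;  /* workqueue_init_topology() analogue */
            update_node_max_active(256, 8, 32);
            printf("after topology init:  %d\n", node_max_active);  /* prints 64 */
            return 0;
    }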

Signed-off-by: Tejun Heo <tj@kernel.org>
---
 kernel/workqueue.c |    8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9221a4c57ae1..a65081ec6780 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -386,6 +386,8 @@ static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
 	[WQ_AFFN_SYSTEM]		= "system",
 };
 
+static bool wq_topo_initialized __read_mostly = false;
+
 /*
  * Per-cpu work items which run for longer than the following threshold are
  * automatically considered CPU intensive and excluded from concurrency
@@ -1510,6 +1512,9 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)

 	lockdep_assert_held(&wq->mutex);
 
+	if (!wq_topo_initialized)
+		return;
+
 	if (off_cpu >= 0 && !cpumask_test_cpu(off_cpu, effective))
 		off_cpu = -1;

@@ -4356,6 +4361,7 @@ static void free_node_nr_active(struct wq_node_nr_active **nna_ar)

 static void init_node_nr_active(struct wq_node_nr_active *nna)
 {
+	nna->max = WQ_DFL_MIN_ACTIVE;
 	atomic_set(&nna->nr, 0);
 	raw_spin_lock_init(&nna->lock);
 	INIT_LIST_HEAD(&nna->pending_pwqs);
@@ -7400,6 +7406,8 @@ void __init workqueue_init_topology(void)
 	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
 	init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
 
+	wq_topo_initialized = true;
+
 	mutex_lock(&wq_pool_mutex);
 
 	/*
