From a46197fa531d3f2cf00b43a84babd3bc6f14d656 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date: Wed, 3 Apr 2024 16:36:17 +0200
Subject: Revert "workqueue: Implement system-wide nr_active enforcement for unbound workqueues"

From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

This reverts commit 5a70baec2294e8a7d0fcc4558741c23e752dad5c which is
commit 5797b1c18919cd9c289ded7954383e499f729ce0 upstream.

The workqueue patches backported to 6.6.y caused some reported
regressions, so revert them for now.

Reported-by: Thorsten Leemhuis <regressions@leemhuis.info>
Cc: Tejun Heo <tj@kernel.org>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Sasha Levin <sashal@kernel.org>
Cc: Audra Mitchell <audra@redhat.com>
Link: https://lore.kernel.org/all/ce4c2f67-c298-48a0-87a3-f933d646c73b@leemhuis.info/
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 include/linux/workqueue.h |   35 ----
 kernel/workqueue.c        |  341 ++++------------------------------------------
 2 files changed, 35 insertions(+), 341 deletions(-)

--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -405,13 +405,6 @@ enum {
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
 	WQ_UNBOUND_MAX_ACTIVE	= WQ_MAX_ACTIVE,
 	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
-
-	/*
-	 * Per-node default cap on min_active. Unless explicitly set, min_active
-	 * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
-	 * workqueue_struct->min_active definition.
-	 */
-	WQ_DFL_MIN_ACTIVE	= 8,
 };
 
 /*
@@ -454,33 +447,11 @@ extern struct workqueue_struct *system_f
  * alloc_workqueue - allocate a workqueue
  * @fmt: printf format for the name of the workqueue
  * @flags: WQ_* flags
- * @max_active: max in-flight work items, 0 for default
+ * @max_active: max in-flight work items per CPU, 0 for default
  * remaining args: args for @fmt
  *
- * For a per-cpu workqueue, @max_active limits the number of in-flight work
- * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
- * executing at most one work item for the workqueue.
- *
- * For unbound workqueues, @max_active limits the number of in-flight work items
- * for the whole system. e.g. @max_active of 16 indicates that that there can be
- * at most 16 work items executing for the workqueue in the whole system.
- *
- * As sharing the same active counter for an unbound workqueue across multiple
- * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
- * according to the proportion of the number of online CPUs and enforced
- * independently.
- *
- * Depending on online CPU distribution, a node may end up with per-node
- * max_active which is significantly lower than @max_active, which can lead to
- * deadlocks if the per-node concurrency limit is lower than the maximum number
- * of interdependent work items for the workqueue.
- *
- * To guarantee forward progress regardless of online CPU distribution, the
- * concurrency limit on every node is guaranteed to be equal to or greater than
- * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
- * that the sum of per-node max_active's may be larger than @max_active.
- *
- * For detailed information on %WQ_* flags, please refer to
+ * Allocate a workqueue with the specified parameters. For detailed
+ * information on WQ_* flags, please refer to
  * Documentation/core-api/workqueue.rst.
  *
  * RETURNS:
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -122,9 +122,6 @@ enum {
  *
  * L: pool->lock protected. Access with pool->lock held.
  *
- * LN: pool->lock and wq_node_nr_active->lock protected for writes. Either for
- * reads.
- *
  * K: Only modified by worker while holding pool->lock. Can be safely read by
  * self, while holding pool->lock or from IRQ context if %current is the
  * kworker.
@@ -246,18 +243,17 @@ struct pool_workqueue {
  * pwq->inactive_works instead of pool->worklist and marked with
  * WORK_STRUCT_INACTIVE.
  *
- * All work items marked with WORK_STRUCT_INACTIVE do not participate in
- * nr_active and all work items in pwq->inactive_works are marked with
- * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are
- * in pwq->inactive_works. Some of them are ready to run in
- * pool->worklist or worker->scheduled. Those work itmes are only struct
- * wq_barrier which is used for flush_work() and should not participate
- * in nr_active. For non-barrier work item, it is marked with
- * WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
+ * All work items marked with WORK_STRUCT_INACTIVE do not participate
+ * in pwq->nr_active and all work items in pwq->inactive_works are
+ * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE
+ * work items are in pwq->inactive_works. Some of them are ready to
+ * run in pool->worklist or worker->scheduled. Those work itmes are
+ * only struct wq_barrier which is used for flush_work() and should
+ * not participate in pwq->nr_active. For non-barrier work item, it
+ * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
  */
 	int			nr_active;	/* L: nr of active works */
 	struct list_head	inactive_works;	/* L: inactive works */
-	struct list_head	pending_node;	/* LN: node on wq_node_nr_active->pending_pwqs */
 	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
 	struct list_head	mayday_node;	/* MD: node on wq->maydays */
 
@@ -289,19 +285,9 @@ struct wq_device;
  * on each CPU, in an unbound workqueue, max_active applies to the whole system.
  * As sharing a single nr_active across multiple sockets can be very expensive,
  * the counting and enforcement is per NUMA node.
- *
- * The following struct is used to enforce per-node max_active. When a pwq wants
- * to start executing a work item, it should increment ->nr using
- * tryinc_node_nr_active(). If acquisition fails due to ->nr already being over
- * ->max, the pwq is queued on ->pending_pwqs. As in-flight work items finish
- * and decrement ->nr, node_activate_pending_pwq() activates the pending pwqs in
- * round-robin order.
  */
 struct wq_node_nr_active {
-	int			max;		/* per-node max_active */
-	atomic_t		nr;		/* per-node nr_active */
-	raw_spinlock_t		lock;		/* nests inside pool locks */
-	struct list_head	pending_pwqs;	/* LN: pwqs with inactive works */
+	atomic_t		nr;		/* per-node nr_active count */
 };
 
 /*
@@ -324,12 +310,8 @@ struct workqueue_struct {
 	struct worker		*rescuer;	/* MD: rescue worker */
 
 	int			nr_drainers;	/* WQ: drain in progress */
-
-	/* See alloc_workqueue() function comment for info on min/max_active */
 	int			max_active;	/* WO: max active works */
-	int			min_active;	/* WO: min active works */
 	int			saved_max_active; /* WQ: saved max_active */
-	int			saved_min_active; /* WQ: saved min_active */
 
 	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
 	struct pool_workqueue __rcu *dfl_pwq;	/* PW: only for unbound wqs */
@@ -675,19 +657,6 @@ static struct pool_workqueue *unbound_pw
 				     lockdep_is_held(&wq->mutex));
 }
 
-/**
- * unbound_effective_cpumask - effective cpumask of an unbound workqueue
- * @wq: workqueue of interest
- *
- * @wq->unbound_attrs->cpumask contains the cpumask requested by the user which
- * is masked with wq_unbound_cpumask to determine the effective cpumask. The
- * default pwq is always mapped to the pool with the current effective cpumask.
- */
-static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq)
-{
-	return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask;
-}
-
 static unsigned int work_color_to_flags(int color)
 {
 	return color << WORK_STRUCT_COLOR_SHIFT;
@@ -1483,46 +1452,6 @@ static struct wq_node_nr_active *wq_node
 }
 
 /**
- * wq_update_node_max_active - Update per-node max_actives to use
- * @wq: workqueue to update
- * @off_cpu: CPU that's going down, -1 if a CPU is not going down
- *
- * Update @wq->node_nr_active[]->max. @wq must be unbound. max_active is
- * distributed among nodes according to the proportions of numbers of online
- * cpus. The result is always between @wq->min_active and max_active.
- */
-static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
-{
-	struct cpumask *effective = unbound_effective_cpumask(wq);
-	int min_active = READ_ONCE(wq->min_active);
-	int max_active = READ_ONCE(wq->max_active);
-	int total_cpus, node;
-
-	lockdep_assert_held(&wq->mutex);
-
-	if (!cpumask_test_cpu(off_cpu, effective))
-		off_cpu = -1;
-
-	total_cpus = cpumask_weight_and(effective, cpu_online_mask);
-	if (off_cpu >= 0)
-		total_cpus--;
-
-	for_each_node(node) {
-		int node_cpus;
-
-		node_cpus = cpumask_weight_and(effective, cpumask_of_node(node));
-		if (off_cpu >= 0 && cpu_to_node(off_cpu) == node)
-			node_cpus--;
-
-		wq_node_nr_active(wq, node)->max =
-			clamp(DIV_ROUND_UP(max_active * node_cpus, total_cpus),
-			      min_active, max_active);
-	}
-
-	wq_node_nr_active(wq, NUMA_NO_NODE)->max = min_active;
-}
-
-/**
  * get_pwq - get an extra reference on the specified pool_workqueue
  * @pwq: pool_workqueue to get
  *
@@ -1619,98 +1548,35 @@ static bool pwq_activate_work(struct poo
 	return true;
 }
 
-static bool tryinc_node_nr_active(struct wq_node_nr_active *nna)
-{
-	int max = READ_ONCE(nna->max);
-
-	while (true) {
-		int old, tmp;
-
-		old = atomic_read(&nna->nr);
-		if (old >= max)
-			return false;
-		tmp = atomic_cmpxchg_relaxed(&nna->nr, old, old + 1);
-		if (tmp == old)
-			return true;
-	}
-}
-
 /**
  * pwq_tryinc_nr_active - Try to increment nr_active for a pwq
  * @pwq: pool_workqueue of interest
- * @fill: max_active may have increased, try to increase concurrency level
  *
  * Try to increment nr_active for @pwq. Returns %true if an nr_active count is
  * successfully obtained. %false otherwise.
  */
-static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill)
+static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq)
 {
 	struct workqueue_struct *wq = pwq->wq;
 	struct worker_pool *pool = pwq->pool;
 	struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node);
-	bool obtained = false;
+	bool obtained;
 
 	lockdep_assert_held(&pool->lock);
 
-	if (!nna) {
-		/* per-cpu workqueue, pwq->nr_active is sufficient */
-		obtained = pwq->nr_active < READ_ONCE(wq->max_active);
-		goto out;
-	}
-
-	/*
-	 * Unbound workqueue uses per-node shared nr_active $nna. If @pwq is
-	 * already waiting on $nna, pwq_dec_nr_active() will maintain the
-	 * concurrency level. Don't jump the line.
-	 *
-	 * We need to ignore the pending test after max_active has increased as
-	 * pwq_dec_nr_active() can only maintain the concurrency level but not
-	 * increase it. This is indicated by @fill.
-	 */
-	if (!list_empty(&pwq->pending_node) && likely(!fill))
-		goto out;
-
-	obtained = tryinc_node_nr_active(nna);
-	if (obtained)
-		goto out;
-
-	/*
-	 * Lockless acquisition failed. Lock, add ourself to $nna->pending_pwqs
-	 * and try again. The smp_mb() is paired with the implied memory barrier
-	 * of atomic_dec_return() in pwq_dec_nr_active() to ensure that either
-	 * we see the decremented $nna->nr or they see non-empty
-	 * $nna->pending_pwqs.
-	 */
-	raw_spin_lock(&nna->lock);
-
-	if (list_empty(&pwq->pending_node))
-		list_add_tail(&pwq->pending_node, &nna->pending_pwqs);
-	else if (likely(!fill))
-		goto out_unlock;
-
-	smp_mb();
-
-	obtained = tryinc_node_nr_active(nna);
-
-	/*
-	 * If @fill, @pwq might have already been pending. Being spuriously
-	 * pending in cold paths doesn't affect anything. Let's leave it be.
-	 */
-	if (obtained && likely(!fill))
-		list_del_init(&pwq->pending_node);
+	obtained = pwq->nr_active < READ_ONCE(wq->max_active);
 
-out_unlock:
-	raw_spin_unlock(&nna->lock);
-out:
-	if (obtained)
+	if (obtained) {
 		pwq->nr_active++;
+		if (nna)
+			atomic_inc(&nna->nr);
+	}
 	return obtained;
 }
 
 /**
  * pwq_activate_first_inactive - Activate the first inactive work item on a pwq
  * @pwq: pool_workqueue of interest
- * @fill: max_active may have increased, try to increase concurrency level
 *
 * Activate the first inactive work item of @pwq if available and allowed by
 * max_active limit.
@@ -1718,13 +1584,13 @@ out:
 * Returns %true if an inactive work item has been activated. %false if no
 * inactive work item is found or max_active limit is reached.
 */
-static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill)
+static bool pwq_activate_first_inactive(struct pool_workqueue *pwq)
 {
 	struct work_struct *work =
 		list_first_entry_or_null(&pwq->inactive_works,
 					 struct work_struct, entry);
 
-	if (work && pwq_tryinc_nr_active(pwq, fill)) {
+	if (work && pwq_tryinc_nr_active(pwq)) {
 		__pwq_activate_work(pwq, work);
 		return true;
 	} else {
@@ -1733,92 +1599,10 @@ static bool pwq_activate_first_inactive(
 }
 
 /**
- * node_activate_pending_pwq - Activate a pending pwq on a wq_node_nr_active
- * @nna: wq_node_nr_active to activate a pending pwq for
- * @caller_pool: worker_pool the caller is locking
- *
- * Activate a pwq in @nna->pending_pwqs. Called with @caller_pool locked.
- * @caller_pool may be unlocked and relocked to lock other worker_pools.
- */
-static void node_activate_pending_pwq(struct wq_node_nr_active *nna,
-				      struct worker_pool *caller_pool)
-{
-	struct worker_pool *locked_pool = caller_pool;
-	struct pool_workqueue *pwq;
-	struct work_struct *work;
-
-	lockdep_assert_held(&caller_pool->lock);
-
-	raw_spin_lock(&nna->lock);
-retry:
-	pwq = list_first_entry_or_null(&nna->pending_pwqs,
-				       struct pool_workqueue, pending_node);
-	if (!pwq)
-		goto out_unlock;
-
-	/*
-	 * If @pwq is for a different pool than @locked_pool, we need to lock
-	 * @pwq->pool->lock. Let's trylock first. If unsuccessful, do the unlock
-	 * / lock dance. For that, we also need to release @nna->lock as it's
-	 * nested inside pool locks.
-	 */
-	if (pwq->pool != locked_pool) {
-		raw_spin_unlock(&locked_pool->lock);
-		locked_pool = pwq->pool;
-		if (!raw_spin_trylock(&locked_pool->lock)) {
-			raw_spin_unlock(&nna->lock);
-			raw_spin_lock(&locked_pool->lock);
-			raw_spin_lock(&nna->lock);
-			goto retry;
-		}
-	}
-
-	/*
-	 * $pwq may not have any inactive work items due to e.g. cancellations.
-	 * Drop it from pending_pwqs and see if there's another one.
-	 */
-	work = list_first_entry_or_null(&pwq->inactive_works,
-					struct work_struct, entry);
-	if (!work) {
-		list_del_init(&pwq->pending_node);
-		goto retry;
-	}
-
-	/*
-	 * Acquire an nr_active count and activate the inactive work item. If
-	 * $pwq still has inactive work items, rotate it to the end of the
-	 * pending_pwqs so that we round-robin through them. This means that
-	 * inactive work items are not activated in queueing order which is fine
-	 * given that there has never been any ordering across different pwqs.
-	 */
-	if (likely(tryinc_node_nr_active(nna))) {
-		pwq->nr_active++;
-		__pwq_activate_work(pwq, work);
-
-		if (list_empty(&pwq->inactive_works))
-			list_del_init(&pwq->pending_node);
-		else
-			list_move_tail(&pwq->pending_node, &nna->pending_pwqs);
-
-		/* if activating a foreign pool, make sure it's running */
-		if (pwq->pool != caller_pool)
-			kick_pool(pwq->pool);
-	}
-
-out_unlock:
-	raw_spin_unlock(&nna->lock);
-	if (locked_pool != caller_pool) {
-		raw_spin_unlock(&locked_pool->lock);
-		raw_spin_lock(&caller_pool->lock);
-	}
-}
-
-/**
 * pwq_dec_nr_active - Retire an active count
 * @pwq: pool_workqueue of interest
 *
 * Decrement @pwq's nr_active and try to activate the first inactive work item.
- * For unbound workqueues, this function may temporarily drop @pwq->pool->lock.
 */
 static void pwq_dec_nr_active(struct pool_workqueue *pwq)
 {
@@ -1838,29 +1622,12 @@ static void pwq_dec_nr_active(struct poo
	 * inactive work item on @pwq itself.
	 */
	if (!nna) {
-		pwq_activate_first_inactive(pwq, false);
+		pwq_activate_first_inactive(pwq);
		return;
	}
 
-	/*
-	 * If @pwq is for an unbound workqueue, it's more complicated because
-	 * multiple pwqs and pools may be sharing the nr_active count. When a
-	 * pwq needs to wait for an nr_active count, it puts itself on
-	 * $nna->pending_pwqs. The following atomic_dec_return()'s implied
-	 * memory barrier is paired with smp_mb() in pwq_tryinc_nr_active() to
-	 * guarantee that either we see non-empty pending_pwqs or they see
-	 * decremented $nna->nr.
-	 *
-	 * $nna->max may change as CPUs come online/offline and @pwq->wq's
-	 * max_active gets updated. However, it is guaranteed to be equal to or
-	 * larger than @pwq->wq->min_active which is above zero unless freezing.
-	 * This maintains the forward progress guarantee.
-	 */
-	if (atomic_dec_return(&nna->nr) >= READ_ONCE(nna->max))
-		return;
-
-	if (!list_empty(&nna->pending_pwqs))
-		node_activate_pending_pwq(nna, pool);
+	atomic_dec(&nna->nr);
+	pwq_activate_first_inactive(pwq);
 }
 
 /**
@@ -2181,7 +1948,7 @@ retry:
	 * @work must also queue behind existing inactive work items to maintain
	 * ordering when max_active changes. See wq_adjust_max_active().
	 */
-	if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) {
+	if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq)) {
		if (list_empty(&pool->worklist))
			pool->watchdog_ts = jiffies;
 
@@ -3414,7 +3181,7 @@ static void insert_wq_barrier(struct poo
 
	barr->task = current;
 
-	/* The barrier work item does not participate in nr_active. */
+	/* The barrier work item does not participate in pwq->nr_active. */
	work_flags |= WORK_STRUCT_INACTIVE;
 
	/*
@@ -4330,8 +4097,6 @@ static void free_node_nr_active(struct w
 static void init_node_nr_active(struct wq_node_nr_active *nna)
 {
	atomic_set(&nna->nr, 0);
-	raw_spin_lock_init(&nna->lock);
-	INIT_LIST_HEAD(&nna->pending_pwqs);
 }
 
 /*
@@ -4571,15 +4336,6 @@ static void pwq_release_workfn(struct kt
		mutex_unlock(&wq_pool_mutex);
	}
 
-	if (!list_empty(&pwq->pending_node)) {
-		struct wq_node_nr_active *nna =
-			wq_node_nr_active(pwq->wq, pwq->pool->node);
-
-		raw_spin_lock_irq(&nna->lock);
-		list_del_init(&pwq->pending_node);
-		raw_spin_unlock_irq(&nna->lock);
-	}
-
	call_rcu(&pwq->rcu, rcu_free_pwq);
 
	/*
@@ -4605,7 +4361,6 @@ static void init_pwq(struct pool_workque
	pwq->flush_color = -1;
	pwq->refcnt = 1;
	INIT_LIST_HEAD(&pwq->inactive_works);
-	INIT_LIST_HEAD(&pwq->pending_node);
	INIT_LIST_HEAD(&pwq->pwqs_node);
	INIT_LIST_HEAD(&pwq->mayday_node);
	kthread_init_work(&pwq->release_work, pwq_release_workfn);
@@ -4813,9 +4568,6 @@ static void apply_wqattrs_commit(struct
					ctx->pwq_tbl[cpu]);
	ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq);
 
-	/* update node_nr_active->max */
-	wq_update_node_max_active(ctx->wq, -1);
-
	mutex_unlock(&ctx->wq->mutex);
 }
 
@@ -5089,35 +4841,24 @@ static int init_rescuer(struct workqueue
 static void wq_adjust_max_active(struct workqueue_struct *wq)
 {
	bool activated;
-	int new_max, new_min;
 
	lockdep_assert_held(&wq->mutex);
 
	if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) {
-		new_max = 0;
-		new_min = 0;
-	} else {
-		new_max = wq->saved_max_active;
-		new_min = wq->saved_min_active;
+		WRITE_ONCE(wq->max_active, 0);
+		return;
	}
 
-	if (wq->max_active == new_max && wq->min_active == new_min)
+	if (wq->max_active == wq->saved_max_active)
		return;
 
	/*
-	 * Update @wq->max/min_active and then kick inactive work items if more
+	 * Update @wq->max_active and then kick inactive work items if more
	 * active work items are allowed. This doesn't break work item ordering
	 * because new work items are always queued behind existing inactive
	 * work items if there are any.
	 */
-	WRITE_ONCE(wq->max_active, new_max);
-	WRITE_ONCE(wq->min_active, new_min);
-
-	if (wq->flags & WQ_UNBOUND)
-		wq_update_node_max_active(wq, -1);
-
-	if (new_max == 0)
-		return;
+	WRITE_ONCE(wq->max_active, wq->saved_max_active);
 
	/*
	 * Round-robin through pwq's activating the first inactive work item
@@ -5132,7 +4873,7 @@ static void wq_adjust_max_active(struct
 
		/* can be called during early boot w/ irq disabled */
		raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-		if (pwq_activate_first_inactive(pwq, true)) {
+		if (pwq_activate_first_inactive(pwq)) {
			activated = true;
			kick_pool(pwq->pool);
		}
@@ -5194,9 +4935,7 @@ struct workqueue_struct *alloc_workqueue
	/* init wq */
	wq->flags = flags;
	wq->max_active = max_active;
-	wq->min_active = min(max_active, WQ_DFL_MIN_ACTIVE);
-	wq->saved_max_active = wq->max_active;
-	wq->saved_min_active = wq->min_active;
+	wq->saved_max_active = max_active;
	mutex_init(&wq->mutex);
	atomic_set(&wq->nr_pwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->pwqs);
@@ -5362,8 +5101,7 @@ EXPORT_SYMBOL_GPL(destroy_workqueue);
 * @wq: target workqueue
 * @max_active: new max_active value.
 *
- * Set max_active of @wq to @max_active. See the alloc_workqueue() function
- * comment.
+ * Set max_active of @wq to @max_active.
 *
 * CONTEXT:
 * Don't call from IRQ context.
@@ -5380,9 +5118,6 @@ void workqueue_set_max_active(struct wor
 
	wq->flags &= ~__WQ_ORDERED;
	wq->saved_max_active = max_active;
-	if (wq->flags & WQ_UNBOUND)
-		wq->saved_min_active = min(wq->saved_min_active, max_active);
-
	wq_adjust_max_active(wq);
 
	mutex_unlock(&wq->mutex);
@@ -6064,10 +5799,6 @@ int workqueue_online_cpu(unsigned int cp
 
			for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
				wq_update_pod(wq, tcpu, cpu, true);
-
-			mutex_lock(&wq->mutex);
-			wq_update_node_max_active(wq, -1);
-			mutex_unlock(&wq->mutex);
		}
	}
 
@@ -6096,10 +5827,6 @@ int workqueue_offline_cpu(unsigned int c
 
			for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
				wq_update_pod(wq, tcpu, cpu, false);
-
-			mutex_lock(&wq->mutex);
-			wq_update_node_max_active(wq, cpu);
-			mutex_unlock(&wq->mutex);
		}
	}
	mutex_unlock(&wq_pool_mutex);
@@ -7296,12 +7023,8 @@ void __init workqueue_init_topology(void
	 * combinations to apply per-pod sharing.
	 */
	list_for_each_entry(wq, &workqueues, list) {
-		for_each_online_cpu(cpu)
+		for_each_online_cpu(cpu) {
			wq_update_pod(wq, cpu, cpu, true);
-		if (wq->flags & WQ_UNBOUND) {
-			mutex_lock(&wq->mutex);
-			wq_update_node_max_active(wq, -1);
-			mutex_unlock(&wq->mutex);
-
		}
 