1 From 258a95622e9ad94cf68aa8dc56f31532a65545bc Mon Sep 17 00:00:00 2001
2 From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
3 Date: Wed, 3 Apr 2024 16:29:14 +0200
4 Subject: Revert "workqueue: Implement system-wide nr_active enforcement for unbound workqueues"
5
6 From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
7
8 This reverts commit 843288afd3cc6f3342659c6cf81fc47684d25563 which is commit
9 5797b1c18919cd9c289ded7954383e499f729ce0 upstream.
10
11 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
12 ---
13 include/linux/workqueue.h | 35 ----
14 kernel/workqueue.c | 341 ++++------------------------------------------
15 2 files changed, 35 insertions(+), 341 deletions(-)
16
17 --- a/include/linux/workqueue.h
18 +++ b/include/linux/workqueue.h
19 @@ -391,13 +391,6 @@ enum {
20 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
21 WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE,
22 WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
23 -
24 - /*
25 - * Per-node default cap on min_active. Unless explicitly set, min_active
26 - * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
27 - * workqueue_struct->min_active definition.
28 - */
29 - WQ_DFL_MIN_ACTIVE = 8,
30 };
31
32 /*
33 @@ -440,33 +433,11 @@ extern struct workqueue_struct *system_f
34 * alloc_workqueue - allocate a workqueue
35 * @fmt: printf format for the name of the workqueue
36 * @flags: WQ_* flags
37 - * @max_active: max in-flight work items, 0 for default
38 + * @max_active: max in-flight work items per CPU, 0 for default
39 * remaining args: args for @fmt
40 *
41 - * For a per-cpu workqueue, @max_active limits the number of in-flight work
42 - * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
43 - * executing at most one work item for the workqueue.
44 - *
45 - * For unbound workqueues, @max_active limits the number of in-flight work items
46 - * for the whole system. e.g. @max_active of 16 indicates that that there can be
47 - * at most 16 work items executing for the workqueue in the whole system.
48 - *
49 - * As sharing the same active counter for an unbound workqueue across multiple
50 - * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
51 - * according to the proportion of the number of online CPUs and enforced
52 - * independently.
53 - *
54 - * Depending on online CPU distribution, a node may end up with per-node
55 - * max_active which is significantly lower than @max_active, which can lead to
56 - * deadlocks if the per-node concurrency limit is lower than the maximum number
57 - * of interdependent work items for the workqueue.
58 - *
59 - * To guarantee forward progress regardless of online CPU distribution, the
60 - * concurrency limit on every node is guaranteed to be equal to or greater than
61 - * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
62 - * that the sum of per-node max_active's may be larger than @max_active.
63 - *
64 - * For detailed information on %WQ_* flags, please refer to
65 + * Allocate a workqueue with the specified parameters. For detailed
66 + * information on WQ_* flags, please refer to
67 * Documentation/core-api/workqueue.rst.
68 *
69 * RETURNS:
70 --- a/kernel/workqueue.c
71 +++ b/kernel/workqueue.c
72 @@ -122,9 +122,6 @@ enum {
73 *
74 * L: pool->lock protected. Access with pool->lock held.
75 *
76 - * LN: pool->lock and wq_node_nr_active->lock protected for writes. Either for
77 - * reads.
78 - *
79 * K: Only modified by worker while holding pool->lock. Can be safely read by
80 * self, while holding pool->lock or from IRQ context if %current is the
81 * kworker.
82 @@ -246,18 +243,17 @@ struct pool_workqueue {
83 * pwq->inactive_works instead of pool->worklist and marked with
84 * WORK_STRUCT_INACTIVE.
85 *
86 - * All work items marked with WORK_STRUCT_INACTIVE do not participate in
87 - * nr_active and all work items in pwq->inactive_works are marked with
88 - * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are
89 - * in pwq->inactive_works. Some of them are ready to run in
90 - * pool->worklist or worker->scheduled. Those work itmes are only struct
91 - * wq_barrier which is used for flush_work() and should not participate
92 - * in nr_active. For non-barrier work item, it is marked with
93 - * WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
94 + * All work items marked with WORK_STRUCT_INACTIVE do not participate
95 + * in pwq->nr_active and all work items in pwq->inactive_works are
96 + * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE
97 + * work items are in pwq->inactive_works. Some of them are ready to
98 + * run in pool->worklist or worker->scheduled. Those work itmes are
99 + * only struct wq_barrier which is used for flush_work() and should
100 + * not participate in pwq->nr_active. For non-barrier work item, it
101 + * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
102 */
103 int nr_active; /* L: nr of active works */
104 struct list_head inactive_works; /* L: inactive works */
105 - struct list_head pending_node; /* LN: node on wq_node_nr_active->pending_pwqs */
106 struct list_head pwqs_node; /* WR: node on wq->pwqs */
107 struct list_head mayday_node; /* MD: node on wq->maydays */
108
109 @@ -289,19 +285,9 @@ struct wq_device;
110 * on each CPU, in an unbound workqueue, max_active applies to the whole system.
111 * As sharing a single nr_active across multiple sockets can be very expensive,
112 * the counting and enforcement is per NUMA node.
113 - *
114 - * The following struct is used to enforce per-node max_active. When a pwq wants
115 - * to start executing a work item, it should increment ->nr using
116 - * tryinc_node_nr_active(). If acquisition fails due to ->nr already being over
117 - * ->max, the pwq is queued on ->pending_pwqs. As in-flight work items finish
118 - * and decrement ->nr, node_activate_pending_pwq() activates the pending pwqs in
119 - * round-robin order.
120 */
121 struct wq_node_nr_active {
122 - int max; /* per-node max_active */
123 - atomic_t nr; /* per-node nr_active */
124 - raw_spinlock_t lock; /* nests inside pool locks */
125 - struct list_head pending_pwqs; /* LN: pwqs with inactive works */
126 + atomic_t nr; /* per-node nr_active count */
127 };
128
129 /*
130 @@ -324,12 +310,8 @@ struct workqueue_struct {
131 struct worker *rescuer; /* MD: rescue worker */
132
133 int nr_drainers; /* WQ: drain in progress */
134 -
135 - /* See alloc_workqueue() function comment for info on min/max_active */
136 int max_active; /* WO: max active works */
137 - int min_active; /* WO: min active works */
138 int saved_max_active; /* WQ: saved max_active */
139 - int saved_min_active; /* WQ: saved min_active */
140
141 struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */
142 struct pool_workqueue __rcu *dfl_pwq; /* PW: only for unbound wqs */
143 @@ -681,19 +663,6 @@ static struct pool_workqueue *unbound_pw
144 lockdep_is_held(&wq->mutex));
145 }
146
147 -/**
148 - * unbound_effective_cpumask - effective cpumask of an unbound workqueue
149 - * @wq: workqueue of interest
150 - *
151 - * @wq->unbound_attrs->cpumask contains the cpumask requested by the user which
152 - * is masked with wq_unbound_cpumask to determine the effective cpumask. The
153 - * default pwq is always mapped to the pool with the current effective cpumask.
154 - */
155 -static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq)
156 -{
157 - return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask;
158 -}
159 -
160 static unsigned int work_color_to_flags(int color)
161 {
162 return color << WORK_STRUCT_COLOR_SHIFT;
163 @@ -1489,46 +1458,6 @@ static struct wq_node_nr_active *wq_node
164 }
165
166 /**
167 - * wq_update_node_max_active - Update per-node max_actives to use
168 - * @wq: workqueue to update
169 - * @off_cpu: CPU that's going down, -1 if a CPU is not going down
170 - *
171 - * Update @wq->node_nr_active[]->max. @wq must be unbound. max_active is
172 - * distributed among nodes according to the proportions of numbers of online
173 - * cpus. The result is always between @wq->min_active and max_active.
174 - */
175 -static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
176 -{
177 - struct cpumask *effective = unbound_effective_cpumask(wq);
178 - int min_active = READ_ONCE(wq->min_active);
179 - int max_active = READ_ONCE(wq->max_active);
180 - int total_cpus, node;
181 -
182 - lockdep_assert_held(&wq->mutex);
183 -
184 - if (!cpumask_test_cpu(off_cpu, effective))
185 - off_cpu = -1;
186 -
187 - total_cpus = cpumask_weight_and(effective, cpu_online_mask);
188 - if (off_cpu >= 0)
189 - total_cpus--;
190 -
191 - for_each_node(node) {
192 - int node_cpus;
193 -
194 - node_cpus = cpumask_weight_and(effective, cpumask_of_node(node));
195 - if (off_cpu >= 0 && cpu_to_node(off_cpu) == node)
196 - node_cpus--;
197 -
198 - wq_node_nr_active(wq, node)->max =
199 - clamp(DIV_ROUND_UP(max_active * node_cpus, total_cpus),
200 - min_active, max_active);
201 - }
202 -
203 - wq_node_nr_active(wq, NUMA_NO_NODE)->max = min_active;
204 -}
205 -
206 -/**
207 * get_pwq - get an extra reference on the specified pool_workqueue
208 * @pwq: pool_workqueue to get
209 *
210 @@ -1625,98 +1554,35 @@ static bool pwq_activate_work(struct poo
211 return true;
212 }
213
214 -static bool tryinc_node_nr_active(struct wq_node_nr_active *nna)
215 -{
216 - int max = READ_ONCE(nna->max);
217 -
218 - while (true) {
219 - int old, tmp;
220 -
221 - old = atomic_read(&nna->nr);
222 - if (old >= max)
223 - return false;
224 - tmp = atomic_cmpxchg_relaxed(&nna->nr, old, old + 1);
225 - if (tmp == old)
226 - return true;
227 - }
228 -}
229 -
230 /**
231 * pwq_tryinc_nr_active - Try to increment nr_active for a pwq
232 * @pwq: pool_workqueue of interest
233 - * @fill: max_active may have increased, try to increase concurrency level
234 *
235 * Try to increment nr_active for @pwq. Returns %true if an nr_active count is
236 * successfully obtained. %false otherwise.
237 */
238 -static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill)
239 +static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq)
240 {
241 struct workqueue_struct *wq = pwq->wq;
242 struct worker_pool *pool = pwq->pool;
243 struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node);
244 - bool obtained = false;
245 + bool obtained;
246
247 lockdep_assert_held(&pool->lock);
248
249 - if (!nna) {
250 - /* per-cpu workqueue, pwq->nr_active is sufficient */
251 - obtained = pwq->nr_active < READ_ONCE(wq->max_active);
252 - goto out;
253 - }
254 -
255 - /*
256 - * Unbound workqueue uses per-node shared nr_active $nna. If @pwq is
257 - * already waiting on $nna, pwq_dec_nr_active() will maintain the
258 - * concurrency level. Don't jump the line.
259 - *
260 - * We need to ignore the pending test after max_active has increased as
261 - * pwq_dec_nr_active() can only maintain the concurrency level but not
262 - * increase it. This is indicated by @fill.
263 - */
264 - if (!list_empty(&pwq->pending_node) && likely(!fill))
265 - goto out;
266 -
267 - obtained = tryinc_node_nr_active(nna);
268 - if (obtained)
269 - goto out;
270 -
271 - /*
272 - * Lockless acquisition failed. Lock, add ourself to $nna->pending_pwqs
273 - * and try again. The smp_mb() is paired with the implied memory barrier
274 - * of atomic_dec_return() in pwq_dec_nr_active() to ensure that either
275 - * we see the decremented $nna->nr or they see non-empty
276 - * $nna->pending_pwqs.
277 - */
278 - raw_spin_lock(&nna->lock);
279 -
280 - if (list_empty(&pwq->pending_node))
281 - list_add_tail(&pwq->pending_node, &nna->pending_pwqs);
282 - else if (likely(!fill))
283 - goto out_unlock;
284 -
285 - smp_mb();
286 -
287 - obtained = tryinc_node_nr_active(nna);
288 -
289 - /*
290 - * If @fill, @pwq might have already been pending. Being spuriously
291 - * pending in cold paths doesn't affect anything. Let's leave it be.
292 - */
293 - if (obtained && likely(!fill))
294 - list_del_init(&pwq->pending_node);
295 + obtained = pwq->nr_active < READ_ONCE(wq->max_active);
296
297 -out_unlock:
298 - raw_spin_unlock(&nna->lock);
299 -out:
300 - if (obtained)
301 + if (obtained) {
302 pwq->nr_active++;
303 + if (nna)
304 + atomic_inc(&nna->nr);
305 + }
306 return obtained;
307 }
308
309 /**
310 * pwq_activate_first_inactive - Activate the first inactive work item on a pwq
311 * @pwq: pool_workqueue of interest
312 - * @fill: max_active may have increased, try to increase concurrency level
313 *
314 * Activate the first inactive work item of @pwq if available and allowed by
315 * max_active limit.
316 @@ -1724,13 +1590,13 @@ out:
317 * Returns %true if an inactive work item has been activated. %false if no
318 * inactive work item is found or max_active limit is reached.
319 */
320 -static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill)
321 +static bool pwq_activate_first_inactive(struct pool_workqueue *pwq)
322 {
323 struct work_struct *work =
324 list_first_entry_or_null(&pwq->inactive_works,
325 struct work_struct, entry);
326
327 - if (work && pwq_tryinc_nr_active(pwq, fill)) {
328 + if (work && pwq_tryinc_nr_active(pwq)) {
329 __pwq_activate_work(pwq, work);
330 return true;
331 } else {
332 @@ -1739,92 +1605,10 @@ static bool pwq_activate_first_inactive(
333 }
334
335 /**
336 - * node_activate_pending_pwq - Activate a pending pwq on a wq_node_nr_active
337 - * @nna: wq_node_nr_active to activate a pending pwq for
338 - * @caller_pool: worker_pool the caller is locking
339 - *
340 - * Activate a pwq in @nna->pending_pwqs. Called with @caller_pool locked.
341 - * @caller_pool may be unlocked and relocked to lock other worker_pools.
342 - */
343 -static void node_activate_pending_pwq(struct wq_node_nr_active *nna,
344 - struct worker_pool *caller_pool)
345 -{
346 - struct worker_pool *locked_pool = caller_pool;
347 - struct pool_workqueue *pwq;
348 - struct work_struct *work;
349 -
350 - lockdep_assert_held(&caller_pool->lock);
351 -
352 - raw_spin_lock(&nna->lock);
353 -retry:
354 - pwq = list_first_entry_or_null(&nna->pending_pwqs,
355 - struct pool_workqueue, pending_node);
356 - if (!pwq)
357 - goto out_unlock;
358 -
359 - /*
360 - * If @pwq is for a different pool than @locked_pool, we need to lock
361 - * @pwq->pool->lock. Let's trylock first. If unsuccessful, do the unlock
362 - * / lock dance. For that, we also need to release @nna->lock as it's
363 - * nested inside pool locks.
364 - */
365 - if (pwq->pool != locked_pool) {
366 - raw_spin_unlock(&locked_pool->lock);
367 - locked_pool = pwq->pool;
368 - if (!raw_spin_trylock(&locked_pool->lock)) {
369 - raw_spin_unlock(&nna->lock);
370 - raw_spin_lock(&locked_pool->lock);
371 - raw_spin_lock(&nna->lock);
372 - goto retry;
373 - }
374 - }
375 -
376 - /*
377 - * $pwq may not have any inactive work items due to e.g. cancellations.
378 - * Drop it from pending_pwqs and see if there's another one.
379 - */
380 - work = list_first_entry_or_null(&pwq->inactive_works,
381 - struct work_struct, entry);
382 - if (!work) {
383 - list_del_init(&pwq->pending_node);
384 - goto retry;
385 - }
386 -
387 - /*
388 - * Acquire an nr_active count and activate the inactive work item. If
389 - * $pwq still has inactive work items, rotate it to the end of the
390 - * pending_pwqs so that we round-robin through them. This means that
391 - * inactive work items are not activated in queueing order which is fine
392 - * given that there has never been any ordering across different pwqs.
393 - */
394 - if (likely(tryinc_node_nr_active(nna))) {
395 - pwq->nr_active++;
396 - __pwq_activate_work(pwq, work);
397 -
398 - if (list_empty(&pwq->inactive_works))
399 - list_del_init(&pwq->pending_node);
400 - else
401 - list_move_tail(&pwq->pending_node, &nna->pending_pwqs);
402 -
403 - /* if activating a foreign pool, make sure it's running */
404 - if (pwq->pool != caller_pool)
405 - kick_pool(pwq->pool);
406 - }
407 -
408 -out_unlock:
409 - raw_spin_unlock(&nna->lock);
410 - if (locked_pool != caller_pool) {
411 - raw_spin_unlock(&locked_pool->lock);
412 - raw_spin_lock(&caller_pool->lock);
413 - }
414 -}
415 -
416 -/**
417 * pwq_dec_nr_active - Retire an active count
418 * @pwq: pool_workqueue of interest
419 *
420 * Decrement @pwq's nr_active and try to activate the first inactive work item.
421 - * For unbound workqueues, this function may temporarily drop @pwq->pool->lock.
422 */
423 static void pwq_dec_nr_active(struct pool_workqueue *pwq)
424 {
425 @@ -1844,29 +1628,12 @@ static void pwq_dec_nr_active(struct poo
426 * inactive work item on @pwq itself.
427 */
428 if (!nna) {
429 - pwq_activate_first_inactive(pwq, false);
430 + pwq_activate_first_inactive(pwq);
431 return;
432 }
433
434 - /*
435 - * If @pwq is for an unbound workqueue, it's more complicated because
436 - * multiple pwqs and pools may be sharing the nr_active count. When a
437 - * pwq needs to wait for an nr_active count, it puts itself on
438 - * $nna->pending_pwqs. The following atomic_dec_return()'s implied
439 - * memory barrier is paired with smp_mb() in pwq_tryinc_nr_active() to
440 - * guarantee that either we see non-empty pending_pwqs or they see
441 - * decremented $nna->nr.
442 - *
443 - * $nna->max may change as CPUs come online/offline and @pwq->wq's
444 - * max_active gets updated. However, it is guaranteed to be equal to or
445 - * larger than @pwq->wq->min_active which is above zero unless freezing.
446 - * This maintains the forward progress guarantee.
447 - */
448 - if (atomic_dec_return(&nna->nr) >= READ_ONCE(nna->max))
449 - return;
450 -
451 - if (!list_empty(&nna->pending_pwqs))
452 - node_activate_pending_pwq(nna, pool);
453 + atomic_dec(&nna->nr);
454 + pwq_activate_first_inactive(pwq);
455 }
456
457 /**
458 @@ -2187,7 +1954,7 @@ retry:
459 * @work must also queue behind existing inactive work items to maintain
460 * ordering when max_active changes. See wq_adjust_max_active().
461 */
462 - if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) {
463 + if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq)) {
464 if (list_empty(&pool->worklist))
465 pool->watchdog_ts = jiffies;
466
467 @@ -3420,7 +3187,7 @@ static void insert_wq_barrier(struct poo
468
469 barr->task = current;
470
471 - /* The barrier work item does not participate in nr_active. */
472 + /* The barrier work item does not participate in pwq->nr_active. */
473 work_flags |= WORK_STRUCT_INACTIVE;
474
475 /*
476 @@ -4336,8 +4103,6 @@ static void free_node_nr_active(struct w
477 static void init_node_nr_active(struct wq_node_nr_active *nna)
478 {
479 atomic_set(&nna->nr, 0);
480 - raw_spin_lock_init(&nna->lock);
481 - INIT_LIST_HEAD(&nna->pending_pwqs);
482 }
483
484 /*
485 @@ -4577,15 +4342,6 @@ static void pwq_release_workfn(struct kt
486 mutex_unlock(&wq_pool_mutex);
487 }
488
489 - if (!list_empty(&pwq->pending_node)) {
490 - struct wq_node_nr_active *nna =
491 - wq_node_nr_active(pwq->wq, pwq->pool->node);
492 -
493 - raw_spin_lock_irq(&nna->lock);
494 - list_del_init(&pwq->pending_node);
495 - raw_spin_unlock_irq(&nna->lock);
496 - }
497 -
498 call_rcu(&pwq->rcu, rcu_free_pwq);
499
500 /*
501 @@ -4611,7 +4367,6 @@ static void init_pwq(struct pool_workque
502 pwq->flush_color = -1;
503 pwq->refcnt = 1;
504 INIT_LIST_HEAD(&pwq->inactive_works);
505 - INIT_LIST_HEAD(&pwq->pending_node);
506 INIT_LIST_HEAD(&pwq->pwqs_node);
507 INIT_LIST_HEAD(&pwq->mayday_node);
508 kthread_init_work(&pwq->release_work, pwq_release_workfn);
509 @@ -4819,9 +4574,6 @@ static void apply_wqattrs_commit(struct
510 ctx->pwq_tbl[cpu]);
511 ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq);
512
513 - /* update node_nr_active->max */
514 - wq_update_node_max_active(ctx->wq, -1);
515 -
516 mutex_unlock(&ctx->wq->mutex);
517 }
518
519 @@ -5082,35 +4834,24 @@ static int init_rescuer(struct workqueue
520 static void wq_adjust_max_active(struct workqueue_struct *wq)
521 {
522 bool activated;
523 - int new_max, new_min;
524
525 lockdep_assert_held(&wq->mutex);
526
527 if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) {
528 - new_max = 0;
529 - new_min = 0;
530 - } else {
531 - new_max = wq->saved_max_active;
532 - new_min = wq->saved_min_active;
533 + WRITE_ONCE(wq->max_active, 0);
534 + return;
535 }
536
537 - if (wq->max_active == new_max && wq->min_active == new_min)
538 + if (wq->max_active == wq->saved_max_active)
539 return;
540
541 /*
542 - * Update @wq->max/min_active and then kick inactive work items if more
543 + * Update @wq->max_active and then kick inactive work items if more
544 * active work items are allowed. This doesn't break work item ordering
545 * because new work items are always queued behind existing inactive
546 * work items if there are any.
547 */
548 - WRITE_ONCE(wq->max_active, new_max);
549 - WRITE_ONCE(wq->min_active, new_min);
550 -
551 - if (wq->flags & WQ_UNBOUND)
552 - wq_update_node_max_active(wq, -1);
553 -
554 - if (new_max == 0)
555 - return;
556 + WRITE_ONCE(wq->max_active, wq->saved_max_active);
557
558 /*
559 * Round-robin through pwq's activating the first inactive work item
560 @@ -5125,7 +4866,7 @@ static void wq_adjust_max_active(struct
561
562 /* can be called during early boot w/ irq disabled */
563 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
564 - if (pwq_activate_first_inactive(pwq, true)) {
565 + if (pwq_activate_first_inactive(pwq)) {
566 activated = true;
567 kick_pool(pwq->pool);
568 }
569 @@ -5187,9 +4928,7 @@ struct workqueue_struct *alloc_workqueue
570 /* init wq */
571 wq->flags = flags;
572 wq->max_active = max_active;
573 - wq->min_active = min(max_active, WQ_DFL_MIN_ACTIVE);
574 - wq->saved_max_active = wq->max_active;
575 - wq->saved_min_active = wq->min_active;
576 + wq->saved_max_active = max_active;
577 mutex_init(&wq->mutex);
578 atomic_set(&wq->nr_pwqs_to_flush, 0);
579 INIT_LIST_HEAD(&wq->pwqs);
580 @@ -5355,8 +5094,7 @@ EXPORT_SYMBOL_GPL(destroy_workqueue);
581 * @wq: target workqueue
582 * @max_active: new max_active value.
583 *
584 - * Set max_active of @wq to @max_active. See the alloc_workqueue() function
585 - * comment.
586 + * Set max_active of @wq to @max_active.
587 *
588 * CONTEXT:
589 * Don't call from IRQ context.
590 @@ -5373,9 +5111,6 @@ void workqueue_set_max_active(struct wor
591
592 wq->flags &= ~__WQ_ORDERED;
593 wq->saved_max_active = max_active;
594 - if (wq->flags & WQ_UNBOUND)
595 - wq->saved_min_active = min(wq->saved_min_active, max_active);
596 -
597 wq_adjust_max_active(wq);
598
599 mutex_unlock(&wq->mutex);
600 @@ -6057,10 +5792,6 @@ int workqueue_online_cpu(unsigned int cp
601
602 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
603 wq_update_pod(wq, tcpu, cpu, true);
604 -
605 - mutex_lock(&wq->mutex);
606 - wq_update_node_max_active(wq, -1);
607 - mutex_unlock(&wq->mutex);
608 }
609 }
610
611 @@ -6089,10 +5820,6 @@ int workqueue_offline_cpu(unsigned int c
612
613 for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
614 wq_update_pod(wq, tcpu, cpu, false);
615 -
616 - mutex_lock(&wq->mutex);
617 - wq_update_node_max_active(wq, cpu);
618 - mutex_unlock(&wq->mutex);
619 }
620 }
621 mutex_unlock(&wq_pool_mutex);
622 @@ -7373,12 +7100,8 @@ void __init workqueue_init_topology(void
623 * combinations to apply per-pod sharing.
624 */
625 list_for_each_entry(wq, &workqueues, list) {
626 - for_each_online_cpu(cpu)
627 + for_each_online_cpu(cpu) {
628 wq_update_pod(wq, cpu, cpu, true);
629 - if (wq->flags & WQ_UNBOUND) {
630 - mutex_lock(&wq->mutex);
631 - wq_update_node_max_active(wq, -1);
632 - mutex_unlock(&wq->mutex);
633 }
634 }
635