git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 18 Feb 2020 04:51:04 +0000 (05:51 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 18 Feb 2020 04:51:04 +0000 (05:51 +0100)
added patches:
padata-remove-broken-queue-flushing.patch

queue-4.9/padata-remove-broken-queue-flushing.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/padata-remove-broken-queue-flushing.patch b/queue-4.9/padata-remove-broken-queue-flushing.patch
new file mode 100644
index 0000000..667c98a
--- /dev/null
+++ b/queue-4.9/padata-remove-broken-queue-flushing.patch
@@ -0,0 +1,141 @@
+From 07928d9bfc81640bab36f5190e8725894d93b659 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Tue, 19 Nov 2019 13:17:31 +0800
+Subject: padata: Remove broken queue flushing
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 07928d9bfc81640bab36f5190e8725894d93b659 upstream.
+
+The function padata_flush_queues is fundamentally broken because
+it cannot force padata users to complete the request that is
+underway.  IOW padata has to passively wait for the completion
+of any outstanding work.
+
+As it stands flushing is used in two places.  Its use in padata_stop
+is simply unnecessary because nothing depends on the queues to
+be flushed afterwards.
+
+The other use in padata_replace is more substantial as we depend
+on it to free the old pd structure.  This patch instead uses the
+pd->refcnt to dynamically free the pd structure once all requests
+are complete.
+
+Fixes: 2b73b07ab8a4 ("padata: Flush the padata queues actively")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+[dj: leave "pd->pinst = pinst" assignment in padata_alloc_pd()]
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/padata.c |   45 ++++++++++++---------------------------------
+ 1 file changed, 12 insertions(+), 33 deletions(-)
+
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -34,6 +34,8 @@
+ #define MAX_OBJ_NUM 1000
++static void padata_free_pd(struct parallel_data *pd);
++
+ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
+ {
+       int cpu, target_cpu;
+@@ -301,6 +303,7 @@ static void padata_serial_worker(struct
+       struct padata_serial_queue *squeue;
+       struct parallel_data *pd;
+       LIST_HEAD(local_list);
++      int cnt;
+       local_bh_disable();
+       squeue = container_of(serial_work, struct padata_serial_queue, work);
+@@ -310,6 +313,8 @@ static void padata_serial_worker(struct
+       list_replace_init(&squeue->serial.list, &local_list);
+       spin_unlock(&squeue->serial.lock);
++      cnt = 0;
++
+       while (!list_empty(&local_list)) {
+               struct padata_priv *padata;
+@@ -319,9 +324,12 @@ static void padata_serial_worker(struct
+               list_del_init(&padata->list);
+               padata->serial(padata);
+-              atomic_dec(&pd->refcnt);
++              cnt++;
+       }
+       local_bh_enable();
++
++      if (atomic_sub_and_test(cnt, &pd->refcnt))
++              padata_free_pd(pd);
+ }
+ /**
+@@ -444,7 +452,7 @@ static struct parallel_data *padata_allo
+       setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
+       atomic_set(&pd->seq_nr, -1);
+       atomic_set(&pd->reorder_objects, 0);
+-      atomic_set(&pd->refcnt, 0);
++      atomic_set(&pd->refcnt, 1);
+       pd->pinst = pinst;
+       spin_lock_init(&pd->lock);
+@@ -469,31 +477,6 @@ static void padata_free_pd(struct parall
+       kfree(pd);
+ }
+-/* Flush all objects out of the padata queues. */
+-static void padata_flush_queues(struct parallel_data *pd)
+-{
+-      int cpu;
+-      struct padata_parallel_queue *pqueue;
+-      struct padata_serial_queue *squeue;
+-
+-      for_each_cpu(cpu, pd->cpumask.pcpu) {
+-              pqueue = per_cpu_ptr(pd->pqueue, cpu);
+-              flush_work(&pqueue->work);
+-      }
+-
+-      del_timer_sync(&pd->timer);
+-
+-      if (atomic_read(&pd->reorder_objects))
+-              padata_reorder(pd);
+-
+-      for_each_cpu(cpu, pd->cpumask.cbcpu) {
+-              squeue = per_cpu_ptr(pd->squeue, cpu);
+-              flush_work(&squeue->work);
+-      }
+-
+-      BUG_ON(atomic_read(&pd->refcnt) != 0);
+-}
+-
+ static void __padata_start(struct padata_instance *pinst)
+ {
+       pinst->flags |= PADATA_INIT;
+@@ -507,10 +490,6 @@ static void __padata_stop(struct padata_
+       pinst->flags &= ~PADATA_INIT;
+       synchronize_rcu();
+-
+-      get_online_cpus();
+-      padata_flush_queues(pinst->pd);
+-      put_online_cpus();
+ }
+ /* Replace the internal control structure with a new one. */
+@@ -531,8 +510,8 @@ static void padata_replace(struct padata
+       if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
+               notification_mask |= PADATA_CPU_SERIAL;
+-      padata_flush_queues(pd_old);
+-      padata_free_pd(pd_old);
++      if (atomic_dec_and_test(&pd_old->refcnt))
++              padata_free_pd(pd_old);
+       if (notification_mask)
+               blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
diff --git a/queue-4.9/series b/queue-4.9/series
index a6356e9a73763d13c796ae246cf84e88c94ced87..8c5e296d7e2c99430234192d8ab9243f7eaf60fa 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -5,3 +5,4 @@ ext4-improve-explanation-of-a-mount-failure-caused-by-a-misconfigured-kernel.pat
 btrfs-fix-race-between-using-extent-maps-and-merging-them.patch
 btrfs-log-message-when-rw-remount-is-attempted-with-unclean-tree-log.patch
 perf-x86-amd-add-missing-l2-misses-event-spec-to-amd-family-17h-s-event-map.patch
+padata-remove-broken-queue-flushing.patch
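
For readers following the backport, the core idea of the patch above -- replace explicit queue flushing with reference counting, so the old parallel_data is freed only after the last outstanding request completes -- can be sketched in isolation. Below is a minimal userspace analogue in C11 (using <stdatomic.h> rather than the kernel's atomic_t helpers); the names pd_data, pd_alloc, pd_get and pd_put_many are illustrative and do not exist in the kernel.

/*
 * Userspace sketch of the refcount-based teardown the patch introduces.
 * Illustrative only: pd_data, pd_get() and pd_put_many() are made-up names.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pd_data {
	atomic_int refcnt;	/* starts at 1: the instance's own reference */
	/* per-CPU queues, cpumasks, ... would live here */
};

static struct pd_data *pd_alloc(void)
{
	struct pd_data *pd = malloc(sizeof(*pd));

	if (!pd)
		return NULL;
	atomic_init(&pd->refcnt, 1);	/* mirrors atomic_set(&pd->refcnt, 1) in the patch */
	return pd;
}

static void pd_free(struct pd_data *pd)
{
	printf("pd %p freed\n", (void *)pd);
	free(pd);
}

/* Submitting a request pins the structure. */
static void pd_get(struct pd_data *pd)
{
	atomic_fetch_add(&pd->refcnt, 1);
}

/*
 * Drop n references at once and free on the last one -- the role
 * atomic_sub_and_test() plays in padata_serial_worker() and
 * atomic_dec_and_test() plays in padata_replace().
 */
static void pd_put_many(struct pd_data *pd, int n)
{
	if (atomic_fetch_sub(&pd->refcnt, n) == n)
		pd_free(pd);
}

int main(void)
{
	struct pd_data *pd = pd_alloc();

	if (!pd)
		return 1;

	pd_get(pd);		/* request 1 in flight */
	pd_get(pd);		/* request 2 in flight */

	pd_put_many(pd, 1);	/* "replace" drops the instance reference; 2 requests still pin pd */
	pd_put_many(pd, 2);	/* serial worker retires the batch of 2: count hits 0, pd is freed */

	return 0;
}

Whichever path drops the final reference performs the free, so there is nothing left to flush; the kernel code reaches the same invariant with atomic_sub_and_test() and atomic_dec_and_test() instead of fetch-and-subtract.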