git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
fixes for 4.4
authorSasha Levin <sashal@kernel.org>
Fri, 7 Feb 2020 16:10:38 +0000 (11:10 -0500)
committerSasha Levin <sashal@kernel.org>
Fri, 7 Feb 2020 16:10:38 +0000 (11:10 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.4/padata-remove-broken-queue-flushing.patch [new file with mode: 0644]
queue-4.4/series

diff --git a/queue-4.4/padata-remove-broken-queue-flushing.patch b/queue-4.4/padata-remove-broken-queue-flushing.patch
new file mode 100644 (file)
index 0000000..e3b44b1
--- /dev/null
@@ -0,0 +1,144 @@
+From ebd6b41ce3a926d28af493905a97e784b4db8eb9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Nov 2019 13:17:31 +0800
+Subject: padata: Remove broken queue flushing
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 07928d9bfc81640bab36f5190e8725894d93b659 ]
+
+The function padata_flush_queues is fundamentally broken because
+it cannot force padata users to complete the request that is
+underway.  IOW padata has to passively wait for the completion
+of any outstanding work.
+
+As it stands flushing is used in two places.  Its use in padata_stop
+is simply unnecessary because nothing depends on the queues to
+be flushed afterwards.
+
+The other use in padata_replace is more substantial as we depend
+on it to free the old pd structure.  This patch instead uses the
+pd->refcnt to dynamically free the pd structure once all requests
+are complete.
+
+Fixes: 2b73b07ab8a4 ("padata: Flush the padata queues actively")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/padata.c | 46 ++++++++++++----------------------------------
+ 1 file changed, 12 insertions(+), 34 deletions(-)
+
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 282b489a286db..bb632033e6887 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -33,6 +33,8 @@
+ #define MAX_OBJ_NUM 1000
++static void padata_free_pd(struct parallel_data *pd);
++
+ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
+ {
+       int cpu, target_cpu;
+@@ -300,6 +302,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
+       struct padata_serial_queue *squeue;
+       struct parallel_data *pd;
+       LIST_HEAD(local_list);
++      int cnt;
+       local_bh_disable();
+       squeue = container_of(serial_work, struct padata_serial_queue, work);
+@@ -309,6 +312,8 @@ static void padata_serial_worker(struct work_struct *serial_work)
+       list_replace_init(&squeue->serial.list, &local_list);
+       spin_unlock(&squeue->serial.lock);
++      cnt = 0;
++
+       while (!list_empty(&local_list)) {
+               struct padata_priv *padata;
+@@ -318,9 +323,12 @@ static void padata_serial_worker(struct work_struct *serial_work)
+               list_del_init(&padata->list);
+               padata->serial(padata);
+-              atomic_dec(&pd->refcnt);
++              cnt++;
+       }
+       local_bh_enable();
++
++      if (atomic_sub_and_test(cnt, &pd->refcnt))
++              padata_free_pd(pd);
+ }
+ /**
+@@ -443,8 +451,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
+       setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
+       atomic_set(&pd->seq_nr, -1);
+       atomic_set(&pd->reorder_objects, 0);
+-      atomic_set(&pd->refcnt, 0);
+-      pd->pinst = pinst;
++      atomic_set(&pd->refcnt, 1);
+       spin_lock_init(&pd->lock);
+       return pd;
+@@ -468,31 +475,6 @@ static void padata_free_pd(struct parallel_data *pd)
+       kfree(pd);
+ }
+-/* Flush all objects out of the padata queues. */
+-static void padata_flush_queues(struct parallel_data *pd)
+-{
+-      int cpu;
+-      struct padata_parallel_queue *pqueue;
+-      struct padata_serial_queue *squeue;
+-
+-      for_each_cpu(cpu, pd->cpumask.pcpu) {
+-              pqueue = per_cpu_ptr(pd->pqueue, cpu);
+-              flush_work(&pqueue->work);
+-      }
+-
+-      del_timer_sync(&pd->timer);
+-
+-      if (atomic_read(&pd->reorder_objects))
+-              padata_reorder(pd);
+-
+-      for_each_cpu(cpu, pd->cpumask.cbcpu) {
+-              squeue = per_cpu_ptr(pd->squeue, cpu);
+-              flush_work(&squeue->work);
+-      }
+-
+-      BUG_ON(atomic_read(&pd->refcnt) != 0);
+-}
+-
+ static void __padata_start(struct padata_instance *pinst)
+ {
+       pinst->flags |= PADATA_INIT;
+@@ -506,10 +488,6 @@ static void __padata_stop(struct padata_instance *pinst)
+       pinst->flags &= ~PADATA_INIT;
+       synchronize_rcu();
+-
+-      get_online_cpus();
+-      padata_flush_queues(pinst->pd);
+-      put_online_cpus();
+ }
+ /* Replace the internal control structure with a new one. */
+@@ -530,8 +508,8 @@ static void padata_replace(struct padata_instance *pinst,
+       if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
+               notification_mask |= PADATA_CPU_SERIAL;
+-      padata_flush_queues(pd_old);
+-      padata_free_pd(pd_old);
++      if (atomic_dec_and_test(&pd_old->refcnt))
++              padata_free_pd(pd_old);
+       if (notification_mask)
+               blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
+-- 
+2.20.1
+
index b5b960d44008be9fca55b37ea305f7051d743750..587a8045aa37d0203a4df0db606bb7f68180be0b 100644 (file)
@@ -24,3 +24,4 @@ power-supply-ltc2941-battery-gauge-fix-use-after-free.patch
 revert-ovl-modify-ovl_permission-to-do-checks-on-two-inodes.patch
 of-add-of_dma_default_coherent-select-it-on-powerpc.patch
 dm-space-map-common-fix-to-ensure-new-block-isn-t-already-in-use.patch
+padata-remove-broken-queue-flushing.patch