git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
fixes for 4.19
author Sasha Levin <sashal@kernel.org>
Fri, 7 Feb 2020 16:10:37 +0000 (11:10 -0500)
committer Sasha Levin <sashal@kernel.org>
Fri, 7 Feb 2020 16:10:37 +0000 (11:10 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.19/ftrace-add-comment-to-why-rcu_dereference_sched-is-o.patch [new file with mode: 0644]
queue-4.19/ftrace-protect-ftrace_graph_hash-with-ftrace_sync.patch [new file with mode: 0644]
queue-4.19/padata-remove-broken-queue-flushing.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/tracing-annotate-ftrace_graph_hash-pointer-with-__rc.patch [new file with mode: 0644]
queue-4.19/tracing-annotate-ftrace_graph_notrace_hash-pointer-w.patch [new file with mode: 0644]

diff --git a/queue-4.19/ftrace-add-comment-to-why-rcu_dereference_sched-is-o.patch b/queue-4.19/ftrace-add-comment-to-why-rcu_dereference_sched-is-o.patch
new file mode 100644
index 0000000..f031f64
--- /dev/null
@@ -0,0 +1,53 @@
+From b7cb2bfdda56f5d45eb9b603be0e5751c1046711 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2020 02:17:57 -0500
+Subject: ftrace: Add comment to why rcu_dereference_sched() is open coded
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+[ Upstream commit 16052dd5bdfa16dbe18d8c1d4cde2ddab9d23177 ]
+
+Because the function graph tracer can execute in sections where RCU is not
+"watching", the rcu_dereference_sched() for the has needs to be open coded.
+This is fine because the RCU "flavor" of the ftrace hash is protected by
+its own RCU handling (it does its own little synchronization on every CPU
+and does not rely on RCU sched).
+
+Acked-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index cf1a7d1f35109..1721b95ba9b7d 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -883,6 +883,11 @@ static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
+       preempt_disable_notrace();
++      /*
++       * Have to open code "rcu_dereference_sched()" because the
++       * function graph tracer can be called when RCU is not
++       * "watching".
++       */
+       hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
+       if (ftrace_hash_empty(hash)) {
+@@ -930,6 +935,11 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
+       preempt_disable_notrace();
++      /*
++       * Have to open code "rcu_dereference_sched()" because the
++       * function graph tracer can be called when RCU is not
++       * "watching".
++       */
+       notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+                                                !preemptible());
+-- 
+2.20.1
+
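For reference, a condensed model of the pattern this patch documents (the
function name below is hypothetical; the accessors are the ones used in the
patch, and it assumes the kernel/trace internal headers):

    static int graph_hash_lookup(unsigned long addr)
    {
    	struct ftrace_hash *hash;
    	int ret = 0;

    	preempt_disable_notrace();
    	/*
    	 * Open coded "rcu_dereference_sched()": with preemption off the
    	 * hash cannot be freed under us, and no RCU read-side marker is
    	 * needed, so this is safe even while RCU is not "watching".
    	 */
    	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
    	if (!ftrace_hash_empty(hash) && ftrace_lookup_ip(hash, addr))
    		ret = 1;
    	preempt_enable_notrace();

    	return ret;
    }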
diff --git a/queue-4.19/ftrace-protect-ftrace_graph_hash-with-ftrace_sync.patch b/queue-4.19/ftrace-protect-ftrace_graph_hash-with-ftrace_sync.patch
new file mode 100644
index 0000000..b35eeb4
--- /dev/null
@@ -0,0 +1,71 @@
+From b2d59693100d10712e84e1d63c99896b5fca820a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2020 09:20:32 -0500
+Subject: ftrace: Protect ftrace_graph_hash with ftrace_sync
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+[ Upstream commit 54a16ff6f2e50775145b210bcd94d62c3c2af117 ]
+
+As the function_graph tracer can run when RCU is not "watching", it can not
+be protected by synchronize_rcu(); a task must run on each CPU before the
+old hash can be freed. schedule_on_each_cpu(ftrace_sync) needs to be used.
+
+Link: https://lore.kernel.org/r/20200205131110.GT2935@paulmck-ThinkPad-P72
+
+Cc: stable@vger.kernel.org
+Fixes: b9b0c831bed26 ("ftrace: Convert graph filter to use hash tables")
+Reported-by: "Paul E. McKenney" <paulmck@kernel.org>
+Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ftrace.c | 11 +++++++++--
+ kernel/trace/trace.h  |  2 ++
+ 2 files changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 09c69ad8439ef..53795237e9751 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5344,8 +5344,15 @@ ftrace_graph_release(struct inode *inode, struct file *file)
+               mutex_unlock(&graph_lock);
+-              /* Wait till all users are no longer using the old hash */
+-              synchronize_sched();
++              /*
++               * We need to do a hard force of sched synchronization.
++               * This is because we use preempt_disable() to do RCU, but
++               * the function tracers can be called where RCU is not watching
++               * (like before user_exit()). We can not rely on the RCU
++               * infrastructure to do the synchronization, thus we must do it
++               * ourselves.
++               */
++              schedule_on_each_cpu(ftrace_sync);
+               free_ftrace_hash(old_hash);
+       }
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 1721b95ba9b7d..ee0c6a313ed1a 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -887,6 +887,7 @@ static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
+        * Have to open code "rcu_dereference_sched()" because the
+        * function graph tracer can be called when RCU is not
+        * "watching".
++       * Protected with schedule_on_each_cpu(ftrace_sync)
+        */
+       hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
+@@ -939,6 +940,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
+        * Have to open code "rcu_dereference_sched()" because the
+        * function graph tracer can be called when RCU is not
+        * "watching".
++       * Protected with schedule_on_each_cpu(ftrace_sync)
+        */
+       notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+                                                !preemptible());
+-- 
+2.20.1
+
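The helper this patch switches to is worth sketching. ftrace_sync() is an
empty work function that already exists in kernel/trace/ftrace.c; roughly:

    static void ftrace_sync(struct work_struct *work)
    {
    	/*
    	 * Intentionally empty. Once this stub has run on every CPU,
    	 * each CPU has passed through the scheduler, so no CPU can
    	 * still be inside a preempt_disable()d reader section that
    	 * began before the call. That gives a hard force of sched
    	 * synchronization even where RCU is not watching.
    	 */
    }

    /* Usage, as in ftrace_graph_release() above: */
    schedule_on_each_cpu(ftrace_sync);	/* wait for every CPU */
    free_ftrace_hash(old_hash);		/* now safe to free */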
diff --git a/queue-4.19/padata-remove-broken-queue-flushing.patch b/queue-4.19/padata-remove-broken-queue-flushing.patch
new file mode 100644
index 0000000..c39fade
--- /dev/null
@@ -0,0 +1,144 @@
+From 3fe3bada09edaa6b98ef45cee48c024ee2f9f251 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Nov 2019 13:17:31 +0800
+Subject: padata: Remove broken queue flushing
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 07928d9bfc81640bab36f5190e8725894d93b659 ]
+
+The function padata_flush_queues is fundamentally broken because
+it cannot force padata users to complete the request that is
+underway.  IOW padata has to passively wait for the completion
+of any outstanding work.
+
+As it stands flushing is used in two places.  Its use in padata_stop
+is simply unnecessary because nothing depends on the queues being
+flushed afterwards.
+
+The other use in padata_replace is more substantial as we depend
+on it to free the old pd structure.  This patch instead uses the
+pd->refcnt to dynamically free the pd structure once all requests
+are complete.
+
+Fixes: 2b73b07ab8a4 ("padata: Flush the padata queues actively")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/padata.c | 46 ++++++++++++----------------------------------
+ 1 file changed, 12 insertions(+), 34 deletions(-)
+
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 6c06b3039faed..11c5f9c8779ea 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -35,6 +35,8 @@
+ #define MAX_OBJ_NUM 1000
++static void padata_free_pd(struct parallel_data *pd);
++
+ static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
+ {
+       int cpu, target_cpu;
+@@ -334,6 +336,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
+       struct padata_serial_queue *squeue;
+       struct parallel_data *pd;
+       LIST_HEAD(local_list);
++      int cnt;
+       local_bh_disable();
+       squeue = container_of(serial_work, struct padata_serial_queue, work);
+@@ -343,6 +346,8 @@ static void padata_serial_worker(struct work_struct *serial_work)
+       list_replace_init(&squeue->serial.list, &local_list);
+       spin_unlock(&squeue->serial.lock);
++      cnt = 0;
++
+       while (!list_empty(&local_list)) {
+               struct padata_priv *padata;
+@@ -352,9 +357,12 @@ static void padata_serial_worker(struct work_struct *serial_work)
+               list_del_init(&padata->list);
+               padata->serial(padata);
+-              atomic_dec(&pd->refcnt);
++              cnt++;
+       }
+       local_bh_enable();
++
++      if (atomic_sub_and_test(cnt, &pd->refcnt))
++              padata_free_pd(pd);
+ }
+ /**
+@@ -501,8 +509,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
+       timer_setup(&pd->timer, padata_reorder_timer, 0);
+       atomic_set(&pd->seq_nr, -1);
+       atomic_set(&pd->reorder_objects, 0);
+-      atomic_set(&pd->refcnt, 0);
+-      pd->pinst = pinst;
++      atomic_set(&pd->refcnt, 1);
+       spin_lock_init(&pd->lock);
+       return pd;
+@@ -526,31 +533,6 @@ static void padata_free_pd(struct parallel_data *pd)
+       kfree(pd);
+ }
+-/* Flush all objects out of the padata queues. */
+-static void padata_flush_queues(struct parallel_data *pd)
+-{
+-      int cpu;
+-      struct padata_parallel_queue *pqueue;
+-      struct padata_serial_queue *squeue;
+-
+-      for_each_cpu(cpu, pd->cpumask.pcpu) {
+-              pqueue = per_cpu_ptr(pd->pqueue, cpu);
+-              flush_work(&pqueue->work);
+-      }
+-
+-      del_timer_sync(&pd->timer);
+-
+-      if (atomic_read(&pd->reorder_objects))
+-              padata_reorder(pd);
+-
+-      for_each_cpu(cpu, pd->cpumask.cbcpu) {
+-              squeue = per_cpu_ptr(pd->squeue, cpu);
+-              flush_work(&squeue->work);
+-      }
+-
+-      BUG_ON(atomic_read(&pd->refcnt) != 0);
+-}
+-
+ static void __padata_start(struct padata_instance *pinst)
+ {
+       pinst->flags |= PADATA_INIT;
+@@ -564,10 +546,6 @@ static void __padata_stop(struct padata_instance *pinst)
+       pinst->flags &= ~PADATA_INIT;
+       synchronize_rcu();
+-
+-      get_online_cpus();
+-      padata_flush_queues(pinst->pd);
+-      put_online_cpus();
+ }
+ /* Replace the internal control structure with a new one. */
+@@ -588,8 +566,8 @@ static void padata_replace(struct padata_instance *pinst,
+       if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
+               notification_mask |= PADATA_CPU_SERIAL;
+-      padata_flush_queues(pd_old);
+-      padata_free_pd(pd_old);
++      if (atomic_dec_and_test(&pd_old->refcnt))
++              padata_free_pd(pd_old);
+       if (notification_mask)
+               blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
+-- 
+2.20.1
+
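The resulting lifetime scheme is a plain reference count with one extra
reference owned by the instance; a condensed sketch of the logic in the diff
above (padata_do_parallel() takes a reference per submitted job):

    atomic_set(&pd->refcnt, 1);		/* the instance's own reference */

    /* Serial worker: drop the batch of jobs just completed; the last
     * reference to go frees the structure, so no flushing is required. */
    if (atomic_sub_and_test(cnt, &pd->refcnt))
    	padata_free_pd(pd);

    /* padata_replace(): drop the instance's reference to the old pd. */
    if (atomic_dec_and_test(&pd_old->refcnt))
    	padata_free_pd(pd_old);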
diff --git a/queue-4.19/series b/queue-4.19/series
index 028fdc9c05a103c1268e3ff57482631d35b23be6..fd95c3d709ce2fa3cf9f00566559b903e8f9872b 100644
@@ -88,3 +88,8 @@ dm-space-map-common-fix-to-ensure-new-block-isn-t-already-in-use.patch
 dm-crypt-fix-benbi-iv-constructor-crash-if-used-in-authenticated-mode.patch
 dm-fix-potential-for-q-make_request_fn-null-pointer.patch
 dm-writecache-fix-incorrect-flush-sequence-when-doing-ssd-mode-commit.patch
+padata-remove-broken-queue-flushing.patch
+tracing-annotate-ftrace_graph_hash-pointer-with-__rc.patch
+tracing-annotate-ftrace_graph_notrace_hash-pointer-w.patch
+ftrace-add-comment-to-why-rcu_dereference_sched-is-o.patch
+ftrace-protect-ftrace_graph_hash-with-ftrace_sync.patch
diff --git a/queue-4.19/tracing-annotate-ftrace_graph_hash-pointer-with-__rc.patch b/queue-4.19/tracing-annotate-ftrace_graph_hash-pointer-with-__rc.patch
new file mode 100644
index 0000000..66d9a4b
--- /dev/null
@@ -0,0 +1,77 @@
+From 02e5db5f218b39e76233b8e5cba88eb912d20d6a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 1 Feb 2020 12:57:04 +0530
+Subject: tracing: Annotate ftrace_graph_hash pointer with __rcu
+
+From: Amol Grover <frextrite@gmail.com>
+
+[ Upstream commit 24a9729f831462b1d9d61dc85ecc91c59037243f ]
+
+Fix the following sparse errors:
+kernel/trace/ftrace.c:5664:29: error: incompatible types in comparison
+kernel/trace/ftrace.c:5785:21: error: incompatible types in comparison
+kernel/trace/ftrace.c:5864:36: error: incompatible types in comparison
+kernel/trace/ftrace.c:5866:25: error: incompatible types in comparison
+
+Use rcu_dereference_protected to access the __rcu annotated pointer.
+
+Link: http://lkml.kernel.org/r/20200201072703.17330-1-frextrite@gmail.com
+
+Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Signed-off-by: Amol Grover <frextrite@gmail.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ftrace.c | 2 +-
+ kernel/trace/trace.h  | 9 ++++++---
+ 2 files changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 37a435bac1618..00d987d9bd4a6 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5072,7 +5072,7 @@ static const struct file_operations ftrace_notrace_fops = {
+ static DEFINE_MUTEX(graph_lock);
+-struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
++struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
+ struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
+ enum graph_filter_type {
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index d11d7bfc3fa5c..70806f2f89bab 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -872,22 +872,25 @@ extern void __trace_graph_return(struct trace_array *tr,
+                                unsigned long flags, int pc);
+ #ifdef CONFIG_DYNAMIC_FTRACE
+-extern struct ftrace_hash *ftrace_graph_hash;
++extern struct ftrace_hash __rcu *ftrace_graph_hash;
+ extern struct ftrace_hash *ftrace_graph_notrace_hash;
+ static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
+ {
+       unsigned long addr = trace->func;
+       int ret = 0;
++      struct ftrace_hash *hash;
+       preempt_disable_notrace();
+-      if (ftrace_hash_empty(ftrace_graph_hash)) {
++      hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
++
++      if (ftrace_hash_empty(hash)) {
+               ret = 1;
+               goto out;
+       }
+-      if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
++      if (ftrace_lookup_ip(hash, addr)) {
+               /*
+                * This needs to be cleared on the return functions
+-- 
+2.20.1
+
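As a minimal illustration of what the __rcu annotation buys (a hedged sketch,
not the ftrace code; gp, p and gp_lock are hypothetical), sparse now rejects
plain accesses and comparisons, forcing them through the rcu_* accessors:

    struct foo __rcu *gp;
    struct foo *p;

    p = gp;					/* sparse: incompatible types */
    p = rcu_dereference(gp);			/* ok under rcu_read_lock() */
    p = rcu_dereference_protected(gp,
    		lockdep_is_held(&gp_lock));	/* ok on the update side */
    rcu_assign_pointer(gp, p);			/* ok: publisher side */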
diff --git a/queue-4.19/tracing-annotate-ftrace_graph_notrace_hash-pointer-w.patch b/queue-4.19/tracing-annotate-ftrace_graph_notrace_hash-pointer-w.patch
new file mode 100644
index 0000000..f926056
--- /dev/null
@@ -0,0 +1,72 @@
+From becabf65f75fa51db0ec42cfa05c34abe002f821 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2020 11:27:02 +0530
+Subject: tracing: Annotate ftrace_graph_notrace_hash pointer with __rcu
+
+From: Amol Grover <frextrite@gmail.com>
+
+[ Upstream commit fd0e6852c407dd9aefc594f54ddcc21d84803d3b ]
+
+Fix the following sparse errors:
+kernel/trace/ftrace.c:5667:29: error: incompatible types in comparison
+kernel/trace/ftrace.c:5813:21: error: incompatible types in comparison
+kernel/trace/ftrace.c:5868:36: error: incompatible types in comparison
+kernel/trace/ftrace.c:5870:25: error: incompatible types in comparison
+
+Use rcu_dereference_protected to dereference the newly annotated pointer.
+
+Link: http://lkml.kernel.org/r/20200205055701.30195-1-frextrite@gmail.com
+
+Signed-off-by: Amol Grover <frextrite@gmail.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ftrace.c | 2 +-
+ kernel/trace/trace.h  | 8 ++++++--
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 00d987d9bd4a6..09c69ad8439ef 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5073,7 +5073,7 @@ static const struct file_operations ftrace_notrace_fops = {
+ static DEFINE_MUTEX(graph_lock);
+ struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
+-struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
++struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
+ enum graph_filter_type {
+       GRAPH_FILTER_NOTRACE    = 0,
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 70806f2f89bab..cf1a7d1f35109 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -873,7 +873,7 @@ extern void __trace_graph_return(struct trace_array *tr,
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ extern struct ftrace_hash __rcu *ftrace_graph_hash;
+-extern struct ftrace_hash *ftrace_graph_notrace_hash;
++extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
+ static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
+ {
+@@ -926,10 +926,14 @@ static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
+ static inline int ftrace_graph_notrace_addr(unsigned long addr)
+ {
+       int ret = 0;
++      struct ftrace_hash *notrace_hash;
+       preempt_disable_notrace();
+-      if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
++      notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
++                                               !preemptible());
++
++      if (ftrace_lookup_ip(notrace_hash, addr))
+               ret = 1;
+       preempt_enable_notrace();
+-- 
+2.20.1
+
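For both annotation patches, the quoted warnings come from the kernel's
sparse integration; they should be reproducible on a single object with
something like "make C=2 kernel/trace/ftrace.o" (C=1 checks only files being
recompiled, C=2 forces the check), though exact flags may vary by tree.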