git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 10 Feb 2021 14:15:16 +0000 (15:15 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 10 Feb 2021 14:15:16 +0000 (15:15 +0100)
added patches:
block-fix-null-pointer-dereference-in-register_disk.patch
fgraph-initialize-tracing_graph_pause-at-task-creation.patch

queue-4.19/block-fix-null-pointer-dereference-in-register_disk.patch [new file with mode: 0644]
queue-4.19/fgraph-initialize-tracing_graph_pause-at-task-creation.patch [new file with mode: 0644]

diff --git a/queue-4.19/block-fix-null-pointer-dereference-in-register_disk.patch b/queue-4.19/block-fix-null-pointer-dereference-in-register_disk.patch
new file mode 100644 (file)
index 0000000..4d89ea6
--- /dev/null
@@ -0,0 +1,41 @@
+From 4d7c1d3fd7c7eda7dea351f071945e843a46c145 Mon Sep 17 00:00:00 2001
+From: zhengbin <zhengbin13@huawei.com>
+Date: Wed, 20 Feb 2019 21:27:05 +0800
+Subject: block: fix NULL pointer dereference in register_disk
+
+From: zhengbin <zhengbin13@huawei.com>
+
+commit 4d7c1d3fd7c7eda7dea351f071945e843a46c145 upstream.
+
+If the chain __device_add_disk-->bdi_register_owner-->bdi_register-->
+bdi_register_va-->device_create_vargs fails, bdi->dev is still NULL, yet
+register_disk (called from __device_add_disk) dereferences bdi->dev->kobj.
+Fix this by only creating the "bdi" sysfs link when bdi->dev is non-NULL.
+
+Signed-off-by: zhengbin <zhengbin13@huawei.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jack Wang <jinpu.wang@cloud.ionos.com>
+---
+ block/genhd.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -652,10 +652,12 @@ exit:
+               kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
+       disk_part_iter_exit(&piter);
+-      err = sysfs_create_link(&ddev->kobj,
+-                              &disk->queue->backing_dev_info->dev->kobj,
+-                              "bdi");
+-      WARN_ON(err);
++      if (disk->queue->backing_dev_info->dev) {
++              err = sysfs_create_link(&ddev->kobj,
++                        &disk->queue->backing_dev_info->dev->kobj,
++                        "bdi");
++              WARN_ON(err);
++      }
+ }
+ /**
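
The hunk above guards the sysfs_create_link() call so it only runs when backing_dev_info->dev was actually registered. As a quick illustration of the failure mode being fixed (a registration step fails and leaves an inner pointer NULL, while a later step dereferences it unconditionally), here is a minimal, self-contained user-space C sketch; struct fake_bdi, register_bdi() and add_disk_links() are made-up illustrative names, not kernel APIs.

#include <stdio.h>

/* Illustrative stand-ins, not kernel types or APIs. */
struct fake_dev {
	const char *kobj_name;
};

struct fake_bdi {
	struct fake_dev *dev;   /* stays NULL if registration fails */
};

/* Models bdi_register(): may fail and leave bdi->dev NULL,
 * like device_create_vargs() failing in the changelog above. */
static int register_bdi(struct fake_bdi *bdi, int simulate_failure)
{
	static struct fake_dev d = { "bdi" };

	if (simulate_failure)
		return -1;
	bdi->dev = &d;
	return 0;
}

/* Models register_disk(): before the fix it dereferenced bdi->dev
 * unconditionally; the patch adds the NULL check shown here. */
static void add_disk_links(struct fake_bdi *bdi)
{
	if (bdi->dev)
		printf("creating sysfs link to %s\n", bdi->dev->kobj_name);
	else
		printf("bdi->dev is NULL, skipping the \"bdi\" sysfs link\n");
}

int main(void)
{
	struct fake_bdi bdi = { 0 };

	/* Registration fails, as in the crash scenario from the changelog. */
	if (register_bdi(&bdi, 1))
		fprintf(stderr, "bdi registration failed\n");

	add_disk_links(&bdi);   /* no NULL dereference thanks to the guard */
	return 0;
}

With the guard in place the failed registration only costs the "bdi" link; without it, the same sequence would dereference a NULL pointer, which is the crash the patch prevents.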
diff --git a/queue-4.19/fgraph-initialize-tracing_graph_pause-at-task-creation.patch b/queue-4.19/fgraph-initialize-tracing_graph_pause-at-task-creation.patch
new file mode 100644 (file)
index 0000000..87f13af
--- /dev/null
@@ -0,0 +1,82 @@
+From 7e0a9220467dbcfdc5bc62825724f3e52e50ab31 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Fri, 29 Jan 2021 10:13:53 -0500
+Subject: fgraph: Initialize tracing_graph_pause at task creation
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 7e0a9220467dbcfdc5bc62825724f3e52e50ab31 upstream.
+
+On some archs, the idle task can call into cpu_suspend(). cpu_suspend()
+will disable or pause function graph tracing, as some paths in bringing
+down the CPU can have issues with their return addresses being modified.
+The task_struct structure has a "tracing_graph_pause" atomic counter;
+while it is non-zero, the function graph tracer will not modify the
+task's return addresses.
+
+The problem is that the tracing_graph_pause counter is initialized when the
+function graph tracer is enabled, not at task creation. This can corrupt the
+counter for the idle task if it is suspended on these architectures.
+
+   CPU 1                               CPU 2
+   -----                               -----
+  do_idle()
+    cpu_suspend()
+      pause_graph_tracing()
+          task_struct->tracing_graph_pause++ (0 -> 1)
+
+                               start_graph_tracing()
+                                 for_each_online_cpu(cpu) {
+                                   ftrace_graph_init_idle_task(cpu)
+                                     task_struct->tracing_graph_pause = 0 (1 -> 0)
+
+      unpause_graph_tracing()
+          task_struct->tracing_graph_pause-- (0 -> -1)
+
+The counter above should have gone from 1 back to 0 and re-enabled function
+graph tracing. Instead, it is left at -1, which keeps tracing disabled.
+
+There's no reason that the tracing_graph_pause field of the task_struct
+cannot be initialized at boot up.
+
+Cc: stable@vger.kernel.org
+Fixes: 380c4b1411ccd ("tracing/function-graph-tracer: append the tracing_graph_flag")
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=211339
+Reported-by: pierre.gondois@arm.com
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ init/init_task.c      |    3 ++-
+ kernel/trace/ftrace.c |    2 --
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -168,7 +168,8 @@ struct task_struct init_task
+       .lockdep_recursion = 0,
+ #endif
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+-      .ret_stack      = NULL,
++      .ret_stack              = NULL,
++      .tracing_graph_pause    = ATOMIC_INIT(0),
+ #endif
+ #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT)
+       .trace_recursion = 0,
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -6875,7 +6875,6 @@ static int alloc_retstack_tasklist(struc
+               }
+               if (t->ret_stack == NULL) {
+-                      atomic_set(&t->tracing_graph_pause, 0);
+                       atomic_set(&t->trace_overrun, 0);
+                       t->curr_ret_stack = -1;
+                       t->curr_ret_depth = -1;
+@@ -7088,7 +7087,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_
+ static void
+ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
+ {
+-      atomic_set(&t->tracing_graph_pause, 0);
+       atomic_set(&t->trace_overrun, 0);
+       t->ftrace_timestamp = 0;
+       /* make curr_ret_stack visible before we add the ret_stack */
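
The race in the changelog above boils down to counter arithmetic: pause takes the counter from 0 to 1, a late re-initialization at tracer-enable time clobbers it back to 0, and the following unpause drives it to -1, so the counter never returns to 0 and tracing stays off for that task. The sketch below is a minimal user-space C model of those steps using C11 <stdatomic.h> rather than the kernel's atomic_t; all names are illustrative, not kernel APIs. It replays the sequence with and without the racy re-initialization, which is what moving the initialization to task creation removes.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for task_struct.tracing_graph_pause. */
static atomic_int tracing_graph_pause;

static bool graph_tracing_allowed(void)
{
	/* Mirrors the kernel check: tracing is skipped while the counter is non-zero. */
	return atomic_load(&tracing_graph_pause) == 0;
}

static void replay(bool init_at_tracer_enable)
{
	/* Counter is set up once when the "task" is created. */
	atomic_store(&tracing_graph_pause, 0);

	/* CPU 1: idle task enters cpu_suspend() and pauses tracing (0 -> 1). */
	atomic_fetch_add(&tracing_graph_pause, 1);

	/* CPU 2: tracer is enabled and (before the fix) re-initializes the
	 * idle task's counter, clobbering the pause (1 -> 0). */
	if (init_at_tracer_enable)
		atomic_store(&tracing_graph_pause, 0);

	/* CPU 1: resume path unpauses tracing.  Buggy case: 0 -> -1. */
	atomic_fetch_sub(&tracing_graph_pause, 1);

	printf("re-init at tracer enable: %-3s -> counter = %2d, tracing %s\n",
	       init_at_tracer_enable ? "yes" : "no",
	       atomic_load(&tracing_graph_pause),
	       graph_tracing_allowed() ? "enabled" : "stuck disabled");
}

int main(void)
{
	replay(true);   /* old behaviour: counter ends at -1, tracing stuck off */
	replay(false);  /* patched behaviour: counter returns to 0 */
	return 0;
}

Run as-is, the first replay leaves the counter at -1 ("stuck disabled"), while the second, which only initializes the counter at task creation, returns it to 0 once the pause/unpause pair completes.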