git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 4.9
author    Sasha Levin <sashal@kernel.org>
          Sun, 23 Aug 2020 01:16:36 +0000 (21:16 -0400)
committer Sasha Levin <sashal@kernel.org>
          Sun, 23 Aug 2020 01:16:36 +0000 (21:16 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.9/btrfs-don-t-show-full-path-of-bind-mounts-in-subvol.patch [new file with mode: 0644]
queue-4.9/btrfs-export-helpers-for-subvolume-name-id-resolutio.patch [new file with mode: 0644]
queue-4.9/drm-imx-imx-ldb-disable-both-channels-for-split-mode.patch [new file with mode: 0644]
queue-4.9/khugepaged-adjust-vm_bug_on_mm-in-__khugepaged_enter.patch [new file with mode: 0644]
queue-4.9/khugepaged-khugepaged_test_exit-check-mmget_still_va.patch [new file with mode: 0644]
queue-4.9/perf-probe-fix-memory-leakage-when-the-probe-point-i.patch [new file with mode: 0644]
queue-4.9/series [new file with mode: 0644]
queue-4.9/tracing-clean-up-the-hwlat-binding-code.patch [new file with mode: 0644]
queue-4.9/tracing-hwlat-honor-the-tracing_cpumask.patch [new file with mode: 0644]

diff --git a/queue-4.9/btrfs-don-t-show-full-path-of-bind-mounts-in-subvol.patch b/queue-4.9/btrfs-don-t-show-full-path-of-bind-mounts-in-subvol.patch
new file mode 100644 (file)
index 0000000..010952e
--- /dev/null
@@ -0,0 +1,67 @@
+From 86881a957c7ec048de43882ddc12ec3742f04f4e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jul 2020 11:12:46 -0400
+Subject: btrfs: don't show full path of bind mounts in subvol=
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 3ef3959b29c4a5bd65526ab310a1a18ae533172a ]
+
+Chris Murphy reported a problem where rpm ostree will bind mount a bunch
+of things for whatever voodoo it's doing.  But when it does this
+/proc/mounts shows something like
+
+  /dev/sda /mnt/test btrfs rw,relatime,subvolid=256,subvol=/foo 0 0
+  /dev/sda /mnt/test/baz btrfs rw,relatime,subvolid=256,subvol=/foo/bar 0 0
+
+Despite subvolid=256 being subvol=/foo.  This is because we're just
+spitting out the dentry of the mount point, which in the case of bind
+mounts is the source path for the mountpoint.  Instead we should spit
+out the path to the actual subvol.  Fix this by looking up the name for
+the subvolid we have mounted.  With this fix the same test looks like
+this
+
+  /dev/sda /mnt/test btrfs rw,relatime,subvolid=256,subvol=/foo 0 0
+  /dev/sda /mnt/test/baz btrfs rw,relatime,subvolid=256,subvol=/foo 0 0
+
+Reported-by: Chris Murphy <chris@colorremedies.com>
+CC: stable@vger.kernel.org # 4.4+
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/super.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index af5be060c651f..3a0cb745164f8 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1225,6 +1225,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
+       struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
+       struct btrfs_root *root = info->tree_root;
+       char *compress_type;
++      const char *subvol_name;
+       if (btrfs_test_opt(info, DEGRADED))
+               seq_puts(seq, ",degraded");
+@@ -1311,8 +1312,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
+ #endif
+       seq_printf(seq, ",subvolid=%llu",
+                 BTRFS_I(d_inode(dentry))->root->root_key.objectid);
+-      seq_puts(seq, ",subvol=");
+-      seq_dentry(seq, dentry, " \t\n\\");
++      subvol_name = btrfs_get_subvol_name_from_objectid(info,
++                      BTRFS_I(d_inode(dentry))->root->root_key.objectid);
++      if (!IS_ERR(subvol_name)) {
++              seq_puts(seq, ",subvol=");
++              seq_escape(seq, subvol_name, " \t\n\\");
++              kfree(subvol_name);
++      }
+       return 0;
+ }
+-- 
+2.25.1
+
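The change above only affects what btrfs_show_options() emits for subvol=, which is observable through /proc/mounts. A minimal userspace sketch for watching that output on a system with btrfs mounts; it uses only standard glibc mntent APIs and is not part of the patch:

#include <mntent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = setmntent("/proc/self/mounts", "r");
	struct mntent *m;

	if (!f) {
		perror("setmntent");
		return 1;
	}
	/* Print device, mount point and options for every btrfs mount.
	 * With the fix, a bind mount of a subvolume reports the subvolume
	 * path in subvol= instead of the bind-mount source path. */
	while ((m = getmntent(f)) != NULL) {
		if (strcmp(m->mnt_type, "btrfs") == 0)
			printf("%s %s %s\n", m->mnt_fsname, m->mnt_dir,
			       m->mnt_opts);
	}
	endmntent(f);
	return 0;
}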
diff --git a/queue-4.9/btrfs-export-helpers-for-subvolume-name-id-resolutio.patch b/queue-4.9/btrfs-export-helpers-for-subvolume-name-id-resolutio.patch
new file mode 100644 (file)
index 0000000..1e27f21
--- /dev/null
@@ -0,0 +1,107 @@
+From 524aebfb6637a01be115ef7b755b0e8812c16aa2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Feb 2020 14:56:12 +0100
+Subject: btrfs: export helpers for subvolume name/id resolution
+
+From: Marcos Paulo de Souza <mpdesouza@suse.com>
+
+[ Upstream commit c0c907a47dccf2cf26251a8fb4a8e7a3bf79ce84 ]
+
+The functions will be used outside of export.c and super.c to allow
+resolving subvolume name from a given id, eg. for subvolume deletion by
+id ioctl.
+
+Signed-off-by: Marcos Paulo de Souza <mpdesouza@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+[ split from the next patch ]
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/ctree.h  | 2 ++
+ fs/btrfs/export.c | 8 ++++----
+ fs/btrfs/export.h | 5 +++++
+ fs/btrfs/super.c  | 8 ++++----
+ 4 files changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 2bc37d03d4075..abfc090510480 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -3261,6 +3261,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
+ int btrfs_parse_options(struct btrfs_root *root, char *options,
+                       unsigned long new_flags);
+ int btrfs_sync_fs(struct super_block *sb, int wait);
++char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
++                                        u64 subvol_objectid);
+ static inline __printf(2, 3)
+ void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
+diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
+index 2513a7f533342..92f80ed642194 100644
+--- a/fs/btrfs/export.c
++++ b/fs/btrfs/export.c
+@@ -55,9 +55,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+       return type;
+ }
+-static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
+-                                     u64 root_objectid, u32 generation,
+-                                     int check_generation)
++struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
++                              u64 root_objectid, u32 generation,
++                              int check_generation)
+ {
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       struct btrfs_root *root;
+@@ -150,7 +150,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
+       return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
+ }
+-static struct dentry *btrfs_get_parent(struct dentry *child)
++struct dentry *btrfs_get_parent(struct dentry *child)
+ {
+       struct inode *dir = d_inode(child);
+       struct btrfs_root *root = BTRFS_I(dir)->root;
+diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
+index 074348a95841f..7a305e5549991 100644
+--- a/fs/btrfs/export.h
++++ b/fs/btrfs/export.h
+@@ -16,4 +16,9 @@ struct btrfs_fid {
+       u64 parent_root_objectid;
+ } __attribute__ ((packed));
++struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
++                              u64 root_objectid, u32 generation,
++                              int check_generation);
++struct dentry *btrfs_get_parent(struct dentry *child);
++
+ #endif
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 9286603a6a98b..af5be060c651f 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -948,8 +948,8 @@ out:
+       return error;
+ }
+-static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
+-                                         u64 subvol_objectid)
++char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
++                                        u64 subvol_objectid)
+ {
+       struct btrfs_root *root = fs_info->tree_root;
+       struct btrfs_root *fs_root;
+@@ -1430,8 +1430,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
+                               goto out;
+                       }
+               }
+-              subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb),
+-                                                          subvol_objectid);
++              subvol_name = btrfs_get_subvol_name_from_objectid(
++                                      btrfs_sb(mnt->mnt_sb), subvol_objectid);
+               if (IS_ERR(subvol_name)) {
+                       root = ERR_CAST(subvol_name);
+                       subvol_name = NULL;
+-- 
+2.25.1
+
diff --git a/queue-4.9/drm-imx-imx-ldb-disable-both-channels-for-split-mode.patch b/queue-4.9/drm-imx-imx-ldb-disable-both-channels-for-split-mode.patch
new file mode 100644 (file)
index 0000000..a10a624
--- /dev/null
@@ -0,0 +1,60 @@
+From a2c1faf5a5134fb2558bc84c894bddf01ab8267b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Jul 2020 10:28:52 +0800
+Subject: drm/imx: imx-ldb: Disable both channels for split mode in
+ enc->disable()
+
+From: Liu Ying <victor.liu@nxp.com>
+
+[ Upstream commit 3b2a999582c467d1883716b37ffcc00178a13713 ]
+
+Both of the two LVDS channels should be disabled for split mode
+in the encoder's ->disable() callback, because they are enabled
+in the encoder's ->enable() callback.
+
+Fixes: 6556f7f82b9c ("drm: imx: Move imx-drm driver out of staging")
+Cc: Philipp Zabel <p.zabel@pengutronix.de>
+Cc: Sascha Hauer <s.hauer@pengutronix.de>
+Cc: Pengutronix Kernel Team <kernel@pengutronix.de>
+Cc: NXP Linux Team <linux-imx@nxp.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Liu Ying <victor.liu@nxp.com>
+Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/imx/imx-ldb.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
+index 67881e5517fbf..2df407b2b0da7 100644
+--- a/drivers/gpu/drm/imx/imx-ldb.c
++++ b/drivers/gpu/drm/imx/imx-ldb.c
+@@ -317,6 +317,7 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
+ {
+       struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+       struct imx_ldb *ldb = imx_ldb_ch->ldb;
++      int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+       int mux, ret;
+       /*
+@@ -333,14 +334,14 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
+       drm_panel_disable(imx_ldb_ch->panel);
+-      if (imx_ldb_ch == &ldb->channel[0])
++      if (imx_ldb_ch == &ldb->channel[0] || dual)
+               ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
+-      else if (imx_ldb_ch == &ldb->channel[1])
++      if (imx_ldb_ch == &ldb->channel[1] || dual)
+               ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
+       regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
+-      if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
++      if (dual) {
+               clk_disable_unprepare(ldb->clk[0]);
+               clk_disable_unprepare(ldb->clk[1]);
+       }
+-- 
+2.25.1
+
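In practice the patch means that, in split (dual-channel) mode, disabling either channel's encoder clears both channel-enable fields before the control word is written back. A standalone sketch of that bitmask logic; the bit positions below are illustrative only, the real definitions live in the imx-ldb driver:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit layout only, not the real i.MX register definitions. */
#define LDB_CH0_MODE_EN_MASK	(3u << 0)
#define LDB_CH1_MODE_EN_MASK	(3u << 2)
#define LDB_SPLIT_MODE_EN	(1u << 4)

static uint32_t ldb_disable_channel(uint32_t ldb_ctrl, int channel)
{
	int dual = ldb_ctrl & LDB_SPLIT_MODE_EN;

	/* In split mode both channels were enabled together in ->enable(),
	 * so both must be cleared no matter which encoder is disabled. */
	if (channel == 0 || dual)
		ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
	if (channel == 1 || dual)
		ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
	return ldb_ctrl;
}

int main(void)
{
	uint32_t ctrl = LDB_CH0_MODE_EN_MASK | LDB_CH1_MODE_EN_MASK |
			LDB_SPLIT_MODE_EN;

	printf("split mode, disable ch0 -> ctrl = %#x\n",
	       (unsigned int)ldb_disable_channel(ctrl, 0));
	return 0;
}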
diff --git a/queue-4.9/khugepaged-adjust-vm_bug_on_mm-in-__khugepaged_enter.patch b/queue-4.9/khugepaged-adjust-vm_bug_on_mm-in-__khugepaged_enter.patch
new file mode 100644 (file)
index 0000000..85de7f9
--- /dev/null
@@ -0,0 +1,51 @@
+From 991d4b41483ebea609ed07862432a510aaee699b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Aug 2020 17:42:02 -0700
+Subject: khugepaged: adjust VM_BUG_ON_MM() in __khugepaged_enter()
+
+From: Hugh Dickins <hughd@google.com>
+
+[ Upstream commit f3f99d63a8156c7a4a6b20aac22b53c5579c7dc1 ]
+
+syzbot crashes on the VM_BUG_ON_MM(khugepaged_test_exit(mm), mm) in
+__khugepaged_enter(): yes, when one thread is about to dump core, has set
+core_state, and is waiting for others, another might do something calling
+__khugepaged_enter(), which now crashes because I lumped the core_state
+test (known as "mmget_still_valid") into khugepaged_test_exit().  I still
+think it's best to lump them together, so just in this exceptional case,
+check mm->mm_users directly instead of khugepaged_test_exit().
+
+Fixes: bbe98f9cadff ("khugepaged: khugepaged_test_exit() check mmget_still_valid()")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Yang Shi <shy828301@gmail.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Song Liu <songliubraving@fb.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: <stable@vger.kernel.org>   [4.8+]
+Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2008141503370.18085@eggly.anvils
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/khugepaged.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index f4ed056d9f863..1538e5e5c628a 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -404,7 +404,7 @@ int __khugepaged_enter(struct mm_struct *mm)
+               return -ENOMEM;
+       /* __khugepaged_exit() must not run from under us */
+-      VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
++      VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
+       if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
+               free_mm_slot(mm_slot);
+               return 0;
+-- 
+2.25.1
+
diff --git a/queue-4.9/khugepaged-khugepaged_test_exit-check-mmget_still_va.patch b/queue-4.9/khugepaged-khugepaged_test_exit-check-mmget_still_va.patch
new file mode 100644 (file)
index 0000000..dc3421d
--- /dev/null
@@ -0,0 +1,60 @@
+From e2c71c1d363188d960d03ea884192a2e0d77cdf4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Aug 2020 23:26:25 -0700
+Subject: khugepaged: khugepaged_test_exit() check mmget_still_valid()
+
+From: Hugh Dickins <hughd@google.com>
+
+[ Upstream commit bbe98f9cadff58cdd6a4acaeba0efa8565dabe65 ]
+
+Move collapse_huge_page()'s mmget_still_valid() check into
+khugepaged_test_exit() itself.  collapse_huge_page() is used for anon THP
+only, and earned its mmget_still_valid() check because it inserts a huge
+pmd entry in place of the page table's pmd entry; whereas
+collapse_file()'s retract_page_tables() or collapse_pte_mapped_thp()
+merely clears the page table's pmd entry.  But core dumping without mmap
+lock must have been as open to mistaking a racily cleared pmd entry for a
+page table at physical page 0, as exit_mmap() was.  And we certainly have
+no interest in mapping as a THP once dumping core.
+
+Fixes: 59ea6d06cfa9 ("coredump: fix race condition between collapse_huge_page() and core dumping")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Song Liu <songliubraving@fb.com>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: <stable@vger.kernel.org>   [4.8+]
+Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2008021217020.27773@eggly.anvils
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/khugepaged.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 3080c6415493c..f4ed056d9f863 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -391,7 +391,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
+ static inline int khugepaged_test_exit(struct mm_struct *mm)
+ {
+-      return atomic_read(&mm->mm_users) == 0;
++      return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
+ }
+ int __khugepaged_enter(struct mm_struct *mm)
+@@ -1004,9 +1004,6 @@ static void collapse_huge_page(struct mm_struct *mm,
+        * handled by the anon_vma lock + PG_lock.
+        */
+       down_write(&mm->mmap_sem);
+-      result = SCAN_ANY_PROCESS;
+-      if (!mmget_still_valid(mm))
+-              goto out;
+       result = hugepage_vma_revalidate(mm, address, &vma);
+       if (result)
+               goto out;
+-- 
+2.25.1
+
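For context, mmget_still_valid(mm) on these kernels reduces to a check that mm->core_state is still NULL, so khugepaged_test_exit() now treats a core-dumping mm the same as an exiting one. A self-contained sketch of the combined predicate, using a simplified stand-in for mm_struct rather than kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the two mm_struct fields the check looks at. */
struct fake_mm {
	atomic_int mm_users;	/* references from user space */
	void *core_state;	/* non-NULL while a core dump is in flight */
};

/* Mirrors khugepaged_test_exit() after the patch: "exiting" now also
 * covers "currently dumping core" (mmget_still_valid() would fail). */
static bool test_exit(struct fake_mm *mm)
{
	return atomic_load(&mm->mm_users) == 0 || mm->core_state != NULL;
}

int main(void)
{
	struct fake_mm live = { .mm_users = 2, .core_state = NULL };
	struct fake_mm dumping = { .mm_users = 2, .core_state = &live };

	printf("live mm: %d, dumping mm: %d\n",
	       test_exit(&live), test_exit(&dumping));
	return 0;
}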
diff --git a/queue-4.9/perf-probe-fix-memory-leakage-when-the-probe-point-i.patch b/queue-4.9/perf-probe-fix-memory-leakage-when-the-probe-point-i.patch
new file mode 100644 (file)
index 0000000..9846c65
--- /dev/null
@@ -0,0 +1,52 @@
+From 34e668632892134f7730f1c18c1ca0ad14d8dbdb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Jul 2020 22:11:23 +0900
+Subject: perf probe: Fix memory leakage when the probe point is not found
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+[ Upstream commit 12d572e785b15bc764e956caaa8a4c846fd15694 ]
+
+Fix the memory leakage in debuginfo__find_trace_events() when the probe
+point is not found in the debuginfo. If there is no probe point found in
+the debuginfo, debuginfo__find_probes() will NOT return -ENOENT, but 0.
+
+Thus the caller of debuginfo__find_probes() must check the tf.ntevs and
+release the allocated memory for the array of struct probe_trace_event.
+
+The current code releases the memory only if debuginfo__find_probes() hits
+an error but does not check tf.ntevs. As a result, the memory allocated
+for *tevs is not released if tf.ntevs == 0.
+
+This fixes the memory leakage by checking tf.ntevs == 0 in addition to
+ret < 0.
+
+Fixes: ff741783506c ("perf probe: Introduce debuginfo to encapsulate dwarf information")
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: stable@vger.kernel.org
+Link: http://lore.kernel.org/lkml/159438668346.62703.10887420400718492503.stgit@devnote2
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/probe-finder.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
+index 7d0d44b4f3d5c..863f668a07355 100644
+--- a/tools/perf/util/probe-finder.c
++++ b/tools/perf/util/probe-finder.c
+@@ -1351,7 +1351,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
+       tf.ntevs = 0;
+       ret = debuginfo__find_probes(dbg, &tf.pf);
+-      if (ret < 0) {
++      if (ret < 0 || tf.ntevs == 0) {
+               for (i = 0; i < tf.ntevs; i++)
+                       clear_probe_trace_event(&tf.tevs[i]);
+               zfree(tevs);
+-- 
+2.25.1
+
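The leak exists because the *tevs array is allocated before the search runs, so a search that succeeds but matches nothing leaves that allocation behind unless the ntevs == 0 path also frees it. A standalone sketch of the cleanup pattern the fix applies; the names here are illustrative, not the perf APIs:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct event { char *name; };

/* Pretend search: the result array is allocated up front, and the search
 * may legitimately match nothing while still returning success (0). */
static int find_events(struct event **tevs, int *ntevs)
{
	*tevs = calloc(8, sizeof(**tevs));
	if (!*tevs)
		return -ENOMEM;
	*ntevs = 0;		/* nothing matched */
	return 0;
}

int main(void)
{
	struct event *tevs = NULL;
	int ntevs = 0;
	int ret = find_events(&tevs, &ntevs);

	/* Same shape as the fix: clean up on error *or* when nothing was
	 * found, so the untouched result array does not leak. */
	if (ret < 0 || ntevs == 0) {
		for (int i = 0; i < ntevs; i++)
			free(tevs[i].name);
		free(tevs);
		tevs = NULL;
	}
	printf("ret=%d ntevs=%d tevs=%p\n", ret, ntevs, (void *)tevs);
	return ret < 0;
}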
diff --git a/queue-4.9/series b/queue-4.9/series
new file mode 100644 (file)
index 0000000..d14f0d1
--- /dev/null
@@ -0,0 +1,8 @@
+drm-imx-imx-ldb-disable-both-channels-for-split-mode.patch
+perf-probe-fix-memory-leakage-when-the-probe-point-i.patch
+tracing-clean-up-the-hwlat-binding-code.patch
+tracing-hwlat-honor-the-tracing_cpumask.patch
+khugepaged-khugepaged_test_exit-check-mmget_still_va.patch
+khugepaged-adjust-vm_bug_on_mm-in-__khugepaged_enter.patch
+btrfs-export-helpers-for-subvolume-name-id-resolutio.patch
+btrfs-don-t-show-full-path-of-bind-mounts-in-subvol.patch
diff --git a/queue-4.9/tracing-clean-up-the-hwlat-binding-code.patch b/queue-4.9/tracing-clean-up-the-hwlat-binding-code.patch
new file mode 100644 (file)
index 0000000..6882574
--- /dev/null
@@ -0,0 +1,103 @@
+From 7151a0062b397dbfdeb506022172557df4f463df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Jan 2017 16:48:23 -0500
+Subject: tracing: Clean up the hwlat binding code
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+[ Upstream commit f447c196fe7a3a92c6396f7628020cb8d564be15 ]
+
+Instead of initializing the affinity of the hwlat kthread in the thread
+itself, simply set up the initial affinity at thread creation. This
+simplifies the code.
+
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_hwlat.c | 34 +++++++++++++++++-----------------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index 5fe23f0ee7db6..158af5ddbc3aa 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -268,24 +268,13 @@ out:
+ static struct cpumask save_cpumask;
+ static bool disable_migrate;
+-static void move_to_next_cpu(bool initmask)
++static void move_to_next_cpu(void)
+ {
+-      static struct cpumask *current_mask;
++      struct cpumask *current_mask = &save_cpumask;
+       int next_cpu;
+       if (disable_migrate)
+               return;
+-
+-      /* Just pick the first CPU on first iteration */
+-      if (initmask) {
+-              current_mask = &save_cpumask;
+-              get_online_cpus();
+-              cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
+-              put_online_cpus();
+-              next_cpu = cpumask_first(current_mask);
+-              goto set_affinity;
+-      }
+-
+       /*
+        * If for some reason the user modifies the CPU affinity
+        * of this thread, than stop migrating for the duration
+@@ -302,7 +291,6 @@ static void move_to_next_cpu(bool initmask)
+       if (next_cpu >= nr_cpu_ids)
+               next_cpu = cpumask_first(current_mask);
+- set_affinity:
+       if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
+               goto disable;
+@@ -332,12 +320,10 @@ static void move_to_next_cpu(bool initmask)
+ static int kthread_fn(void *data)
+ {
+       u64 interval;
+-      bool initmask = true;
+       while (!kthread_should_stop()) {
+-              move_to_next_cpu(initmask);
+-              initmask = false;
++              move_to_next_cpu();
+               local_irq_disable();
+               get_sample();
+@@ -368,13 +354,27 @@ static int kthread_fn(void *data)
+  */
+ static int start_kthread(struct trace_array *tr)
+ {
++      struct cpumask *current_mask = &save_cpumask;
+       struct task_struct *kthread;
++      int next_cpu;
++
++      /* Just pick the first CPU on first iteration */
++      current_mask = &save_cpumask;
++      get_online_cpus();
++      cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
++      put_online_cpus();
++      next_cpu = cpumask_first(current_mask);
+       kthread = kthread_create(kthread_fn, NULL, "hwlatd");
+       if (IS_ERR(kthread)) {
+               pr_err(BANNER "could not start sampling thread\n");
+               return -ENOMEM;
+       }
++
++      cpumask_clear(current_mask);
++      cpumask_set_cpu(next_cpu, current_mask);
++      sched_setaffinity(kthread->pid, current_mask);
++
+       hwlat_kthread = kthread;
+       wake_up_process(kthread);
+-- 
+2.25.1
+
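The kernel change moves the initial CPU pinning out of the sampling loop and into start_kthread(), before the thread is woken up. A loose userspace analogy, assuming glibc's GNU pthread affinity extensions (compile with -pthread): pin the worker through its creation attributes instead of having it reconfigure itself on its first iteration.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *worker(void *arg)
{
	(void)arg;
	/* The worker no longer sets its own affinity; it just runs. */
	printf("worker running on CPU %d\n", sched_getcpu());
	return NULL;
}

int main(void)
{
	pthread_t tid;
	pthread_attr_t attr;
	cpu_set_t mask;

	/* "Just pick the first CPU" - done once, at creation time. */
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);

	pthread_attr_init(&attr);
	pthread_attr_setaffinity_np(&attr, sizeof(mask), &mask);

	if (pthread_create(&tid, &attr, worker, NULL) != 0) {
		perror("pthread_create");
		return 1;
	}
	pthread_join(tid, NULL);
	pthread_attr_destroy(&attr);
	return 0;
}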
diff --git a/queue-4.9/tracing-hwlat-honor-the-tracing_cpumask.patch b/queue-4.9/tracing-hwlat-honor-the-tracing_cpumask.patch
new file mode 100644 (file)
index 0000000..7485e2c
--- /dev/null
@@ -0,0 +1,58 @@
+From fd3f17307235090e6283eb7e4ee55874b7fd9626 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Jul 2020 16:23:18 +0800
+Subject: tracing/hwlat: Honor the tracing_cpumask
+
+From: Kevin Hao <haokexin@gmail.com>
+
+[ Upstream commit 96b4833b6827a62c295b149213c68b559514c929 ]
+
+When calculating the cpu mask for the hwlat kernel thread, the wrong cpu
+mask is used instead of the tracing_cpumask; this renders
+tracing/tracing_cpumask useless for the hwlat tracer. Fix it.
+
+Link: https://lkml.kernel.org/r/20200730082318.42584-2-haokexin@gmail.com
+
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: stable@vger.kernel.org
+Fixes: 0330f7aa8ee6 ("tracing: Have hwlat trace migrate across tracing_cpumask CPUs")
+Signed-off-by: Kevin Hao <haokexin@gmail.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_hwlat.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index 158af5ddbc3aa..d1e007c729235 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -271,6 +271,7 @@ static bool disable_migrate;
+ static void move_to_next_cpu(void)
+ {
+       struct cpumask *current_mask = &save_cpumask;
++      struct trace_array *tr = hwlat_trace;
+       int next_cpu;
+       if (disable_migrate)
+@@ -284,7 +285,7 @@ static void move_to_next_cpu(void)
+               goto disable;
+       get_online_cpus();
+-      cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
++      cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
+       next_cpu = cpumask_next(smp_processor_id(), current_mask);
+       put_online_cpus();
+@@ -361,7 +362,7 @@ static int start_kthread(struct trace_array *tr)
+       /* Just pick the first CPU on first iteration */
+       current_mask = &save_cpumask;
+       get_online_cpus();
+-      cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
++      cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
+       put_online_cpus();
+       next_cpu = cpumask_first(current_mask);
+-- 
+2.25.1
+