git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 23 Aug 2020 12:37:57 +0000 (14:37 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 23 Aug 2020 12:37:57 +0000 (14:37 +0200)
added patches:
jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.patch
kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch
mm-include-cma-pages-in-lowmem_reserve-at-boot.patch
mm-page_alloc-fix-core-hung-in-free_pcppages_bulk.patch
romfs-fix-uninitialized-memory-leak-in-romfs_dev_read.patch

queue-4.9/jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.patch [new file with mode: 0644]
queue-4.9/kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch [new file with mode: 0644]
queue-4.9/mm-include-cma-pages-in-lowmem_reserve-at-boot.patch [new file with mode: 0644]
queue-4.9/mm-page_alloc-fix-core-hung-in-free_pcppages_bulk.patch [new file with mode: 0644]
queue-4.9/romfs-fix-uninitialized-memory-leak-in-romfs_dev_read.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.patch b/queue-4.9/jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.patch
new file mode 100644 (file)
index 0000000..853ae80
--- /dev/null
@@ -0,0 +1,39 @@
+From ef3f5830b859604eda8723c26d90ab23edc027a4 Mon Sep 17 00:00:00 2001
+From: "zhangyi (F)" <yi.zhang@huawei.com>
+Date: Sat, 20 Jun 2020 14:19:48 +0800
+Subject: jbd2: add the missing unlock_buffer() in the error path of jbd2_write_superblock()
+
+From: zhangyi (F) <yi.zhang@huawei.com>
+
+commit ef3f5830b859604eda8723c26d90ab23edc027a4 upstream.
+
+jbd2_write_superblock() holds the buffer lock of the journal superblock
+until that superblock write completes, so add the missing unlock_buffer()
+in the error path taken before the buffer is submitted.
+
+Fixes: 742b06b5628f ("jbd2: check superblock mapped prior to committing")
+Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
+Reviewed-by: Ritesh Harjani <riteshh@linux.ibm.com>
+Cc: stable@kernel.org
+Link: https://lore.kernel.org/r/20200620061948.2049579-1-yi.zhang@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/jbd2/journal.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1340,8 +1340,10 @@ static int jbd2_write_superblock(journal
+       int ret;
+       /* Buffer got discarded which means block device got invalidated */
+-      if (!buffer_mapped(bh))
++      if (!buffer_mapped(bh)) {
++              unlock_buffer(bh);
+               return -EIO;
++      }
+       trace_jbd2_write_superblock(journal, write_flags);
+       if (!(journal->j_flags & JBD2_BARRIER))
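
The bug class above is a lock leaked on an early error return:
jbd2_write_superblock() runs with the journal superblock's buffer locked,
and the !buffer_mapped() bailout returned without unlocking it. A minimal
userspace sketch of the invariant, with a pthread mutex standing in for
the kernel's buffer lock (names are illustrative, not the jbd2 code):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct buf {
        pthread_mutex_t lock;
        bool mapped;
};

/* Entered holding no lock; must not return with b->lock still held,
 * whichever path is taken. */
static int write_buf(struct buf *b)
{
        pthread_mutex_lock(&b->lock);
        if (!b->mapped) {
                /* the fix: unlock on the error path too */
                pthread_mutex_unlock(&b->lock);
                return -EIO;
        }
        /* ... perform the write ... */
        pthread_mutex_unlock(&b->lock);
        return 0;
}
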
diff --git a/queue-4.9/kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch b/queue-4.9/kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch
new file mode 100644 (file)
index 0000000..91e94d1
--- /dev/null
@@ -0,0 +1,65 @@
+From 71e843295c680898959b22dc877ae3839cc22470 Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <weiyongjun1@huawei.com>
+Date: Thu, 20 Aug 2020 17:42:14 -0700
+Subject: kernel/relay.c: fix memleak on destroy relay channel
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+commit 71e843295c680898959b22dc877ae3839cc22470 upstream.
+
+kmemleak reports a memory leak as follows:
+
+  unreferenced object 0x607ee4e5f948 (size 8):
+  comm "syz-executor.1", pid 2098, jiffies 4295031601 (age 288.468s)
+  hex dump (first 8 bytes):
+  00 00 00 00 00 00 00 00 ........
+  backtrace:
+     relay_open kernel/relay.c:583 [inline]
+     relay_open+0xb6/0x970 kernel/relay.c:563
+     do_blk_trace_setup+0x4a8/0xb20 kernel/trace/blktrace.c:557
+     __blk_trace_setup+0xb6/0x150 kernel/trace/blktrace.c:597
+     blk_trace_ioctl+0x146/0x280 kernel/trace/blktrace.c:738
+     blkdev_ioctl+0xb2/0x6a0 block/ioctl.c:613
+     block_ioctl+0xe5/0x120 fs/block_dev.c:1871
+     vfs_ioctl fs/ioctl.c:48 [inline]
+     __do_sys_ioctl fs/ioctl.c:753 [inline]
+     __se_sys_ioctl fs/ioctl.c:739 [inline]
+     __x64_sys_ioctl+0x170/0x1ce fs/ioctl.c:739
+     do_syscall_64+0x33/0x40 arch/x86/entry/common.c:46
+     entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+'chan->buf' is allocated in relay_open() by alloc_percpu() but is not
+freed when the relay channel is destroyed.  Fix it by adding free_percpu()
+before returning from relay_destroy_channel().
+
+Fixes: 017c59c042d0 ("relay: Use per CPU constructs for the relay channel buffer pointers")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Michel Lespinasse <walken@google.com>
+Cc: Daniel Axtens <dja@axtens.net>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Akash Goel <akash.goel@intel.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200817122826.48518-1-weiyongjun1@huawei.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/relay.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -196,6 +196,7 @@ free_buf:
+ static void relay_destroy_channel(struct kref *kref)
+ {
+       struct rchan *chan = container_of(kref, struct rchan, kref);
++      free_percpu(chan->buf);
+       kfree(chan);
+ }
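
The rule the fix restores is symmetry between constructor and destructor:
whatever relay_open() allocates for the channel must be released when the
last reference is dropped. A hedged userspace sketch of that pairing,
modeling alloc_percpu()/free_percpu() with calloc()/free() (structure and
names are illustrative, not the relay code):

#include <stdlib.h>

struct chan {
        void **buf;     /* stand-in for the per-CPU buffer pointer array */
        int nr_bufs;
};

static struct chan *chan_create(int nr_bufs)
{
        struct chan *c = calloc(1, sizeof(*c));
        if (!c)
                return NULL;
        c->buf = calloc(nr_bufs, sizeof(*c->buf)); /* analogue of alloc_percpu() */
        if (!c->buf) {
                free(c);
                return NULL;
        }
        c->nr_bufs = nr_bufs;
        return c;
}

static void chan_destroy(struct chan *c)
{
        free(c->buf);   /* analogue of the added free_percpu(chan->buf) */
        free(c);        /* before the fix, only this free ran and buf leaked */
}
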
diff --git a/queue-4.9/mm-include-cma-pages-in-lowmem_reserve-at-boot.patch b/queue-4.9/mm-include-cma-pages-in-lowmem_reserve-at-boot.patch
new file mode 100644 (file)
index 0000000..41e29c9
--- /dev/null
@@ -0,0 +1,85 @@
+From e08d3fdfe2dafa0331843f70ce1ff6c1c4900bf4 Mon Sep 17 00:00:00 2001
+From: Doug Berger <opendmb@gmail.com>
+Date: Thu, 20 Aug 2020 17:42:24 -0700
+Subject: mm: include CMA pages in lowmem_reserve at boot
+
+From: Doug Berger <opendmb@gmail.com>
+
+commit e08d3fdfe2dafa0331843f70ce1ff6c1c4900bf4 upstream.
+
+The lowmem_reserve arrays provide a means of applying pressure against
+allocations from lower zones that were targeted at higher zones.  Their
+values are a function of the number of pages managed by higher zones and
+are assigned by a call to the setup_per_zone_lowmem_reserve() function.
+
+The function is initially called at boot time by the function
+init_per_zone_wmark_min() and may be called later by accesses of the
+/proc/sys/vm/lowmem_reserve_ratio sysctl file.
+
+The function init_per_zone_wmark_min() was moved up from a module_init to
+a core_initcall to resolve a sequencing issue with khugepaged.
+Unfortunately this created a sequencing issue with CMA page accounting.
+
+The CMA pages are added to the managed page count of a zone when
+cma_init_reserved_areas() is called at boot also as a core_initcall.  This
+makes it uncertain whether the CMA pages will be added to the managed page
+counts of their zones before or after the call to
+init_per_zone_wmark_min() as it becomes dependent on link order.  With the
+current link order the pages are added to the managed count after the
+lowmem_reserve arrays are initialized at boot.
+
+This means the lowmem_reserve values at boot may be lower than the values
+used later if /proc/sys/vm/lowmem_reserve_ratio is accessed even if the
+ratio values are unchanged.
+
+In many cases the difference is not significant, but for example
+an ARM platform with 1GB of memory and the following memory layout
+
+  cma: Reserved 256 MiB at 0x0000000030000000
+  Zone ranges:
+    DMA      [mem 0x0000000000000000-0x000000002fffffff]
+    Normal   empty
+    HighMem  [mem 0x0000000030000000-0x000000003fffffff]
+
+would result in 0 lowmem_reserve for the DMA zone.  This would allow
+userspace to deplete the DMA zone easily.
+
+Funnily enough
+
+  $ cat /proc/sys/vm/lowmem_reserve_ratio
+
+would fix up the situation because, as a side effect, it forces a call
+to setup_per_zone_lowmem_reserve().
+
+This commit breaks the link-order dependency by invoking
+init_per_zone_wmark_min() as a postcore_initcall, so that the CMA pages
+are properly accounted in their zone(s) before the lowmem_reserve arrays
+are computed and can thus receive consistent values.
+
+Fixes: bc22af74f271 ("mm: update min_free_kbytes from khugepaged after core initialization")
+Signed-off-by: Doug Berger <opendmb@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Jason Baron <jbaron@akamai.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/1597423766-27849-1-git-send-email-opendmb@gmail.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6782,7 +6782,7 @@ int __meminit init_per_zone_wmark_min(vo
+       return 0;
+ }
+-core_initcall(init_per_zone_wmark_min)
++postcore_initcall(init_per_zone_wmark_min)
+ /*
+  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
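
For readers unfamiliar with initcall levels: the kernel runs every call
registered at one level before any call at the next level (core_initcall
before postcore_initcall, and so on), while ordering of calls within a
single level is decided by link order. A toy model of why moving
init_per_zone_wmark_min() one level later removes the link-order
dependence (plain C, illustrative only, not kernel code):

#include <stdio.h>

static void cma_account_pages(void)
{
        puts("core_initcall:     CMA pages added to zone managed counts");
}

static void init_wmark_min(void)
{
        puts("postcore_initcall: lowmem_reserve derived from managed counts");
}

int main(void)
{
        /* All core_initcalls finish first, in whatever link order ... */
        cma_account_pages();
        /* ... then postcore_initcalls run, so the CMA pages are already
         * accounted when setup_per_zone_lowmem_reserve() does its math. */
        init_wmark_min();
        return 0;
}
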
diff --git a/queue-4.9/mm-page_alloc-fix-core-hung-in-free_pcppages_bulk.patch b/queue-4.9/mm-page_alloc-fix-core-hung-in-free_pcppages_bulk.patch
new file mode 100644 (file)
index 0000000..55f3793
--- /dev/null
@@ -0,0 +1,100 @@
+From 88e8ac11d2ea3acc003cf01bb5a38c8aa76c3cfd Mon Sep 17 00:00:00 2001
+From: Charan Teja Reddy <charante@codeaurora.org>
+Date: Thu, 20 Aug 2020 17:42:27 -0700
+Subject: mm, page_alloc: fix core hung in free_pcppages_bulk()
+
+From: Charan Teja Reddy <charante@codeaurora.org>
+
+commit 88e8ac11d2ea3acc003cf01bb5a38c8aa76c3cfd upstream.
+
+The following race is observed with repeated online and offline of memory
+blocks in the movable zone, with a delay between two successive onlines.
+
+P1                                             P2
+
+Online the first memory block in
+the movable zone.  The pcp struct
+values are initialized to the
+defaults, i.e., pcp->high = 0 and
+pcp->batch = 1.
+
+                                       Allocate pages from the
+                                       movable zone.
+
+Try to online the second memory
+block in the movable zone; it has
+entered online_pages() but has not
+yet called zone_pcp_update().
+                                       This process enters the exit
+                                       path and tries to release its
+                                       order-0 pages to the pcp lists
+                                       through free_unref_page_commit().
+                                       As pcp->high = 0 and
+                                       pcp->count = 1, it proceeds to
+                                       call free_pcppages_bulk().
+Update the pcp values, so that the
+new values are, say, pcp->high = 378
+and pcp->batch = 63.
+                                       Read the pcp's batch value with
+                                       READ_ONCE() and pass it to
+                                       free_pcppages_bulk(); the values
+                                       passed are batch = 63, count = 1.
+
+                                       Since the number of pages in the
+                                       pcp lists is less than ->batch,
+                                       it gets stuck in the
+                                       while (list_empty(list)) loop
+                                       with interrupts disabled, and
+                                       the core hangs.
+
+Avoid this by ensuring free_pcppages_bulk() is called with the proper
+count of pcp list pages.
+
+The mentioned race is somewhat easy to reproduce without [1] because the
+pcp's are not updated when the first memory block is onlined, and thus
+there is a wide enough race window for P2 between the alloc+free and the
+pcp struct values being updated by the onlining of the second memory
+block.
+
+With [1] the race still exists, but it is very narrow, as the pcp struct
+values are updated when the first memory block itself is onlined.
+
+This is not limited to the movable zone; it could also happen in cases
+with the normal zone (e.g., hotplug to a node that only has DMA memory,
+or no other memory yet).
+
+[1]: https://patchwork.kernel.org/patch/11696389/
+
+Fixes: 5f8dcc21211a ("page-allocator: split per-cpu list into one-list-per-migrate-type")
+Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: David Hildenbrand <david@redhat.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Vinayak Menon <vinmenon@codeaurora.org>
+Cc: <stable@vger.kernel.org> [2.6+]
+Link: http://lkml.kernel.org/r/1597150703-19003-1-git-send-email-charante@codeaurora.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1116,6 +1116,11 @@ static void free_pcppages_bulk(struct zo
+       if (nr_scanned)
+               __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
++      /*
++       * Ensure a proper count is passed; otherwise the loop below,
++       * while (list_empty(list)), would get stuck.
++       */
++      count = min(pcp->count, count);
+       while (count) {
+               struct page *page;
+               struct list_head *list;
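
The one-line fix is a defensive clamp: never attempt to drain more pages
than the pcp lists actually hold, because the drain loop assumes every
iteration can remove a page. A small sketch of the idea outside the
kernel (illustrative only):

#include <stddef.h>

/* Drain up to 'batch' items from a store holding 'available' items.
 * Without the clamp, batch > available would spin forever waiting for
 * items that never arrive; that is the reported hang, with IRQs off. */
static size_t drain(size_t batch, size_t available)
{
        size_t count = batch < available ? batch : available; /* the min() */
        size_t drained = 0;

        while (count--)
                drained++;      /* stand-in for freeing one page */
        return drained;
}
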
diff --git a/queue-4.9/romfs-fix-uninitialized-memory-leak-in-romfs_dev_read.patch b/queue-4.9/romfs-fix-uninitialized-memory-leak-in-romfs_dev_read.patch
new file mode 100644 (file)
index 0000000..9a75279
--- /dev/null
@@ -0,0 +1,55 @@
+From bcf85fcedfdd17911982a3e3564fcfec7b01eebd Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Thu, 20 Aug 2020 17:42:11 -0700
+Subject: romfs: fix uninitialized memory leak in romfs_dev_read()
+
+From: Jann Horn <jannh@google.com>
+
+commit bcf85fcedfdd17911982a3e3564fcfec7b01eebd upstream.
+
+romfs has a superblock field that limits the size of the filesystem; data
+beyond that limit is never accessed.
+
+romfs_dev_read() fetches a caller-supplied number of bytes from the
+backing device.  It returns 0 on success or an error code on failure;
+therefore, its API can't represent short reads: it's all-or-nothing.
+
+However, when romfs_dev_read() detects that the requested operation would
+cross the filesystem size limit, it currently silently truncates the
+requested number of bytes.  This means, for example, that when the content
+of a file of size 0x1000 starts one byte before the filesystem size limit,
+->readpage() will only fill a single byte of the supplied page while
+leaving the rest uninitialized, leaking that uninitialized memory to
+userspace.
+
+Fix it by returning an error code instead of truncating the read when the
+requested read operation would go beyond the end of the filesystem.
+
+Fixes: da4458bda237 ("NOMMU: Make it possible for RomFS to use MTD devices directly")
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: David Howells <dhowells@redhat.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200818013202.2246365-1-jannh@google.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/romfs/storage.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/fs/romfs/storage.c
++++ b/fs/romfs/storage.c
+@@ -221,10 +221,8 @@ int romfs_dev_read(struct super_block *s
+       size_t limit;
+       limit = romfs_maxsize(sb);
+-      if (pos >= limit)
++      if (pos >= limit || buflen > limit - pos)
+               return -EIO;
+-      if (buflen > limit - pos)
+-              buflen = limit - pos;
+ #ifdef CONFIG_ROMFS_ON_MTD
+       if (sb->s_mtd)
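
The shape of the fix is worth noting for any all-or-nothing API: reject a
request that crosses the limit instead of silently truncating it, and
phrase the bound as 'buflen > limit - pos' (after 'pos >= limit' has been
ruled out) so the arithmetic cannot overflow the way 'pos + buflen' could.
A self-contained sketch of that check (illustrative, not the romfs code):

#include <errno.h>
#include <stddef.h>

/* Validate a read of 'buflen' bytes at offset 'pos' against 'limit'.
 * Returns 0 only if the whole range fits; never truncates, because the
 * caller's API cannot represent a short read. */
static int check_read(size_t pos, size_t buflen, size_t limit)
{
        /* Checking 'pos >= limit' first makes 'limit - pos' safe, and
         * comparing buflen to the remainder avoids overflowing pos + buflen. */
        if (pos >= limit || buflen > limit - pos)
                return -EIO;
        return 0;
}
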
diff --git a/queue-4.9/series b/queue-4.9/series
index 58da9b584cda646e8c9999c85041229dc11fdead..3a148c1e5dc1bc56ff7691b863b450074982e742 100644 (file)
@@ -8,3 +8,8 @@ khugepaged-khugepaged_test_exit-check-mmget_still_va.patch
 khugepaged-adjust-vm_bug_on_mm-in-__khugepaged_enter.patch
 btrfs-export-helpers-for-subvolume-name-id-resolutio.patch
 btrfs-don-t-show-full-path-of-bind-mounts-in-subvol.patch
+romfs-fix-uninitialized-memory-leak-in-romfs_dev_read.patch
+kernel-relay.c-fix-memleak-on-destroy-relay-channel.patch
+mm-include-cma-pages-in-lowmem_reserve-at-boot.patch
+mm-page_alloc-fix-core-hung-in-free_pcppages_bulk.patch
+jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.patch