--- /dev/null
+From 673bdf8ce0a387ef585c13b69a2676096c6edfe9 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 29 Nov 2019 11:28:22 +0100
+Subject: compat_ioctl: block: handle BLKREPORTZONE/BLKRESETZONE
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 673bdf8ce0a387ef585c13b69a2676096c6edfe9 upstream.
+
+These were added to blkdev_ioctl() but not blkdev_compat_ioctl,
+so add them now.
+
+Cc: <stable@vger.kernel.org> # v4.10+
+Fixes: 3ed05a987e0f ("blk-zoned: implement ioctls")
+Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/compat_ioctl.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/block/compat_ioctl.c
++++ b/block/compat_ioctl.c
+@@ -355,6 +355,8 @@ long compat_blkdev_ioctl(struct file *fi
+ * but we call blkdev_ioctl, which gets the lock for us
+ */
+ case BLKRRPART:
++ case BLKREPORTZONE:
++ case BLKRESETZONE:
+ return blkdev_ioctl(bdev, mode, cmd,
+ (unsigned long)compat_ptr(arg));
+ case BLKBSZSET_32:
--- /dev/null
+From b2c0fcd28772f99236d261509bcd242135677965 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 29 Nov 2019 11:28:22 +0100
+Subject: compat_ioctl: block: handle Persistent Reservations
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit b2c0fcd28772f99236d261509bcd242135677965 upstream.
+
+These were added to blkdev_ioctl() in linux-5.5 but not
+blkdev_compat_ioctl, so add them now.
+
+Cc: <stable@vger.kernel.org> # v4.4+
+Fixes: bbd3e064362e ("block: add an API for Persistent Reservations")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+Fold in followup patch from Arnd with missing pr.h header include.
+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+
+---
+ block/compat_ioctl.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/block/compat_ioctl.c
++++ b/block/compat_ioctl.c
+@@ -6,6 +6,7 @@
+ #include <linux/compat.h>
+ #include <linux/elevator.h>
+ #include <linux/hdreg.h>
++#include <linux/pr.h>
+ #include <linux/slab.h>
+ #include <linux/syscalls.h>
+ #include <linux/types.h>
+@@ -401,6 +402,14 @@ long compat_blkdev_ioctl(struct file *fi
+ case BLKTRACETEARDOWN: /* compatible */
+ ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
+ return ret;
++ case IOC_PR_REGISTER:
++ case IOC_PR_RESERVE:
++ case IOC_PR_RELEASE:
++ case IOC_PR_PREEMPT:
++ case IOC_PR_PREEMPT_ABORT:
++ case IOC_PR_CLEAR:
++ return blkdev_ioctl(bdev, mode, cmd,
++ (unsigned long)compat_ptr(arg));
+ default:
+ if (disk->fops->compat_ioctl)
+ ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
--- /dev/null
+From 53a256a9b925b47c7e67fc1f16ca41561a7b877c Mon Sep 17 00:00:00 2001
+From: Lukas Wunner <lukas@wunner.de>
+Date: Thu, 5 Dec 2019 12:54:49 +0100
+Subject: dmaengine: Fix access to uninitialized dma_slave_caps
+
+From: Lukas Wunner <lukas@wunner.de>
+
+commit 53a256a9b925b47c7e67fc1f16ca41561a7b877c upstream.
+
+dmaengine_desc_set_reuse() allocates a struct dma_slave_caps on the
+stack, populates it using dma_get_slave_caps() and then accesses one
+of its members.
+
+However dma_get_slave_caps() may fail and this isn't accounted for,
+leading to a legitimate warning of gcc-4.9 (but not newer versions):
+
+ In file included from drivers/spi/spi-bcm2835.c:19:0:
+ drivers/spi/spi-bcm2835.c: In function 'dmaengine_desc_set_reuse':
+>> include/linux/dmaengine.h:1370:10: warning: 'caps.descriptor_reuse' is used uninitialized in this function [-Wuninitialized]
+ if (caps.descriptor_reuse) {
+
+Fix it, thereby also silencing the gcc-4.9 warning.
+
+The issue has been present for 4 years but surfaces only now that
+the first caller of dmaengine_desc_set_reuse() has been added in
+spi-bcm2835.c. Another user of reusable DMA descriptors has existed
+for a while in pxa_camera.c, but it sets the DMA_CTRL_REUSE flag
+directly instead of calling dmaengine_desc_set_reuse(). Nevertheless,
+tag this commit for stable in case there are out-of-tree users.
+
+Fixes: 272420214d26 ("dmaengine: Add DMA_CTRL_REUSE")
+Reported-by: kbuild test robot <lkp@intel.com>
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Cc: stable@vger.kernel.org # v4.3+
+Link: https://lore.kernel.org/r/ca92998ccc054b4f2bfd60ef3adbab2913171eac.1575546234.git.lukas@wunner.de
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/dmaengine.h | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/include/linux/dmaengine.h
++++ b/include/linux/dmaengine.h
+@@ -1373,8 +1373,11 @@ static inline int dma_get_slave_caps(str
+ static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
+ {
+ struct dma_slave_caps caps;
++ int ret;
+
+- dma_get_slave_caps(tx->chan, &caps);
++ ret = dma_get_slave_caps(tx->chan, &caps);
++ if (ret)
++ return ret;
+
+ if (caps.descriptor_reuse) {
+ tx->flags |= DMA_CTRL_REUSE;
--- /dev/null
+From 98ca480a8f22fdbd768e3dad07024c8d4856576c Mon Sep 17 00:00:00 2001
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Sun, 22 Dec 2019 20:45:28 +0200
+Subject: locks: print unsigned ino in /proc/locks
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+commit 98ca480a8f22fdbd768e3dad07024c8d4856576c upstream.
+
+An ino is unsigned, so display it as such in /proc/locks.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/locks.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2678,7 +2678,7 @@ static void lock_get_status(struct seq_f
+ }
+ if (inode) {
+ /* userspace relies on this representation of dev_t */
+- seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
++ seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
+ MAJOR(inode->i_sb->s_dev),
+ MINOR(inode->i_sb->s_dev), inode->i_ino);
+ } else {
--- /dev/null
+From 84029fd04c201a4c7e0b07ba262664900f47c6f5 Mon Sep 17 00:00:00 2001
+From: Shakeel Butt <shakeelb@google.com>
+Date: Sat, 4 Jan 2020 12:59:43 -0800
+Subject: memcg: account security cred as well to kmemcg
+
+From: Shakeel Butt <shakeelb@google.com>
+
+commit 84029fd04c201a4c7e0b07ba262664900f47c6f5 upstream.
+
+The cred_jar kmem_cache is already memcg accounted in the current kernel
+but cred->security is not. Account cred->security to kmemcg.
+
+Recently we saw high root slab usage on our production and on further
+inspection, we found a buggy application leaking processes. Though that
+buggy application was contained within its memcg but we observe much
+more system memory overhead, couple of GiBs, during that period. This
+overhead can adversely impact the isolation on the system.
+
+One source of high overhead we found was cred->security objects, which
+have a lifetime of at least the life of the process which allocated
+them.
+
+Link: http://lkml.kernel.org/r/20191205223721.40034-1-shakeelb@google.com
+Signed-off-by: Shakeel Butt <shakeelb@google.com>
+Acked-by: Chris Down <chris@chrisdown.name>
+Reviewed-by: Roman Gushchin <guro@fb.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cred.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/cred.c
++++ b/kernel/cred.c
+@@ -220,7 +220,7 @@ struct cred *cred_alloc_blank(void)
+ new->magic = CRED_MAGIC;
+ #endif
+
+- if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
++ if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
+ goto error;
+
+ return new;
+@@ -279,7 +279,7 @@ struct cred *prepare_creds(void)
+ new->security = NULL;
+ #endif
+
+- if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
++ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
+ goto error;
+ validate_creds(new);
+ return new;
+@@ -654,7 +654,7 @@ struct cred *prepare_kernel_cred(struct
+ #ifdef CONFIG_SECURITY
+ new->security = NULL;
+ #endif
+- if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
++ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
+ goto error;
+
+ put_cred(old);
--- /dev/null
+From e0153fc2c7606f101392b682e720a7a456d6c766 Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@linux.alibaba.com>
+Date: Sat, 4 Jan 2020 12:59:46 -0800
+Subject: mm: move_pages: return valid node id in status if the page is already on the target node
+
+From: Yang Shi <yang.shi@linux.alibaba.com>
+
+commit e0153fc2c7606f101392b682e720a7a456d6c766 upstream.
+
+Felix Abecassis reports move_pages() would return random status if the
+pages are already on the target node by the below test program:
+
+ int main(void)
+ {
+ const long node_id = 1;
+ const long page_size = sysconf(_SC_PAGESIZE);
+ const int64_t num_pages = 8;
+
+ unsigned long nodemask = 1 << node_id;
+ long ret = set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask));
+ if (ret < 0)
+ return (EXIT_FAILURE);
+
+ void **pages = malloc(sizeof(void*) * num_pages);
+ for (int i = 0; i < num_pages; ++i) {
+ pages[i] = mmap(NULL, page_size, PROT_WRITE | PROT_READ,
+ MAP_PRIVATE | MAP_POPULATE | MAP_ANONYMOUS,
+ -1, 0);
+ if (pages[i] == MAP_FAILED)
+ return (EXIT_FAILURE);
+ }
+
+ ret = set_mempolicy(MPOL_DEFAULT, NULL, 0);
+ if (ret < 0)
+ return (EXIT_FAILURE);
+
+ int *nodes = malloc(sizeof(int) * num_pages);
+ int *status = malloc(sizeof(int) * num_pages);
+ for (int i = 0; i < num_pages; ++i) {
+ nodes[i] = node_id;
+ status[i] = 0xd0; /* simulate garbage values */
+ }
+
+ ret = move_pages(0, num_pages, pages, nodes, status, MPOL_MF_MOVE);
+ printf("move_pages: %ld\n", ret);
+ for (int i = 0; i < num_pages; ++i)
+ printf("status[%d] = %d\n", i, status[i]);
+ }
+
+Then running the program would return nonsense status values:
+
+ $ ./move_pages_bug
+ move_pages: 0
+ status[0] = 208
+ status[1] = 208
+ status[2] = 208
+ status[3] = 208
+ status[4] = 208
+ status[5] = 208
+ status[6] = 208
+ status[7] = 208
+
+This is because the status is not set if the page is already on the
+target node, but move_pages() should return valid status as long as it
+succeeds. The valid status may be errno or node id.
+
+We can't simply initialize status array to zero since the pages may
+not on node 0. Fix it by updating status with node id which the page is
+already on.
+
+Link: http://lkml.kernel.org/r/1575584353-125392-1-git-send-email-yang.shi@linux.alibaba.com
+Fixes: a49bd4d71637 ("mm, numa: rework do_pages_move")
+Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
+Reported-by: Felix Abecassis <fabecassis@nvidia.com>
+Tested-by: Felix Abecassis <fabecassis@nvidia.com>
+Suggested-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: John Hubbard <jhubbard@nvidia.com>
+Acked-by: Christoph Lameter <cl@linux.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: <stable@vger.kernel.org> [4.17+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/migrate.c | 23 +++++++++++++++++------
+ 1 file changed, 17 insertions(+), 6 deletions(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1508,9 +1508,11 @@ static int do_move_pages_to_node(struct
+ /*
+ * Resolves the given address to a struct page, isolates it from the LRU and
+ * puts it to the given pagelist.
+- * Returns -errno if the page cannot be found/isolated or 0 when it has been
+- * queued or the page doesn't need to be migrated because it is already on
+- * the target node
++ * Returns:
++ * errno - if the page cannot be found/isolated
++ * 0 - when it doesn't have to be migrated because it is already on the
++ * target node
++ * 1 - when it has been queued
+ */
+ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
+ int node, struct list_head *pagelist, bool migrate_all)
+@@ -1549,7 +1551,7 @@ static int add_page_for_migration(struct
+ if (PageHuge(page)) {
+ if (PageHead(page)) {
+ isolate_huge_page(page, pagelist);
+- err = 0;
++ err = 1;
+ }
+ } else {
+ struct page *head;
+@@ -1559,7 +1561,7 @@ static int add_page_for_migration(struct
+ if (err)
+ goto out_putpage;
+
+- err = 0;
++ err = 1;
+ list_add_tail(&head->lru, pagelist);
+ mod_node_page_state(page_pgdat(head),
+ NR_ISOLATED_ANON + page_is_file_cache(head),
+@@ -1636,8 +1638,17 @@ static int do_pages_move(struct mm_struc
+ */
+ err = add_page_for_migration(mm, addr, current_node,
+ &pagelist, flags & MPOL_MF_MOVE_ALL);
+- if (!err)
++
++ if (!err) {
++ /* The page is already on the target node */
++ err = store_status(status, i, current_node, 1);
++ if (err)
++ goto out_flush;
+ continue;
++ } else if (err > 0) {
++ /* The page is successfully queued for migration */
++ continue;
++ }
+
+ err = store_status(status, i, err, 1);
+ if (err)
--- /dev/null
+From ac8f05da5174c560de122c499ce5dfb5d0dfbee5 Mon Sep 17 00:00:00 2001
+From: Chanho Min <chanho.min@lge.com>
+Date: Sat, 4 Jan 2020 12:59:36 -0800
+Subject: mm/zsmalloc.c: fix the migrated zspage statistics.
+
+From: Chanho Min <chanho.min@lge.com>
+
+commit ac8f05da5174c560de122c499ce5dfb5d0dfbee5 upstream.
+
+When zspage is migrated to the other zone, the zone page state should be
+updated as well, otherwise the NR_ZSPAGE for each zone shows wrong
+counts including proc/zoneinfo in practice.
+
+Link: http://lkml.kernel.org/r/1575434841-48009-1-git-send-email-chanho.min@lge.com
+Fixes: 91537fee0013 ("mm: add NR_ZSMALLOC to vmstat")
+Signed-off-by: Chanho Min <chanho.min@lge.com>
+Signed-off-by: Jinsuk Choi <jjinsuk.choi@lge.com>
+Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Acked-by: Minchan Kim <minchan@kernel.org>
+Cc: <stable@vger.kernel.org> [4.9+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/zsmalloc.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -2092,6 +2092,11 @@ static int zs_page_migrate(struct addres
+ zs_pool_dec_isolated(pool);
+ }
+
++ if (page_zone(newpage) != page_zone(page)) {
++ dec_zone_page_state(page, NR_ZSPAGES);
++ inc_zone_page_state(newpage, NR_ZSPAGES);
++ }
++
+ reset_page(page);
+ put_page(page);
+ page = newpage;
--- /dev/null
+From 9e5f1c19800b808a37fb9815a26d382132c26c3d Mon Sep 17 00:00:00 2001
+From: Aleksandr Yashkin <a.yashkin@inango-systems.com>
+Date: Mon, 23 Dec 2019 18:38:16 +0500
+Subject: pstore/ram: Write new dumps to start of recycled zones
+
+From: Aleksandr Yashkin <a.yashkin@inango-systems.com>
+
+commit 9e5f1c19800b808a37fb9815a26d382132c26c3d upstream.
+
+The ram_core.c routines treat przs as circular buffers. When writing a
+new crash dump, the old buffer needs to be cleared so that the new dump
+doesn't end up in the wrong place (i.e. at the end).
+
+The solution to this problem is to reset the circular buffer state before
+writing a new Oops dump.
+
+Signed-off-by: Aleksandr Yashkin <a.yashkin@inango-systems.com>
+Signed-off-by: Nikolay Merinov <n.merinov@inango-systems.com>
+Signed-off-by: Ariel Gilman <a.gilman@inango-systems.com>
+Link: https://lore.kernel.org/r/20191223133816.28155-1-n.merinov@inango-systems.com
+Fixes: 896fc1f0c4c6 ("pstore/ram: Switch to persistent_ram routines")
+Cc: stable@vger.kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -437,6 +437,17 @@ static int notrace ramoops_pstore_write(
+
+ prz = cxt->dprzs[cxt->dump_write_cnt];
+
++ /*
++ * Since this is a new crash dump, we need to reset the buffer in
++ * case it still has an old dump present. Without this, the new dump
++ * will get appended, which would seriously confuse anything trying
++ * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
++ * expects to find a dump header in the beginning of buffer data, so
++ * we must reset the buffer values, in order to ensure that the
++ * header will be written to the beginning of the buffer.
++ */
++ persistent_ram_zap(prz);
++
+ /* Build header and append record contents. */
+ hlen = ramoops_write_kmsg_hdr(prz, record);
+ size = record->size;
media-cec-cec-2.0-only-bcast-messages-were-ignored.patch
media-cec-avoid-decrementing-transmit_queue_sz-if-it-is-0.patch
media-cec-check-transmit_in_progress-not-transmitting.patch
+mm-zsmalloc.c-fix-the-migrated-zspage-statistics.patch
+memcg-account-security-cred-as-well-to-kmemcg.patch
+mm-move_pages-return-valid-node-id-in-status-if-the-page-is-already-on-the-target-node.patch
+pstore-ram-write-new-dumps-to-start-of-recycled-zones.patch
+locks-print-unsigned-ino-in-proc-locks.patch
+dmaengine-fix-access-to-uninitialized-dma_slave_caps.patch
+compat_ioctl-block-handle-persistent-reservations.patch
+compat_ioctl-block-handle-blkreportzone-blkresetzone.patch