--- /dev/null
+From 0f394daef89b38d58c91118a2b08b8a1b316703b Mon Sep 17 00:00:00 2001
+From: Kevin Hao <haokexin@gmail.com>
+Date: Mon, 20 Jan 2020 12:35:47 +0800
+Subject: irqdomain: Fix a memory leak in irq_domain_push_irq()
+
+From: Kevin Hao <haokexin@gmail.com>
+
+commit 0f394daef89b38d58c91118a2b08b8a1b316703b upstream.
+
+Fix a memory leak reported by kmemleak:
+unreferenced object 0xffff000bc6f50e80 (size 128):
+ comm "kworker/23:2", pid 201, jiffies 4294894947 (age 942.132s)
+ hex dump (first 32 bytes):
+ 00 00 00 00 41 00 00 00 86 c0 03 00 00 00 00 00 ....A...........
+ 00 a0 b2 c6 0b 00 ff ff 40 51 fd 10 00 80 ff ff ........@Q......
+ backtrace:
+ [<00000000e62d2240>] kmem_cache_alloc_trace+0x1a4/0x320
+ [<00000000279143c9>] irq_domain_push_irq+0x7c/0x188
+ [<00000000d9f4c154>] thunderx_gpio_probe+0x3ac/0x438
+ [<00000000fd09ec22>] pci_device_probe+0xe4/0x198
+ [<00000000d43eca75>] really_probe+0xdc/0x320
+ [<00000000d3ebab09>] driver_probe_device+0x5c/0xf0
+ [<000000005b3ecaa0>] __device_attach_driver+0x88/0xc0
+ [<000000004e5915f5>] bus_for_each_drv+0x7c/0xc8
+ [<0000000079d4db41>] __device_attach+0xe4/0x140
+ [<00000000883bbda9>] device_initial_probe+0x18/0x20
+ [<000000003be59ef6>] bus_probe_device+0x98/0xa0
+ [<0000000039b03d3f>] deferred_probe_work_func+0x74/0xa8
+ [<00000000870934ce>] process_one_work+0x1c8/0x470
+ [<00000000e3cce570>] worker_thread+0x1f8/0x428
+ [<000000005d64975e>] kthread+0xfc/0x128
+ [<00000000f0eaa764>] ret_from_fork+0x10/0x18
+
+Fixes: 495c38d3001f ("irqdomain: Add irq_domain_{push,pop}_irq() functions")
+Signed-off-by: Kevin Hao <haokexin@gmail.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200120043547.22271-1-haokexin@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/irq/irqdomain.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -1459,6 +1459,7 @@ int irq_domain_push_irq(struct irq_domai
+ if (rv) {
+ /* Restore the original irq_data. */
+ *root_irq_data = *child_irq_data;
++ kfree(child_irq_data);
+ goto error;
+ }
+
--- /dev/null
+From 3e21d9a501bf99aee2e5835d7f34d8c823f115b5 Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Thu, 30 Jan 2020 22:13:51 -0800
+Subject: lib/test_kasan.c: fix memory leak in kmalloc_oob_krealloc_more()
+
+From: Gustavo A. R. Silva <gustavo@embeddedor.com>
+
+commit 3e21d9a501bf99aee2e5835d7f34d8c823f115b5 upstream.
+
+In case memory resources for _ptr2_ were allocated, release them before
+return.
+
+Notice that in case _ptr1_ happens to be NULL, krealloc() behaves
+exactly like kmalloc().
+
+Addresses-Coverity-ID: 1490594 ("Resource leak")
+Link: http://lkml.kernel.org/r/20200123160115.GA4202@embeddedor
+Fixes: 3f15801cdc23 ("lib: add kasan test module")
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/test_kasan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/lib/test_kasan.c
++++ b/lib/test_kasan.c
+@@ -157,6 +157,7 @@ static noinline void __init kmalloc_oob_
+ if (!ptr1 || !ptr2) {
+ pr_err("Allocation failed\n");
+ kfree(ptr1);
++ kfree(ptr2);
+ return;
+ }
+
--- /dev/null
+From 4a873f3fa5d6ca52e446d306dd7194dd86a09422 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Mon, 16 Dec 2019 15:15:01 +0100
+Subject: media: v4l2-core: compat: ignore native command codes
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 4a873f3fa5d6ca52e446d306dd7194dd86a09422 upstream.
+
+The do_video_ioctl() compat handler converts the compat command
+codes into the native ones before processing further, but this
+causes problems for 32-bit user applications that pass a command
+code that matches a 64-bit native number, which will then be
+handled the same way.
+
+Specifically, this breaks VIDIOC_DQEVENT_TIME from user space
+applications with 64-bit time_t, as the structure layout is
+the same as the native 64-bit layout on many architectures
+(x86 being the notable exception).
+
+Change the handler to use the converted command code only for
+passing into the native ioctl handler, not for deciding on the
+conversion, in order to make the compat behavior match the
+native behavior.
+
+Actual support for the 64-bit time_t version of VIDIOC_DQEVENT_TIME
+and other commands still needs to be added in a separate patch.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 148 +++++++++++++-------------
+ 1 file changed, 75 insertions(+), 73 deletions(-)
+
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -1183,36 +1183,38 @@ static long do_video_ioctl(struct file *
+ u32 aux_space;
+ int compatible_arg = 1;
+ long err = 0;
++ unsigned int ncmd;
+
+ /*
+ * 1. When struct size is different, converts the command.
+ */
+ switch (cmd) {
+- case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
+- case VIDIOC_S_FMT32: cmd = VIDIOC_S_FMT; break;
+- case VIDIOC_QUERYBUF32: cmd = VIDIOC_QUERYBUF; break;
+- case VIDIOC_G_FBUF32: cmd = VIDIOC_G_FBUF; break;
+- case VIDIOC_S_FBUF32: cmd = VIDIOC_S_FBUF; break;
+- case VIDIOC_QBUF32: cmd = VIDIOC_QBUF; break;
+- case VIDIOC_DQBUF32: cmd = VIDIOC_DQBUF; break;
+- case VIDIOC_ENUMSTD32: cmd = VIDIOC_ENUMSTD; break;
+- case VIDIOC_ENUMINPUT32: cmd = VIDIOC_ENUMINPUT; break;
+- case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break;
+- case VIDIOC_G_EXT_CTRLS32: cmd = VIDIOC_G_EXT_CTRLS; break;
+- case VIDIOC_S_EXT_CTRLS32: cmd = VIDIOC_S_EXT_CTRLS; break;
+- case VIDIOC_TRY_EXT_CTRLS32: cmd = VIDIOC_TRY_EXT_CTRLS; break;
+- case VIDIOC_DQEVENT32: cmd = VIDIOC_DQEVENT; break;
+- case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break;
+- case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break;
+- case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break;
+- case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break;
+- case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break;
+- case VIDIOC_G_OUTPUT32: cmd = VIDIOC_G_OUTPUT; break;
+- case VIDIOC_S_OUTPUT32: cmd = VIDIOC_S_OUTPUT; break;
+- case VIDIOC_CREATE_BUFS32: cmd = VIDIOC_CREATE_BUFS; break;
+- case VIDIOC_PREPARE_BUF32: cmd = VIDIOC_PREPARE_BUF; break;
+- case VIDIOC_G_EDID32: cmd = VIDIOC_G_EDID; break;
+- case VIDIOC_S_EDID32: cmd = VIDIOC_S_EDID; break;
++ case VIDIOC_G_FMT32: ncmd = VIDIOC_G_FMT; break;
++ case VIDIOC_S_FMT32: ncmd = VIDIOC_S_FMT; break;
++ case VIDIOC_QUERYBUF32: ncmd = VIDIOC_QUERYBUF; break;
++ case VIDIOC_G_FBUF32: ncmd = VIDIOC_G_FBUF; break;
++ case VIDIOC_S_FBUF32: ncmd = VIDIOC_S_FBUF; break;
++ case VIDIOC_QBUF32: ncmd = VIDIOC_QBUF; break;
++ case VIDIOC_DQBUF32: ncmd = VIDIOC_DQBUF; break;
++ case VIDIOC_ENUMSTD32: ncmd = VIDIOC_ENUMSTD; break;
++ case VIDIOC_ENUMINPUT32: ncmd = VIDIOC_ENUMINPUT; break;
++ case VIDIOC_TRY_FMT32: ncmd = VIDIOC_TRY_FMT; break;
++ case VIDIOC_G_EXT_CTRLS32: ncmd = VIDIOC_G_EXT_CTRLS; break;
++ case VIDIOC_S_EXT_CTRLS32: ncmd = VIDIOC_S_EXT_CTRLS; break;
++ case VIDIOC_TRY_EXT_CTRLS32: ncmd = VIDIOC_TRY_EXT_CTRLS; break;
++ case VIDIOC_DQEVENT32: ncmd = VIDIOC_DQEVENT; break;
++ case VIDIOC_OVERLAY32: ncmd = VIDIOC_OVERLAY; break;
++ case VIDIOC_STREAMON32: ncmd = VIDIOC_STREAMON; break;
++ case VIDIOC_STREAMOFF32: ncmd = VIDIOC_STREAMOFF; break;
++ case VIDIOC_G_INPUT32: ncmd = VIDIOC_G_INPUT; break;
++ case VIDIOC_S_INPUT32: ncmd = VIDIOC_S_INPUT; break;
++ case VIDIOC_G_OUTPUT32: ncmd = VIDIOC_G_OUTPUT; break;
++ case VIDIOC_S_OUTPUT32: ncmd = VIDIOC_S_OUTPUT; break;
++ case VIDIOC_CREATE_BUFS32: ncmd = VIDIOC_CREATE_BUFS; break;
++ case VIDIOC_PREPARE_BUF32: ncmd = VIDIOC_PREPARE_BUF; break;
++ case VIDIOC_G_EDID32: ncmd = VIDIOC_G_EDID; break;
++ case VIDIOC_S_EDID32: ncmd = VIDIOC_S_EDID; break;
++ default: ncmd = cmd; break;
+ }
+
+ /*
+@@ -1221,11 +1223,11 @@ static long do_video_ioctl(struct file *
+ * argument into it.
+ */
+ switch (cmd) {
+- case VIDIOC_OVERLAY:
+- case VIDIOC_STREAMON:
+- case VIDIOC_STREAMOFF:
+- case VIDIOC_S_INPUT:
+- case VIDIOC_S_OUTPUT:
++ case VIDIOC_OVERLAY32:
++ case VIDIOC_STREAMON32:
++ case VIDIOC_STREAMOFF32:
++ case VIDIOC_S_INPUT32:
++ case VIDIOC_S_OUTPUT32:
+ err = alloc_userspace(sizeof(unsigned int), 0, &new_p64);
+ if (!err && assign_in_user((unsigned int __user *)new_p64,
+ (compat_uint_t __user *)p32))
+@@ -1233,23 +1235,23 @@ static long do_video_ioctl(struct file *
+ compatible_arg = 0;
+ break;
+
+- case VIDIOC_G_INPUT:
+- case VIDIOC_G_OUTPUT:
++ case VIDIOC_G_INPUT32:
++ case VIDIOC_G_OUTPUT32:
+ err = alloc_userspace(sizeof(unsigned int), 0, &new_p64);
+ compatible_arg = 0;
+ break;
+
+- case VIDIOC_G_EDID:
+- case VIDIOC_S_EDID:
++ case VIDIOC_G_EDID32:
++ case VIDIOC_S_EDID32:
+ err = alloc_userspace(sizeof(struct v4l2_edid), 0, &new_p64);
+ if (!err)
+ err = get_v4l2_edid32(new_p64, p32);
+ compatible_arg = 0;
+ break;
+
+- case VIDIOC_G_FMT:
+- case VIDIOC_S_FMT:
+- case VIDIOC_TRY_FMT:
++ case VIDIOC_G_FMT32:
++ case VIDIOC_S_FMT32:
++ case VIDIOC_TRY_FMT32:
+ err = bufsize_v4l2_format(p32, &aux_space);
+ if (!err)
+ err = alloc_userspace(sizeof(struct v4l2_format),
+@@ -1262,7 +1264,7 @@ static long do_video_ioctl(struct file *
+ compatible_arg = 0;
+ break;
+
+- case VIDIOC_CREATE_BUFS:
++ case VIDIOC_CREATE_BUFS32:
+ err = bufsize_v4l2_create(p32, &aux_space);
+ if (!err)
+ err = alloc_userspace(sizeof(struct v4l2_create_buffers),
+@@ -1275,10 +1277,10 @@ static long do_video_ioctl(struct file *
+ compatible_arg = 0;
+ break;
+
+- case VIDIOC_PREPARE_BUF:
+- case VIDIOC_QUERYBUF:
+- case VIDIOC_QBUF:
+- case VIDIOC_DQBUF:
++ case VIDIOC_PREPARE_BUF32:
++ case VIDIOC_QUERYBUF32:
++ case VIDIOC_QBUF32:
++ case VIDIOC_DQBUF32:
+ err = bufsize_v4l2_buffer(p32, &aux_space);
+ if (!err)
+ err = alloc_userspace(sizeof(struct v4l2_buffer),
+@@ -1291,7 +1293,7 @@ static long do_video_ioctl(struct file *
+ compatible_arg = 0;
+ break;
+
+- case VIDIOC_S_FBUF:
++ case VIDIOC_S_FBUF32:
+ err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
+ &new_p64);
+ if (!err)
+@@ -1299,13 +1301,13 @@ static long do_video_ioctl(struct file *
+ compatible_arg = 0;
+ break;
+
+- case VIDIOC_G_FBUF:
++ case VIDIOC_G_FBUF32:
+ err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
+ &new_p64);
+ compatible_arg = 0;
+ break;
+
+- case VIDIOC_ENUMSTD:
++ case VIDIOC_ENUMSTD32:
+ err = alloc_userspace(sizeof(struct v4l2_standard), 0,
+ &new_p64);
+ if (!err)
+@@ -1313,16 +1315,16 @@ static long do_video_ioctl(struct file *
+ compatible_arg = 0;
+ break;
+
+- case VIDIOC_ENUMINPUT:
++ case VIDIOC_ENUMINPUT32:
+ err = alloc_userspace(sizeof(struct v4l2_input), 0, &new_p64);
+ if (!err)
+ err = get_v4l2_input32(new_p64, p32);
+ compatible_arg = 0;
+ break;
+
+- case VIDIOC_G_EXT_CTRLS:
+- case VIDIOC_S_EXT_CTRLS:
+- case VIDIOC_TRY_EXT_CTRLS:
++ case VIDIOC_G_EXT_CTRLS32:
++ case VIDIOC_S_EXT_CTRLS32:
++ case VIDIOC_TRY_EXT_CTRLS32:
+ err = bufsize_v4l2_ext_controls(p32, &aux_space);
+ if (!err)
+ err = alloc_userspace(sizeof(struct v4l2_ext_controls),
+@@ -1334,7 +1336,7 @@ static long do_video_ioctl(struct file *
+ }
+ compatible_arg = 0;
+ break;
+- case VIDIOC_DQEVENT:
++ case VIDIOC_DQEVENT32:
+ err = alloc_userspace(sizeof(struct v4l2_event), 0, &new_p64);
+ compatible_arg = 0;
+ break;
+@@ -1352,9 +1354,9 @@ static long do_video_ioctl(struct file *
+ * Otherwise, it will pass the newly allocated @new_p64 argument.
+ */
+ if (compatible_arg)
+- err = native_ioctl(file, cmd, (unsigned long)p32);
++ err = native_ioctl(file, ncmd, (unsigned long)p32);
+ else
+- err = native_ioctl(file, cmd, (unsigned long)new_p64);
++ err = native_ioctl(file, ncmd, (unsigned long)new_p64);
+
+ if (err == -ENOTTY)
+ return err;
+@@ -1370,13 +1372,13 @@ static long do_video_ioctl(struct file *
+ * the blocks to maximum allowed value.
+ */
+ switch (cmd) {
+- case VIDIOC_G_EXT_CTRLS:
+- case VIDIOC_S_EXT_CTRLS:
+- case VIDIOC_TRY_EXT_CTRLS:
++ case VIDIOC_G_EXT_CTRLS32:
++ case VIDIOC_S_EXT_CTRLS32:
++ case VIDIOC_TRY_EXT_CTRLS32:
+ if (put_v4l2_ext_controls32(file, new_p64, p32))
+ err = -EFAULT;
+ break;
+- case VIDIOC_S_EDID:
++ case VIDIOC_S_EDID32:
+ if (put_v4l2_edid32(new_p64, p32))
+ err = -EFAULT;
+ break;
+@@ -1389,49 +1391,49 @@ static long do_video_ioctl(struct file *
+ * the original 32 bits structure.
+ */
+ switch (cmd) {
+- case VIDIOC_S_INPUT:
+- case VIDIOC_S_OUTPUT:
+- case VIDIOC_G_INPUT:
+- case VIDIOC_G_OUTPUT:
++ case VIDIOC_S_INPUT32:
++ case VIDIOC_S_OUTPUT32:
++ case VIDIOC_G_INPUT32:
++ case VIDIOC_G_OUTPUT32:
+ if (assign_in_user((compat_uint_t __user *)p32,
+ ((unsigned int __user *)new_p64)))
+ err = -EFAULT;
+ break;
+
+- case VIDIOC_G_FBUF:
++ case VIDIOC_G_FBUF32:
+ err = put_v4l2_framebuffer32(new_p64, p32);
+ break;
+
+- case VIDIOC_DQEVENT:
++ case VIDIOC_DQEVENT32:
+ err = put_v4l2_event32(new_p64, p32);
+ break;
+
+- case VIDIOC_G_EDID:
++ case VIDIOC_G_EDID32:
+ err = put_v4l2_edid32(new_p64, p32);
+ break;
+
+- case VIDIOC_G_FMT:
+- case VIDIOC_S_FMT:
+- case VIDIOC_TRY_FMT:
++ case VIDIOC_G_FMT32:
++ case VIDIOC_S_FMT32:
++ case VIDIOC_TRY_FMT32:
+ err = put_v4l2_format32(new_p64, p32);
+ break;
+
+- case VIDIOC_CREATE_BUFS:
++ case VIDIOC_CREATE_BUFS32:
+ err = put_v4l2_create32(new_p64, p32);
+ break;
+
+- case VIDIOC_PREPARE_BUF:
+- case VIDIOC_QUERYBUF:
+- case VIDIOC_QBUF:
+- case VIDIOC_DQBUF:
++ case VIDIOC_PREPARE_BUF32:
++ case VIDIOC_QUERYBUF32:
++ case VIDIOC_QBUF32:
++ case VIDIOC_DQBUF32:
+ err = put_v4l2_buffer32(new_p64, p32);
+ break;
+
+- case VIDIOC_ENUMSTD:
++ case VIDIOC_ENUMSTD32:
+ err = put_v4l2_standard32(new_p64, p32);
+ break;
+
+- case VIDIOC_ENUMINPUT:
++ case VIDIOC_ENUMINPUT32:
+ err = put_v4l2_input32(new_p64, p32);
+ break;
+ }
--- /dev/null
+From 3c7470b6f68434acae459482ab920d1e3fabd1c7 Mon Sep 17 00:00:00 2001
+From: John Hubbard <jhubbard@nvidia.com>
+Date: Thu, 30 Jan 2020 22:12:50 -0800
+Subject: media/v4l2-core: set pages dirty upon releasing DMA buffers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: John Hubbard <jhubbard@nvidia.com>
+
+commit 3c7470b6f68434acae459482ab920d1e3fabd1c7 upstream.
+
+After DMA is complete, and the device and CPU caches are synchronized,
+it's still required to mark the CPU pages as dirty, if the data was
+coming from the device. However, this driver was just issuing a bare
+put_page() call, without any set_page_dirty*() call.
+
+Fix the problem, by calling set_page_dirty_lock() if the CPU pages were
+potentially receiving data from the device.
+
+Link: http://lkml.kernel.org/r/20200107224558.2362728-11-jhubbard@nvidia.com
+Signed-off-by: John Hubbard <jhubbard@nvidia.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
+Cc: <stable@vger.kernel.org>
+Cc: Alex Williamson <alex.williamson@redhat.com>
+Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Cc: Björn Töpel <bjorn.topel@intel.com>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Ira Weiny <ira.weiny@intel.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Jason Gunthorpe <jgg@mellanox.com>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Jerome Glisse <jglisse@redhat.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Kirill A. Shutemov <kirill@shutemov.name>
+Cc: Leon Romanovsky <leonro@mellanox.com>
+Cc: Mike Rapoport <rppt@linux.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/v4l2-core/videobuf-dma-sg.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
++++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
+@@ -349,8 +349,11 @@ int videobuf_dma_free(struct videobuf_dm
+ BUG_ON(dma->sglen);
+
+ if (dma->pages) {
+- for (i = 0; i < dma->nr_pages; i++)
++ for (i = 0; i < dma->nr_pages; i++) {
++ if (dma->direction == DMA_FROM_DEVICE)
++ set_page_dirty_lock(dma->pages[i]);
+ put_page(dma->pages[i]);
++ }
+ kfree(dma->pages);
+ dma->pages = NULL;
+ }
--- /dev/null
+From f51e50db4c20d46930b33be3f208851265694f3e Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Tue, 17 Dec 2019 21:00:22 +0100
+Subject: media: v4l2-rect.h: fix v4l2_rect_map_inside() top/left adjustments
+
+From: Helen Koike <helen.koike@collabora.com>
+
+commit f51e50db4c20d46930b33be3f208851265694f3e upstream.
+
+boundary->width and boundary->height are sizes relative to
+boundary->left and boundary->top coordinates, but they were not being
+taken into consideration to adjust r->left and r->top, leading to the
+following error:
+
+Consider the follow as initial values for boundary and r:
+
+struct v4l2_rect boundary = {
+ .left = 100,
+ .top = 100,
+ .width = 800,
+ .height = 600,
+}
+
+struct v4l2_rect r = {
+ .left = 0,
+ .top = 0,
+ .width = 1920,
+ .height = 960,
+}
+
+calling v4l2_rect_map_inside(&r, &boundary) was modifying r to:
+
+r = {
+ .left = 0,
+ .top = 0,
+ .width = 800,
+ .height = 600,
+}
+
+Which is wrongly outside the boundary rectangle, because:
+
+ v4l2_rect_set_max_size(r, boundary); // r->width = 800, r->height = 600
+ ...
+ if (r->left + r->width > boundary->width) // true
+ r->left = boundary->width - r->width; // r->left = 800 - 800
+ if (r->top + r->height > boundary->height) // true
+ r->top = boundary->height - r->height; // r->top = 600 - 600
+
+Fix this by considering top/left coordinates from boundary.
+
+Fixes: ac49de8c49d7 ("[media] v4l2-rect.h: new header with struct v4l2_rect helper functions")
+Signed-off-by: Helen Koike <helen.koike@collabora.com>
+Cc: <stable@vger.kernel.org> # for v4.7 and up
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/media/v4l2-rect.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/include/media/v4l2-rect.h
++++ b/include/media/v4l2-rect.h
+@@ -63,10 +63,10 @@ static inline void v4l2_rect_map_inside(
+ r->left = boundary->left;
+ if (r->top < boundary->top)
+ r->top = boundary->top;
+- if (r->left + r->width > boundary->width)
+- r->left = boundary->width - r->width;
+- if (r->top + r->height > boundary->height)
+- r->top = boundary->height - r->height;
++ if (r->left + r->width > boundary->left + boundary->width)
++ r->left = boundary->left + boundary->width - r->width;
++ if (r->top + r->height > boundary->top + boundary->height)
++ r->top = boundary->top + boundary->height - r->height;
+ }
+
+ /**
--- /dev/null
+From 68f23b89067fdf187763e75a56087550624fdbee Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Date: Thu, 30 Jan 2020 22:11:04 -0800
+Subject: memcg: fix a crash in wb_workfn when a device disappears
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+commit 68f23b89067fdf187763e75a56087550624fdbee upstream.
+
+Without memcg, there is a one-to-one mapping between the bdi and
+bdi_writeback structures. In this world, things are fairly
+straightforward; the first thing bdi_unregister() does is to shutdown
+the bdi_writeback structure (or wb), and part of that writeback ensures
+that no other work is queued against the wb, and that the wb is fully
+drained.
+
+With memcg, however, there is a one-to-many relationship between the bdi
+and bdi_writeback structures; that is, there are multiple wb objects
+which can all point to a single bdi. There is a refcount which prevents
+the bdi object from being released (and hence, unregistered). So in
+theory, the bdi_unregister() *should* only get called once its refcount
+goes to zero (bdi_put will drop the refcount, and when it is zero,
+release_bdi gets called, which calls bdi_unregister).
+
+Unfortunately, del_gendisk() in block/genhd.c never got the memo about
+the Brave New memcg World, and calls bdi_unregister directly. It does
+this without informing the file system, or the memcg code, or anything
+else. This causes the root wb associated with the bdi to be
+unregistered, but none of the memcg-specific wb's are shutdown. So when
+one of these wb's are woken up to do delayed work, they try to
+dereference their wb->bdi->dev to fetch the device name, but
+unfortunately bdi->dev is now NULL, thanks to the bdi_unregister()
+called by del_gendisk(). As a result, *boom*.
+
+Fortunately, it looks like the rest of the writeback path is perfectly
+happy with bdi->dev and bdi->owner being NULL, so the simplest fix is to
+create a bdi_dev_name() function which can handle bdi->dev being NULL.
+This also allows us to bulletproof the writeback tracepoints to prevent
+them from dereferencing a NULL pointer and crashing the kernel if one is
+tracing with memcg's enabled, and an iSCSI device dies or a USB storage
+stick is pulled.
+
+The most common way of triggering this will be hotremoval of a device
+while writeback with memcg enabled is going on. It was triggering
+several times a day in a heavily loaded production environment.
+
+Google Bug Id: 145475544
+
+Link: https://lore.kernel.org/r/20191227194829.150110-1-tytso@mit.edu
+Link: http://lkml.kernel.org/r/20191228005211.163952-1-tytso@mit.edu
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: Chris Mason <clm@fb.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fs-writeback.c | 2 +-
+ include/linux/backing-dev.h | 10 ++++++++++
+ include/trace/events/writeback.h | 37 +++++++++++++++++--------------------
+ mm/backing-dev.c | 1 +
+ 4 files changed, 29 insertions(+), 21 deletions(-)
+
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -2063,7 +2063,7 @@ void wb_workfn(struct work_struct *work)
+ struct bdi_writeback, dwork);
+ long pages_written;
+
+- set_worker_desc("flush-%s", dev_name(wb->bdi->dev));
++ set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
+ current->flags |= PF_SWAPWRITE;
+
+ if (likely(!current_is_workqueue_rescuer() ||
+--- a/include/linux/backing-dev.h
++++ b/include/linux/backing-dev.h
+@@ -13,6 +13,7 @@
+ #include <linux/fs.h>
+ #include <linux/sched.h>
+ #include <linux/blkdev.h>
++#include <linux/device.h>
+ #include <linux/writeback.h>
+ #include <linux/blk-cgroup.h>
+ #include <linux/backing-dev-defs.h>
+@@ -504,4 +505,13 @@ static inline int bdi_rw_congested(struc
+ (1 << WB_async_congested));
+ }
+
++extern const char *bdi_unknown_name;
++
++static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
++{
++ if (!bdi || !bdi->dev)
++ return bdi_unknown_name;
++ return dev_name(bdi->dev);
++}
++
+ #endif /* _LINUX_BACKING_DEV_H */
+--- a/include/trace/events/writeback.h
++++ b/include/trace/events/writeback.h
+@@ -67,8 +67,8 @@ DECLARE_EVENT_CLASS(writeback_page_templ
+
+ TP_fast_assign(
+ strscpy_pad(__entry->name,
+- mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
+- 32);
++ bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
++ NULL), 32);
+ __entry->ino = mapping ? mapping->host->i_ino : 0;
+ __entry->index = page->index;
+ ),
+@@ -111,8 +111,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inod
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
+
+ /* may be called for files on pseudo FSes w/ unregistered bdi */
+- strscpy_pad(__entry->name,
+- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
+ __entry->ino = inode->i_ino;
+ __entry->state = inode->i_state;
+ __entry->flags = flags;
+@@ -193,7 +192,7 @@ TRACE_EVENT(inode_foreign_history,
+ ),
+
+ TP_fast_assign(
+- strncpy(__entry->name, dev_name(inode_to_bdi(inode)->dev), 32);
++ strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
+ __entry->ino = inode->i_ino;
+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
+ __entry->history = history;
+@@ -222,7 +221,7 @@ TRACE_EVENT(inode_switch_wbs,
+ ),
+
+ TP_fast_assign(
+- strncpy(__entry->name, dev_name(old_wb->bdi->dev), 32);
++ strncpy(__entry->name, bdi_dev_name(old_wb->bdi), 32);
+ __entry->ino = inode->i_ino;
+ __entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
+ __entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
+@@ -255,7 +254,7 @@ TRACE_EVENT(track_foreign_dirty,
+ struct address_space *mapping = page_mapping(page);
+ struct inode *inode = mapping ? mapping->host : NULL;
+
+- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
++ strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
+ __entry->bdi_id = wb->bdi->id;
+ __entry->ino = inode ? inode->i_ino : 0;
+ __entry->memcg_id = wb->memcg_css->id;
+@@ -288,7 +287,7 @@ TRACE_EVENT(flush_foreign,
+ ),
+
+ TP_fast_assign(
+- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
++ strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
+ __entry->frn_bdi_id = frn_bdi_id;
+ __entry->frn_memcg_id = frn_memcg_id;
+@@ -318,7 +317,7 @@ DECLARE_EVENT_CLASS(writeback_write_inod
+
+ TP_fast_assign(
+ strscpy_pad(__entry->name,
+- dev_name(inode_to_bdi(inode)->dev), 32);
++ bdi_dev_name(inode_to_bdi(inode)), 32);
+ __entry->ino = inode->i_ino;
+ __entry->sync_mode = wbc->sync_mode;
+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
+@@ -361,9 +360,7 @@ DECLARE_EVENT_CLASS(writeback_work_class
+ __field(unsigned int, cgroup_ino)
+ ),
+ TP_fast_assign(
+- strscpy_pad(__entry->name,
+- wb->bdi->dev ? dev_name(wb->bdi->dev) :
+- "(unknown)", 32);
++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
+ __entry->nr_pages = work->nr_pages;
+ __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
+ __entry->sync_mode = work->sync_mode;
+@@ -416,7 +413,7 @@ DECLARE_EVENT_CLASS(writeback_class,
+ __field(unsigned int, cgroup_ino)
+ ),
+ TP_fast_assign(
+- strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
+ ),
+ TP_printk("bdi %s: cgroup_ino=%u",
+@@ -438,7 +435,7 @@ TRACE_EVENT(writeback_bdi_register,
+ __array(char, name, 32)
+ ),
+ TP_fast_assign(
+- strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
+ ),
+ TP_printk("bdi %s",
+ __entry->name
+@@ -463,7 +460,7 @@ DECLARE_EVENT_CLASS(wbc_class,
+ ),
+
+ TP_fast_assign(
+- strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
+ __entry->nr_to_write = wbc->nr_to_write;
+ __entry->pages_skipped = wbc->pages_skipped;
+ __entry->sync_mode = wbc->sync_mode;
+@@ -514,7 +511,7 @@ TRACE_EVENT(writeback_queue_io,
+ ),
+ TP_fast_assign(
+ unsigned long *older_than_this = work->older_than_this;
+- strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
+ __entry->older = older_than_this ? *older_than_this : 0;
+ __entry->age = older_than_this ?
+ (jiffies - *older_than_this) * 1000 / HZ : -1;
+@@ -600,7 +597,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
+ ),
+
+ TP_fast_assign(
+- strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
++ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
+ __entry->write_bw = KBps(wb->write_bandwidth);
+ __entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
+ __entry->dirty_rate = KBps(dirty_rate);
+@@ -665,7 +662,7 @@ TRACE_EVENT(balance_dirty_pages,
+
+ TP_fast_assign(
+ unsigned long freerun = (thresh + bg_thresh) / 2;
+- strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
++ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
+
+ __entry->limit = global_wb_domain.dirty_limit;
+ __entry->setpoint = (global_wb_domain.dirty_limit +
+@@ -726,7 +723,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
+
+ TP_fast_assign(
+ strscpy_pad(__entry->name,
+- dev_name(inode_to_bdi(inode)->dev), 32);
++ bdi_dev_name(inode_to_bdi(inode)), 32);
+ __entry->ino = inode->i_ino;
+ __entry->state = inode->i_state;
+ __entry->dirtied_when = inode->dirtied_when;
+@@ -800,7 +797,7 @@ DECLARE_EVENT_CLASS(writeback_single_ino
+
+ TP_fast_assign(
+ strscpy_pad(__entry->name,
+- dev_name(inode_to_bdi(inode)->dev), 32);
++ bdi_dev_name(inode_to_bdi(inode)), 32);
+ __entry->ino = inode->i_ino;
+ __entry->state = inode->i_state;
+ __entry->dirtied_when = inode->dirtied_when;
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -21,6 +21,7 @@ struct backing_dev_info noop_backing_dev
+ EXPORT_SYMBOL_GPL(noop_backing_dev_info);
+
+ static struct class *bdi_class;
++const char *bdi_unknown_name = "(unknown)";
+
+ /*
+ * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
--- /dev/null
+From f1037ec0cc8ac1a450974ad9754e991f72884f48 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Thu, 30 Jan 2020 22:11:17 -0800
+Subject: mm/memory_hotplug: fix remove_memory() lockdep splat
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit f1037ec0cc8ac1a450974ad9754e991f72884f48 upstream.
+
+The daxctl unit test for the dax_kmem driver currently triggers the
+(false positive) lockdep splat below. It results from the fact that
+remove_memory_block_devices() is invoked under the mem_hotplug_lock()
+causing lockdep entanglements with cpu_hotplug_lock() and sysfs (kernfs
+active state tracking). It is a false positive because the sysfs
+attribute path triggering the memory remove is not the same attribute
+path associated with memory-block device.
+
+sysfs_break_active_protection() is not applicable since there is no real
+deadlock conflict, instead move memory-block device removal outside the
+lock. The mem_hotplug_lock() is not needed to synchronize the
+memory-block device removal vs the page online state, that is already
+handled by lock_device_hotplug(). Specifically, lock_device_hotplug()
+is sufficient to allow try_remove_memory() to check the offline state of
+the memblocks and be assured that any in progress online attempts are
+flushed / blocked by kernfs_drain() / attribute removal.
+
+The add_memory() path safely creates memblock devices under the
+mem_hotplug_lock(). There is no kernfs active state synchronization in
+the memblock device_register() path, so nothing to fix there.
+
+This change is only possible thanks to the recent change that refactored
+memory block device removal out of arch_remove_memory() (commit
+4c4b7f9ba948 "mm/memory_hotplug: remove memory block devices before
+arch_remove_memory()"), and David's due diligence tracking down the
+guarantees afforded by kernfs_drain(). Not flagged for -stable since
+this only impacts ongoing development and lockdep validation, not a
+runtime issue.
+
+ ======================================================
+ WARNING: possible circular locking dependency detected
+ 5.5.0-rc3+ #230 Tainted: G OE
+ ------------------------------------------------------
+ lt-daxctl/6459 is trying to acquire lock:
+ ffff99c7f0003510 (kn->count#241){++++}, at: kernfs_remove_by_name_ns+0x41/0x80
+
+ but task is already holding lock:
+ ffffffffa76a5450 (mem_hotplug_lock.rw_sem){++++}, at: percpu_down_write+0x20/0xe0
+
+ which lock already depends on the new lock.
+
+ the existing dependency chain (in reverse order) is:
+
+ -> #2 (mem_hotplug_lock.rw_sem){++++}:
+ __lock_acquire+0x39c/0x790
+ lock_acquire+0xa2/0x1b0
+ get_online_mems+0x3e/0xb0
+ kmem_cache_create_usercopy+0x2e/0x260
+ kmem_cache_create+0x12/0x20
+ ptlock_cache_init+0x20/0x28
+ start_kernel+0x243/0x547
+ secondary_startup_64+0xb6/0xc0
+
+ -> #1 (cpu_hotplug_lock.rw_sem){++++}:
+ __lock_acquire+0x39c/0x790
+ lock_acquire+0xa2/0x1b0
+ cpus_read_lock+0x3e/0xb0
+ online_pages+0x37/0x300
+ memory_subsys_online+0x17d/0x1c0
+ device_online+0x60/0x80
+ state_store+0x65/0xd0
+ kernfs_fop_write+0xcf/0x1c0
+ vfs_write+0xdb/0x1d0
+ ksys_write+0x65/0xe0
+ do_syscall_64+0x5c/0xa0
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+ -> #0 (kn->count#241){++++}:
+ check_prev_add+0x98/0xa40
+ validate_chain+0x576/0x860
+ __lock_acquire+0x39c/0x790
+ lock_acquire+0xa2/0x1b0
+ __kernfs_remove+0x25f/0x2e0
+ kernfs_remove_by_name_ns+0x41/0x80
+ remove_files.isra.0+0x30/0x70
+ sysfs_remove_group+0x3d/0x80
+ sysfs_remove_groups+0x29/0x40
+ device_remove_attrs+0x39/0x70
+ device_del+0x16a/0x3f0
+ device_unregister+0x16/0x60
+ remove_memory_block_devices+0x82/0xb0
+ try_remove_memory+0xb5/0x130
+ remove_memory+0x26/0x40
+ dev_dax_kmem_remove+0x44/0x6a [kmem]
+ device_release_driver_internal+0xe4/0x1c0
+ unbind_store+0xef/0x120
+ kernfs_fop_write+0xcf/0x1c0
+ vfs_write+0xdb/0x1d0
+ ksys_write+0x65/0xe0
+ do_syscall_64+0x5c/0xa0
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+ other info that might help us debug this:
+
+ Chain exists of:
+ kn->count#241 --> cpu_hotplug_lock.rw_sem --> mem_hotplug_lock.rw_sem
+
+ Possible unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(mem_hotplug_lock.rw_sem);
+ lock(cpu_hotplug_lock.rw_sem);
+ lock(mem_hotplug_lock.rw_sem);
+ lock(kn->count#241);
+
+ *** DEADLOCK ***
+
+No fixes tag as this has been a long standing issue that predated the
+addition of kernfs lockdep annotations.
+
+Link: http://lkml.kernel.org/r/157991441887.2763922.4770790047389427325.stgit@dwillia2-desk3.amr.corp.intel.com
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memory_hotplug.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1738,8 +1738,6 @@ static int __ref try_remove_memory(int n
+
+ BUG_ON(check_hotplug_memory_range(start, size));
+
+- mem_hotplug_begin();
+-
+ /*
+ * All memory blocks must be offlined before removing memory. Check
+ * whether all memory blocks in question are offline and return error
+@@ -1754,9 +1752,14 @@ static int __ref try_remove_memory(int n
+ memblock_free(start, size);
+ memblock_remove(start, size);
+
+- /* remove memory block devices before removing memory */
++ /*
++ * Memory block device removal under the device_hotplug_lock is
++ * a barrier against racing online attempts.
++ */
+ remove_memory_block_devices(start, size);
+
++ mem_hotplug_begin();
++
+ arch_remove_memory(nid, start, size, NULL);
+ __release_memory_resource(start, size);
+
--- /dev/null
+From 5984fabb6e82d9ab4e6305cb99694c85d46de8ae Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@linux.alibaba.com>
+Date: Thu, 30 Jan 2020 22:11:24 -0800
+Subject: mm: move_pages: report the number of non-attempted pages
+
+From: Yang Shi <yang.shi@linux.alibaba.com>
+
+commit 5984fabb6e82d9ab4e6305cb99694c85d46de8ae upstream.
+
+Since commit a49bd4d71637 ("mm, numa: rework do_pages_move"), the
+semantics of move_pages() have changed to return the number of
+non-migrated pages when they could not be migrated for non-fatal reasons
+(usually a busy page).
+
+This was an unintentional change that hasn't been noticed except for LTP
+tests which checked for the documented behavior.
+
+There are two ways to go around this change. We can even get back to
+the original behavior and return -EAGAIN whenever migrate_pages is not
+able to migrate pages due to non-fatal reasons. Another option would be
+to simply continue with the changed semantic and extend move_pages
+documentation to clarify that -errno is returned on an invalid input or
+when migration simply cannot succeed (e.g. -ENOMEM, -EBUSY) or the
+number of pages that couldn't have been migrated due to ephemeral
+reasons (e.g. page is pinned or locked for other reasons).
+
+This patch implements the second option because this behavior is in
+place for some time without anybody complaining and possibly new users
+depending on it. Also it allows to have a slightly easier error
+handling as the caller knows that it is worth to retry when err > 0.
+
+But since under the new semantics the operation is aborted immediately
+when migration fails for ephemeral reasons, we need to include the
+number of non-attempted pages in the return value as well.
+
+Link: http://lkml.kernel.org/r/1580160527-109104-1-git-send-email-yang.shi@linux.alibaba.com
+Fixes: a49bd4d71637 ("mm, numa: rework do_pages_move")
+Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
+Suggested-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Reviewed-by: Wei Yang <richardw.yang@linux.intel.com>
+Cc: <stable@vger.kernel.org> [4.17+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/migrate.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1631,8 +1631,19 @@ static int do_pages_move(struct mm_struc
+ start = i;
+ } else if (node != current_node) {
+ err = do_move_pages_to_node(mm, &pagelist, current_node);
+- if (err)
++ if (err) {
++ /*
++ * Positive err means the number of failed
++ * pages to migrate. Since we are going to
++ * abort and return the number of non-migrated
++ * pages, so need to incude the rest of the
++ * nr_pages that have not been attempted as
++ * well.
++ */
++ if (err > 0)
++ err += nr_pages - i - 1;
+ goto out;
++ }
+ err = store_status(status, start, current_node, i - start);
+ if (err)
+ goto out;
+@@ -1663,8 +1674,11 @@ static int do_pages_move(struct mm_struc
+ goto out_flush;
+
+ err = do_move_pages_to_node(mm, &pagelist, current_node);
+- if (err)
++ if (err) {
++ if (err > 0)
++ err += nr_pages - i - 1;
+ goto out;
++ }
+ if (i > start) {
+ err = store_status(status, start, current_node, i - start);
+ if (err)
+@@ -1678,6 +1692,13 @@ out_flush:
+
+ /* Make sure we do not overwrite the existing error */
+ err1 = do_move_pages_to_node(mm, &pagelist, current_node);
++ /*
++ * Don't have to report non-attempted pages here since:
++ * - If the above loop is done gracefully all pages have been
++ * attempted.
++ * - If the above loop is aborted it means a fatal error
++ * happened, should return ret.
++ */
+ if (!err1)
+ err1 = store_status(status, start, current_node, i - start);
+ if (err >= 0)
--- /dev/null
+From 1f503443e7df8dc8366608b4d810ce2d6669827c Mon Sep 17 00:00:00 2001
+From: Pingfan Liu <kernelfans@gmail.com>
+Date: Thu, 30 Jan 2020 22:11:10 -0800
+Subject: mm/sparse.c: reset section's mem_map when fully deactivated
+
+From: Pingfan Liu <kernelfans@gmail.com>
+
+commit 1f503443e7df8dc8366608b4d810ce2d6669827c upstream.
+
+After commit ba72b4c8cf60 ("mm/sparsemem: support sub-section hotplug"),
+when a mem section is fully deactivated, section_mem_map still records
+the section's start pfn, which is not used any more and will be
+reassigned during re-addition.
+
+In analogy with alloc/free pattern, it is better to clear all fields of
+section_mem_map.
+
+Besides this, it breaks the user space tool "makedumpfile" [1], which
+assumes that a hot-removed section has a NULL mem_map, instead of
+checking directly against the SECTION_MARKED_PRESENT bit. (It would be
+better for makedumpfile to change that assumption; that needs a patch.)
+
+The bug can be reproduced on IBM POWERVM by running "drmgr -c mem -r -q 5",
+triggering a crash, and saving the vmcore with makedumpfile.
+
+[1]: makedumpfile, commit e73016540293 ("[v1.6.7] Update version")
+
+Link: http://lkml.kernel.org/r/1579487594-28889-1-git-send-email-kernelfans@gmail.com
+Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Qian Cai <cai@lca.pw>
+Cc: Kazuhito Hagio <k-hagio@ab.jp.nec.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/sparse.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -787,7 +787,7 @@ static void section_deactivate(unsigned
+ ms->usage = NULL;
+ }
+ memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+- ms->section_mem_map = sparse_encode_mem_map(NULL, section_nr);
++ ms->section_mem_map = (unsigned long)NULL;
+ }
+
+ if (section_is_early && memmap)
--- /dev/null
+From fac0516b5534897bf4c4a88daa06a8cfa5611b23 Mon Sep 17 00:00:00 2001
+From: Wei Yang <richardw.yang@linux.intel.com>
+Date: Thu, 30 Jan 2020 22:11:20 -0800
+Subject: mm: thp: don't need care deferred split queue in memcg charge move path
+
+From: Wei Yang <richardw.yang@linux.intel.com>
+
+commit fac0516b5534897bf4c4a88daa06a8cfa5611b23 upstream.
+
+If compound is true, the page is a PMD-mapped THP, which implies it is
+not linked to any defer list. So the first code chunk will not be
+executed.
+
+For the same reason, it would not be proper to add this page to a
+defer list. So the second code chunk is not correct.
+
+Based on this, we should remove the defer list related code.
+
+[yang.shi@linux.alibaba.com: better patch title]
+Link: http://lkml.kernel.org/r/20200117233836.3434-1-richardw.yang@linux.intel.com
+Fixes: 87eaceb3faa5 ("mm: thp: make deferred split shrinker memcg aware")
+Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
+Suggested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Acked-by: Yang Shi <yang.shi@linux.alibaba.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
+Cc: <stable@vger.kernel.org> [5.4+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memcontrol.c | 18 ------------------
+ 1 file changed, 18 deletions(-)
+
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5465,14 +5465,6 @@ static int mem_cgroup_move_account(struc
+ __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
+ }
+
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+- if (compound && !list_empty(page_deferred_list(page))) {
+- spin_lock(&from->deferred_split_queue.split_queue_lock);
+- list_del_init(page_deferred_list(page));
+- from->deferred_split_queue.split_queue_len--;
+- spin_unlock(&from->deferred_split_queue.split_queue_lock);
+- }
+-#endif
+ /*
+ * It is safe to change page->mem_cgroup here because the page
+ * is referenced, charged, and isolated - we can't race with
+@@ -5482,16 +5474,6 @@ static int mem_cgroup_move_account(struc
+ /* caller should have done css_get */
+ page->mem_cgroup = to;
+
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+- if (compound && list_empty(page_deferred_list(page))) {
+- spin_lock(&to->deferred_split_queue.split_queue_lock);
+- list_add_tail(page_deferred_list(page),
+- &to->deferred_split_queue.split_queue);
+- to->deferred_split_queue.split_queue_len++;
+- spin_unlock(&to->deferred_split_queue.split_queue_lock);
+- }
+-#endif
+-
+ spin_unlock_irqrestore(&from->move_lock, flags);
+
+ ret = 0;
--- /dev/null
+From 38413ce39a4bd908c02257cd2f9e0c92b27886f4 Mon Sep 17 00:00:00 2001
+From: zhengbin <zhengbin13@huawei.com>
+Date: Fri, 4 Oct 2019 17:44:20 +0800
+Subject: mmc: sdhci-pci: Make function amd_sdhci_reset static
+
+From: zhengbin <zhengbin13@huawei.com>
+
+commit 38413ce39a4bd908c02257cd2f9e0c92b27886f4 upstream.
+
+Fix sparse warnings:
+
+drivers/mmc/host/sdhci-pci-core.c:1599:6: warning: symbol 'amd_sdhci_reset' was not declared. Should it be static?
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: zhengbin <zhengbin13@huawei.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-pci-core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -1604,7 +1604,7 @@ static u32 sdhci_read_present_state(stru
+ return sdhci_readl(host, SDHCI_PRESENT_STATE);
+ }
+
+-void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
++static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
+ {
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
--- /dev/null
+From e48b72a568bbd641c91dad354138d3c17d03ee6f Mon Sep 17 00:00:00 2001
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+Date: Wed, 22 Jan 2020 19:28:04 +0300
+Subject: platform/x86: intel_scu_ipc: Fix interrupt support
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+commit e48b72a568bbd641c91dad354138d3c17d03ee6f upstream.
+
+Currently the driver has disabled interrupt support for Tangier but
+actually interrupt works just fine if the command is not written twice
+in a row. Also we need to ack the interrupt in the handler.
+
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/platform/x86/intel_scu_ipc.c | 21 ++++++++-------------
+ 1 file changed, 8 insertions(+), 13 deletions(-)
+
+--- a/drivers/platform/x86/intel_scu_ipc.c
++++ b/drivers/platform/x86/intel_scu_ipc.c
+@@ -67,26 +67,22 @@
+ struct intel_scu_ipc_pdata_t {
+ u32 i2c_base;
+ u32 i2c_len;
+- u8 irq_mode;
+ };
+
+ static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
+ .i2c_base = 0xff12b000,
+ .i2c_len = 0x10,
+- .irq_mode = 0,
+ };
+
+ /* Penwell and Cloverview */
+ static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
+ .i2c_base = 0xff12b000,
+ .i2c_len = 0x10,
+- .irq_mode = 1,
+ };
+
+ static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
+ .i2c_base = 0xff00d000,
+ .i2c_len = 0x10,
+- .irq_mode = 0,
+ };
+
+ struct intel_scu_ipc_dev {
+@@ -99,6 +95,9 @@ struct intel_scu_ipc_dev {
+
+ static struct intel_scu_ipc_dev ipcdev; /* Only one for now */
+
++#define IPC_STATUS 0x04
++#define IPC_STATUS_IRQ BIT(2)
++
+ /*
+ * IPC Read Buffer (Read Only):
+ * 16 byte buffer for receiving data from SCU, if IPC command
+@@ -120,11 +119,8 @@ static DEFINE_MUTEX(ipclock); /* lock us
+ */
+ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
+ {
+- if (scu->irq_mode) {
+- reinit_completion(&scu->cmd_complete);
+- writel(cmd | IPC_IOC, scu->ipc_base);
+- }
+- writel(cmd, scu->ipc_base);
++ reinit_completion(&scu->cmd_complete);
++ writel(cmd | IPC_IOC, scu->ipc_base);
+ }
+
+ /*
+@@ -610,9 +606,10 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl);
+ static irqreturn_t ioc(int irq, void *dev_id)
+ {
+ struct intel_scu_ipc_dev *scu = dev_id;
++ int status = ipc_read_status(scu);
+
+- if (scu->irq_mode)
+- complete(&scu->cmd_complete);
++ writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
++ complete(&scu->cmd_complete);
+
+ return IRQ_HANDLED;
+ }
+@@ -638,8 +635,6 @@ static int ipc_probe(struct pci_dev *pde
+ if (!pdata)
+ return -ENODEV;
+
+- scu->irq_mode = pdata->irq_mode;
+-
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
alsa-usb-audio-fix-endianess-in-descriptor-validation.patch
alsa-usb-audio-annotate-endianess-in-scarlett-gen2-quirk.patch
alsa-dummy-fix-pcm-format-loop-in-proc-output.patch
+memcg-fix-a-crash-in-wb_workfn-when-a-device-disappears.patch
+mm-sparse.c-reset-section-s-mem_map-when-fully-deactivated.patch
+mmc-sdhci-pci-make-function-amd_sdhci_reset-static.patch
+utimes-clamp-the-timestamps-in-notify_change.patch
+mm-memory_hotplug-fix-remove_memory-lockdep-splat.patch
+mm-thp-don-t-need-care-deferred-split-queue-in-memcg-charge-move-path.patch
+mm-move_pages-report-the-number-of-non-attempted-pages.patch
+media-v4l2-core-set-pages-dirty-upon-releasing-dma-buffers.patch
+media-v4l2-core-compat-ignore-native-command-codes.patch
+media-v4l2-rect.h-fix-v4l2_rect_map_inside-top-left-adjustments.patch
+lib-test_kasan.c-fix-memory-leak-in-kmalloc_oob_krealloc_more.patch
+irqdomain-fix-a-memory-leak-in-irq_domain_push_irq.patch
+x86-cpu-update-cached-hle-state-on-write-to-tsx_ctrl_cpuid_clear.patch
+platform-x86-intel_scu_ipc-fix-interrupt-support.patch
--- /dev/null
+From eb31e2f63d85d1bec4f7b136f317e03c03db5503 Mon Sep 17 00:00:00 2001
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Sun, 24 Nov 2019 21:31:45 +0200
+Subject: utimes: Clamp the timestamps in notify_change()
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+commit eb31e2f63d85d1bec4f7b136f317e03c03db5503 upstream.
+
+Push clamping timestamps into notify_change(), so in-kernel
+callers like nfsd and overlayfs will get similar timestamp
+set behavior as utimes.
+
+AV: get rid of clamping in ->setattr() instances; we don't need
+to bother with that there, with notify_change() doing normalization
+in all cases now (it already did for implicit case, since current_time()
+clamps).
+
+Suggested-by: Miklos Szeredi <mszeredi@redhat.com>
+Fixes: 42e729b9ddbb ("utimes: Clamp the timestamps before update")
+Cc: stable@vger.kernel.org # v5.4
+Cc: Deepa Dinamani <deepa.kernel@gmail.com>
+Cc: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/attr.c | 23 +++++++++++------------
+ fs/configfs/inode.c | 9 +++------
+ fs/f2fs/file.c | 18 ++++++------------
+ fs/ntfs/inode.c | 18 ++++++------------
+ fs/ubifs/file.c | 18 ++++++------------
+ fs/utimes.c | 4 ++--
+ 6 files changed, 34 insertions(+), 56 deletions(-)
+
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -183,18 +183,12 @@ void setattr_copy(struct inode *inode, c
+ inode->i_uid = attr->ia_uid;
+ if (ia_valid & ATTR_GID)
+ inode->i_gid = attr->ia_gid;
+- if (ia_valid & ATTR_ATIME) {
+- inode->i_atime = timestamp_truncate(attr->ia_atime,
+- inode);
+- }
+- if (ia_valid & ATTR_MTIME) {
+- inode->i_mtime = timestamp_truncate(attr->ia_mtime,
+- inode);
+- }
+- if (ia_valid & ATTR_CTIME) {
+- inode->i_ctime = timestamp_truncate(attr->ia_ctime,
+- inode);
+- }
++ if (ia_valid & ATTR_ATIME)
++ inode->i_atime = attr->ia_atime;
++ if (ia_valid & ATTR_MTIME)
++ inode->i_mtime = attr->ia_mtime;
++ if (ia_valid & ATTR_CTIME)
++ inode->i_ctime = attr->ia_ctime;
+ if (ia_valid & ATTR_MODE) {
+ umode_t mode = attr->ia_mode;
+
+@@ -268,8 +262,13 @@ int notify_change(struct dentry * dentry
+ attr->ia_ctime = now;
+ if (!(ia_valid & ATTR_ATIME_SET))
+ attr->ia_atime = now;
++ else
++ attr->ia_atime = timestamp_truncate(attr->ia_atime, inode);
+ if (!(ia_valid & ATTR_MTIME_SET))
+ attr->ia_mtime = now;
++ else
++ attr->ia_mtime = timestamp_truncate(attr->ia_mtime, inode);
++
+ if (ia_valid & ATTR_KILL_PRIV) {
+ error = security_inode_need_killpriv(dentry);
+ if (error < 0)
+--- a/fs/configfs/inode.c
++++ b/fs/configfs/inode.c
+@@ -76,14 +76,11 @@ int configfs_setattr(struct dentry * den
+ if (ia_valid & ATTR_GID)
+ sd_iattr->ia_gid = iattr->ia_gid;
+ if (ia_valid & ATTR_ATIME)
+- sd_iattr->ia_atime = timestamp_truncate(iattr->ia_atime,
+- inode);
++ sd_iattr->ia_atime = iattr->ia_atime;
+ if (ia_valid & ATTR_MTIME)
+- sd_iattr->ia_mtime = timestamp_truncate(iattr->ia_mtime,
+- inode);
++ sd_iattr->ia_mtime = iattr->ia_mtime;
+ if (ia_valid & ATTR_CTIME)
+- sd_iattr->ia_ctime = timestamp_truncate(iattr->ia_ctime,
+- inode);
++ sd_iattr->ia_ctime = iattr->ia_ctime;
+ if (ia_valid & ATTR_MODE) {
+ umode_t mode = iattr->ia_mode;
+
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -751,18 +751,12 @@ static void __setattr_copy(struct inode
+ inode->i_uid = attr->ia_uid;
+ if (ia_valid & ATTR_GID)
+ inode->i_gid = attr->ia_gid;
+- if (ia_valid & ATTR_ATIME) {
+- inode->i_atime = timestamp_truncate(attr->ia_atime,
+- inode);
+- }
+- if (ia_valid & ATTR_MTIME) {
+- inode->i_mtime = timestamp_truncate(attr->ia_mtime,
+- inode);
+- }
+- if (ia_valid & ATTR_CTIME) {
+- inode->i_ctime = timestamp_truncate(attr->ia_ctime,
+- inode);
+- }
++ if (ia_valid & ATTR_ATIME)
++ inode->i_atime = attr->ia_atime;
++ if (ia_valid & ATTR_MTIME)
++ inode->i_mtime = attr->ia_mtime;
++ if (ia_valid & ATTR_CTIME)
++ inode->i_ctime = attr->ia_ctime;
+ if (ia_valid & ATTR_MODE) {
+ umode_t mode = attr->ia_mode;
+
+--- a/fs/ntfs/inode.c
++++ b/fs/ntfs/inode.c
+@@ -2899,18 +2899,12 @@ int ntfs_setattr(struct dentry *dentry,
+ ia_valid |= ATTR_MTIME | ATTR_CTIME;
+ }
+ }
+- if (ia_valid & ATTR_ATIME) {
+- vi->i_atime = timestamp_truncate(attr->ia_atime,
+- vi);
+- }
+- if (ia_valid & ATTR_MTIME) {
+- vi->i_mtime = timestamp_truncate(attr->ia_mtime,
+- vi);
+- }
+- if (ia_valid & ATTR_CTIME) {
+- vi->i_ctime = timestamp_truncate(attr->ia_ctime,
+- vi);
+- }
++ if (ia_valid & ATTR_ATIME)
++ vi->i_atime = attr->ia_atime;
++ if (ia_valid & ATTR_MTIME)
++ vi->i_mtime = attr->ia_mtime;
++ if (ia_valid & ATTR_CTIME)
++ vi->i_ctime = attr->ia_ctime;
+ mark_inode_dirty(vi);
+ out:
+ return err;
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -1078,18 +1078,12 @@ static void do_attr_changes(struct inode
+ inode->i_uid = attr->ia_uid;
+ if (attr->ia_valid & ATTR_GID)
+ inode->i_gid = attr->ia_gid;
+- if (attr->ia_valid & ATTR_ATIME) {
+- inode->i_atime = timestamp_truncate(attr->ia_atime,
+- inode);
+- }
+- if (attr->ia_valid & ATTR_MTIME) {
+- inode->i_mtime = timestamp_truncate(attr->ia_mtime,
+- inode);
+- }
+- if (attr->ia_valid & ATTR_CTIME) {
+- inode->i_ctime = timestamp_truncate(attr->ia_ctime,
+- inode);
+- }
++ if (attr->ia_valid & ATTR_ATIME)
++ inode->i_atime = attr->ia_atime;
++ if (attr->ia_valid & ATTR_MTIME)
++ inode->i_mtime = attr->ia_mtime;
++ if (attr->ia_valid & ATTR_CTIME)
++ inode->i_ctime = attr->ia_ctime;
+ if (attr->ia_valid & ATTR_MODE) {
+ umode_t mode = attr->ia_mode;
+
+--- a/fs/utimes.c
++++ b/fs/utimes.c
+@@ -36,14 +36,14 @@ static int utimes_common(const struct pa
+ if (times[0].tv_nsec == UTIME_OMIT)
+ newattrs.ia_valid &= ~ATTR_ATIME;
+ else if (times[0].tv_nsec != UTIME_NOW) {
+- newattrs.ia_atime = timestamp_truncate(times[0], inode);
++ newattrs.ia_atime = times[0];
+ newattrs.ia_valid |= ATTR_ATIME_SET;
+ }
+
+ if (times[1].tv_nsec == UTIME_OMIT)
+ newattrs.ia_valid &= ~ATTR_MTIME;
+ else if (times[1].tv_nsec != UTIME_NOW) {
+- newattrs.ia_mtime = timestamp_truncate(times[1], inode);
++ newattrs.ia_mtime = times[1];
+ newattrs.ia_valid |= ATTR_MTIME_SET;
+ }
+ /*
--- /dev/null
+From 5efc6fa9044c3356d6046c6e1da6d02572dbed6b Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Fri, 10 Jan 2020 14:50:54 -0800
+Subject: x86/cpu: Update cached HLE state on write to TSX_CTRL_CPUID_CLEAR
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit 5efc6fa9044c3356d6046c6e1da6d02572dbed6b upstream.
+
+/proc/cpuinfo currently reports Hardware Lock Elision (HLE) feature to
+be present on boot cpu even if it was disabled during the bootup. This
+is because cpuinfo_x86->x86_capability HLE bit is not updated after TSX
+state is changed via the new MSR IA32_TSX_CTRL.
+
+Update the cached HLE bit also since it is expected to change after an
+update to CPUID_CLEAR bit in MSR IA32_TSX_CTRL.
+
+Fixes: 95c5824f75f3 ("x86/cpu: Add a "tsx=" cmdline option with TSX disabled by default")
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Neelima Krishnan <neelima.krishnan@intel.com>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/2529b99546294c893dfa1c89e2b3e46da3369a59.1578685425.git.pawan.kumar.gupta@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/tsx.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/cpu/tsx.c
++++ b/arch/x86/kernel/cpu/tsx.c
+@@ -115,11 +115,12 @@ void __init tsx_init(void)
+ tsx_disable();
+
+ /*
+- * tsx_disable() will change the state of the
+- * RTM CPUID bit. Clear it here since it is now
+- * expected to be not set.
++ * tsx_disable() will change the state of the RTM and HLE CPUID
++ * bits. Clear them here since they are now expected to be not
++ * set.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_RTM);
++ setup_clear_cpu_cap(X86_FEATURE_HLE);
+ } else if (tsx_ctrl_state == TSX_CTRL_ENABLE) {
+
+ /*
+@@ -131,10 +132,10 @@ void __init tsx_init(void)
+ tsx_enable();
+
+ /*
+- * tsx_enable() will change the state of the
+- * RTM CPUID bit. Force it here since it is now
+- * expected to be set.
++ * tsx_enable() will change the state of the RTM and HLE CPUID
++ * bits. Force them here since they are now expected to be set.
+ */
+ setup_force_cpu_cap(X86_FEATURE_RTM);
++ setup_force_cpu_cap(X86_FEATURE_HLE);
+ }
+ }