git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 23 Jul 2019 12:12:36 +0000 (14:12 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 23 Jul 2019 12:12:36 +0000 (14:12 +0200)
added patches:
9p-virtio-add-cleanup-path-in-p9_virtio_init.patch
9p-xen-add-cleanup-path-in-p9_trans_xen_init.patch
arm-dts-gemini-set-dir-685-spi-cs-as-active-low.patch
block-allow-mapping-of-vmalloc-ed-buffers.patch
block-fix-potential-overflow-in-blk_report_zones.patch
btrfs-add-missing-inode-version-ctime-and-mtime-updates-when-punching-hole.patch
btrfs-fix-data-loss-after-inode-eviction-renaming-it-and-fsync-it.patch
btrfs-fix-fsync-not-persisting-dentry-deletions-due-to-inode-evictions.patch
coda-pass-the-host-file-in-vma-vm_file-on-mmap.patch
dm-zoned-fix-zone-state-management-race.patch
drm-edid-parse-cea-blocks-embedded-in-displayid.patch
drm-nouveau-i2c-enable-i2c-pads-busses-during-preinit.patch
hid-wacom-correct-touch-resolution-x-y-typo.patch
hid-wacom-generic-correct-pad-syncing.patch
hid-wacom-generic-only-switch-the-mode-on-devices-with-leds.patch
ib-mlx5-report-correctly-tag-matching-rendezvous-capability.patch
include-asm-generic-bug.h-fix-cut-here-for-warn_on-for-__warn_taint-architectures.patch
intel_th-pci-add-ice-lake-nnpi-support.patch
libnvdimm-pfn-fix-fsdax-mode-namespace-info-block-zero-fields.patch
mm-nvdimm-add-is_ioremap_addr-and-use-that-to-check-ioremap-address.patch
padata-use-smp_mb-in-padata_reorder-to-avoid-orphaned-padata-jobs.patch
pci-do-not-poll-for-pme-if-the-device-is-in-d3cold.patch
pci-hv-fix-a-use-after-free-bug-in-hv_eject_device_work.patch
pci-qcom-ensure-that-perst-is-asserted-for-at-least-100-ms.patch
perf-x86-amd-uncore-do-not-set-threadmask-and-slicemask-for-non-l3-pmcs.patch
perf-x86-amd-uncore-set-the-thread-mask-for-f17h-l3-pmcs.patch
perf-x86-intel-fix-spurious-nmi-on-fixed-counter.patch
rdma-srp-accept-again-source-addresses-that-do-not-have-a-port-number.patch
resource-fix-locking-in-find_next_iomem_res.patch
rt2x00usb-fix-rx-queue-hang.patch
x86-boot-fix-memory-leak-in-default_get_smp_config.patch
xen-events-fix-binding-user-event-channels-to-cpus.patch

33 files changed:
queue-5.1/9p-virtio-add-cleanup-path-in-p9_virtio_init.patch [new file with mode: 0644]
queue-5.1/9p-xen-add-cleanup-path-in-p9_trans_xen_init.patch [new file with mode: 0644]
queue-5.1/arm-dts-gemini-set-dir-685-spi-cs-as-active-low.patch [new file with mode: 0644]
queue-5.1/block-allow-mapping-of-vmalloc-ed-buffers.patch [new file with mode: 0644]
queue-5.1/block-fix-potential-overflow-in-blk_report_zones.patch [new file with mode: 0644]
queue-5.1/btrfs-add-missing-inode-version-ctime-and-mtime-updates-when-punching-hole.patch [new file with mode: 0644]
queue-5.1/btrfs-fix-data-loss-after-inode-eviction-renaming-it-and-fsync-it.patch [new file with mode: 0644]
queue-5.1/btrfs-fix-fsync-not-persisting-dentry-deletions-due-to-inode-evictions.patch [new file with mode: 0644]
queue-5.1/coda-pass-the-host-file-in-vma-vm_file-on-mmap.patch [new file with mode: 0644]
queue-5.1/dm-zoned-fix-zone-state-management-race.patch [new file with mode: 0644]
queue-5.1/drm-edid-parse-cea-blocks-embedded-in-displayid.patch [new file with mode: 0644]
queue-5.1/drm-nouveau-i2c-enable-i2c-pads-busses-during-preinit.patch [new file with mode: 0644]
queue-5.1/hid-wacom-correct-touch-resolution-x-y-typo.patch [new file with mode: 0644]
queue-5.1/hid-wacom-generic-correct-pad-syncing.patch [new file with mode: 0644]
queue-5.1/hid-wacom-generic-only-switch-the-mode-on-devices-with-leds.patch [new file with mode: 0644]
queue-5.1/ib-mlx5-report-correctly-tag-matching-rendezvous-capability.patch [new file with mode: 0644]
queue-5.1/include-asm-generic-bug.h-fix-cut-here-for-warn_on-for-__warn_taint-architectures.patch [new file with mode: 0644]
queue-5.1/intel_th-pci-add-ice-lake-nnpi-support.patch [new file with mode: 0644]
queue-5.1/libnvdimm-pfn-fix-fsdax-mode-namespace-info-block-zero-fields.patch [new file with mode: 0644]
queue-5.1/mm-nvdimm-add-is_ioremap_addr-and-use-that-to-check-ioremap-address.patch [new file with mode: 0644]
queue-5.1/padata-use-smp_mb-in-padata_reorder-to-avoid-orphaned-padata-jobs.patch [new file with mode: 0644]
queue-5.1/pci-do-not-poll-for-pme-if-the-device-is-in-d3cold.patch [new file with mode: 0644]
queue-5.1/pci-hv-fix-a-use-after-free-bug-in-hv_eject_device_work.patch [new file with mode: 0644]
queue-5.1/pci-qcom-ensure-that-perst-is-asserted-for-at-least-100-ms.patch [new file with mode: 0644]
queue-5.1/perf-x86-amd-uncore-do-not-set-threadmask-and-slicemask-for-non-l3-pmcs.patch [new file with mode: 0644]
queue-5.1/perf-x86-amd-uncore-set-the-thread-mask-for-f17h-l3-pmcs.patch [new file with mode: 0644]
queue-5.1/perf-x86-intel-fix-spurious-nmi-on-fixed-counter.patch [new file with mode: 0644]
queue-5.1/rdma-srp-accept-again-source-addresses-that-do-not-have-a-port-number.patch [new file with mode: 0644]
queue-5.1/resource-fix-locking-in-find_next_iomem_res.patch [new file with mode: 0644]
queue-5.1/rt2x00usb-fix-rx-queue-hang.patch [new file with mode: 0644]
queue-5.1/series
queue-5.1/x86-boot-fix-memory-leak-in-default_get_smp_config.patch [new file with mode: 0644]
queue-5.1/xen-events-fix-binding-user-event-channels-to-cpus.patch [new file with mode: 0644]

diff --git a/queue-5.1/9p-virtio-add-cleanup-path-in-p9_virtio_init.patch b/queue-5.1/9p-virtio-add-cleanup-path-in-p9_virtio_init.patch
new file mode 100644 (file)
index 0000000..f716062
--- /dev/null
@@ -0,0 +1,89 @@
+From d4548543fc4ece56c6f04b8586f435fb4fd84c20 Mon Sep 17 00:00:00 2001
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Tue, 30 Apr 2019 19:59:42 +0800
+Subject: 9p/virtio: Add cleanup path in p9_virtio_init
+
+From: YueHaibing <yuehaibing@huawei.com>
+
+commit d4548543fc4ece56c6f04b8586f435fb4fd84c20 upstream.
+
+KASAN reports this:
+
+BUG: unable to handle kernel paging request at ffffffffa0097000
+PGD 3870067 P4D 3870067 PUD 3871063 PMD 2326e2067 PTE 0
+Oops: 0000 [#1
+CPU: 0 PID: 5340 Comm: modprobe Not tainted 5.1.0-rc7+ #25
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.9.3-0-ge2fc41e-prebuilt.qemu-project.org 04/01/2014
+RIP: 0010:__list_add_valid+0x10/0x70
+Code: c3 48 8b 06 55 48 89 e5 5d 48 39 07 0f 94 c0 0f b6 c0 c3 90 90 90 90 90 90 90 55 48 89 d0 48 8b 52 08 48 89 e5 48 39 f2 75 19 <48> 8b 32 48 39 f0 75 3a
+
+RSP: 0018:ffffc90000e23c68 EFLAGS: 00010246
+RAX: ffffffffa00ad000 RBX: ffffffffa009d000 RCX: 0000000000000000
+RDX: ffffffffa0097000 RSI: ffffffffa0097000 RDI: ffffffffa009d000
+RBP: ffffc90000e23c68 R08: 0000000000000001 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000000 R12: ffffffffa0097000
+R13: ffff888231797180 R14: 0000000000000000 R15: ffffc90000e23e78
+FS:  00007fb215285540(0000) GS:ffff888237a00000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: ffffffffa0097000 CR3: 000000022f144000 CR4: 00000000000006f0
+Call Trace:
+ v9fs_register_trans+0x2f/0x60 [9pnet
+ ? 0xffffffffa0087000
+ p9_virtio_init+0x25/0x1000 [9pnet_virtio
+ do_one_initcall+0x6c/0x3cc
+ ? kmem_cache_alloc_trace+0x248/0x3b0
+ do_init_module+0x5b/0x1f1
+ load_module+0x1db1/0x2690
+ ? m_show+0x1d0/0x1d0
+ __do_sys_finit_module+0xc5/0xd0
+ __x64_sys_finit_module+0x15/0x20
+ do_syscall_64+0x6b/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x7fb214d8e839
+Code: 00 f3 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01
+
+RSP: 002b:00007ffc96554278 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
+RAX: ffffffffffffffda RBX: 000055e67eed2aa0 RCX: 00007fb214d8e839
+RDX: 0000000000000000 RSI: 000055e67ce95c2e RDI: 0000000000000003
+RBP: 000055e67ce95c2e R08: 0000000000000000 R09: 000055e67eed2aa0
+R10: 0000000000000003 R11: 0000000000000246 R12: 0000000000000000
+R13: 000055e67eeda500 R14: 0000000000040000 R15: 000055e67eed2aa0
+Modules linked in: 9pnet_virtio(+) 9pnet gre rfkill vmw_vsock_virtio_transport_common vsock [last unloaded: 9pnet_virtio
+CR2: ffffffffa0097000
+---[ end trace 4a52bb13ff07b761
+
+If register_virtio_driver() fails in p9_virtio_init,
+we should call v9fs_unregister_trans() to do cleanup.
+
+Link: http://lkml.kernel.org/r/20190430115942.41840-1-yuehaibing@huawei.com
+Cc: stable@vger.kernel.org
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Fixes: b530cc794024 ("9p: add virtio transport")
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/9p/trans_virtio.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -782,10 +782,16 @@ static struct p9_trans_module p9_virtio_
+ /* The standard init function */
+ static int __init p9_virtio_init(void)
+ {
++      int rc;
++
+       INIT_LIST_HEAD(&virtio_chan_list);
+       v9fs_register_trans(&p9_virtio_trans);
+-      return register_virtio_driver(&p9_virtio_drv);
++      rc = register_virtio_driver(&p9_virtio_drv);
++      if (rc)
++              v9fs_unregister_trans(&p9_virtio_trans);
++
++      return rc;
+ }
+ static void __exit p9_virtio_cleanup(void)
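
Both this patch and the following 9p/xen one apply the same module-init
unwind rule: any registration made before a failing step has to be undone
before returning the error, otherwise the 9p transport list keeps a pointer
into memory that is freed when the failed module load is rolled back, which
is consistent with the KASAN paging fault shown above. A minimal, compilable
sketch of the pattern; register_a(), register_b() and unregister_a() are
hypothetical stand-ins, not the real v9fs/virtio calls.

    #include <stdio.h>

    static int a_registered;

    static void register_a(void)   { a_registered = 1; }   /* cannot fail */
    static void unregister_a(void) { a_registered = 0; }
    static int  register_b(void)   { return -1; }          /* simulate failure */

    static int example_init(void)
    {
            int rc;

            register_a();              /* step 1, unconditional */
            rc = register_b();         /* step 2, may fail */
            if (rc)
                    unregister_a();    /* undo step 1 so nothing stale remains */
            return rc;
    }

    int main(void)
    {
            printf("init: %d, still registered: %d\n", example_init(), a_registered);
            return 0;
    }
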
diff --git a/queue-5.1/9p-xen-add-cleanup-path-in-p9_trans_xen_init.patch b/queue-5.1/9p-xen-add-cleanup-path-in-p9_trans_xen_init.patch
new file mode 100644 (file)
index 0000000..7c79412
--- /dev/null
@@ -0,0 +1,46 @@
+From 80a316ff16276b36d0392a8f8b2f63259857ae98 Mon Sep 17 00:00:00 2001
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Tue, 30 Apr 2019 22:39:33 +0800
+Subject: 9p/xen: Add cleanup path in p9_trans_xen_init
+
+From: YueHaibing <yuehaibing@huawei.com>
+
+commit 80a316ff16276b36d0392a8f8b2f63259857ae98 upstream.
+
+If xenbus_register_frontend() fails in p9_trans_xen_init,
+we should call v9fs_unregister_trans() to do cleanup.
+
+Link: http://lkml.kernel.org/r/20190430143933.19368-1-yuehaibing@huawei.com
+Cc: stable@vger.kernel.org
+Fixes: 868eb122739a ("xen/9pfs: introduce Xen 9pfs transport driver")
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/9p/trans_xen.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -530,13 +530,19 @@ static struct xenbus_driver xen_9pfs_fro
+ static int p9_trans_xen_init(void)
+ {
++      int rc;
++
+       if (!xen_domain())
+               return -ENODEV;
+       pr_info("Initialising Xen transport for 9pfs\n");
+       v9fs_register_trans(&p9_xen_trans);
+-      return xenbus_register_frontend(&xen_9pfs_front_driver);
++      rc = xenbus_register_frontend(&xen_9pfs_front_driver);
++      if (rc)
++              v9fs_unregister_trans(&p9_xen_trans);
++
++      return rc;
+ }
+ module_init(p9_trans_xen_init);
diff --git a/queue-5.1/arm-dts-gemini-set-dir-685-spi-cs-as-active-low.patch b/queue-5.1/arm-dts-gemini-set-dir-685-spi-cs-as-active-low.patch
new file mode 100644 (file)
index 0000000..f435dd5
--- /dev/null
@@ -0,0 +1,34 @@
+From f90b8fda3a9d72a9422ea80ae95843697f94ea4a Mon Sep 17 00:00:00 2001
+From: Linus Walleij <linus.walleij@linaro.org>
+Date: Mon, 15 Jul 2019 22:21:01 +0200
+Subject: ARM: dts: gemini: Set DIR-685 SPI CS as active low
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+commit f90b8fda3a9d72a9422ea80ae95843697f94ea4a upstream.
+
+The SPI chip select to the display on the DIR-685 is active low; we were
+just saved by the SPI library enforcing active low on everything
+before, so set it as active low to avoid ambiguity.
+
+Link: https://lore.kernel.org/r/20190715202101.16060-1-linus.walleij@linaro.org
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Olof Johansson <olof@lixom.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/gemini-dlink-dir-685.dts |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
++++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+@@ -64,7 +64,7 @@
+               gpio-sck = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+               gpio-miso = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+               gpio-mosi = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+-              cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
++              cs-gpios = <&gpio0 20 GPIO_ACTIVE_LOW>;
+               num-chipselects = <1>;
+               panel: display@0 {
diff --git a/queue-5.1/block-allow-mapping-of-vmalloc-ed-buffers.patch b/queue-5.1/block-allow-mapping-of-vmalloc-ed-buffers.patch
new file mode 100644 (file)
index 0000000..7a6edf3
--- /dev/null
@@ -0,0 +1,106 @@
+From b4c5875d36178e8df409bdce232f270cac89fafe Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@wdc.com>
+Date: Mon, 1 Jul 2019 14:09:15 +0900
+Subject: block: Allow mapping of vmalloc-ed buffers
+
+From: Damien Le Moal <damien.lemoal@wdc.com>
+
+commit b4c5875d36178e8df409bdce232f270cac89fafe upstream.
+
+To allow the SCSI subsystem scsi_execute_req() function to issue
+requests using large buffers that are better allocated with vmalloc()
+rather than kmalloc(), modify bio_map_kern() to allow passing a buffer
+allocated with vmalloc().
+
+To do so, detect vmalloc-ed buffers using is_vmalloc_addr(). For
+vmalloc-ed buffers, flush the buffer using flush_kernel_vmap_range(),
+use vmalloc_to_page() instead of virt_to_page() to obtain the pages of
+the buffer, and invalidate the buffer addresses with
+invalidate_kernel_vmap_range() on completion of read BIOs. This last
+point is executed using the function bio_invalidate_vmalloc_pages()
+which is defined only if the architecture defines
+ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE, that is, if the architecture
+actually needs the invalidation done.
+
+Fixes: 515ce6061312 ("scsi: sd_zbc: Fix sd_zbc_report_zones() buffer allocation")
+Fixes: e76239a3748c ("block: add a report_zones method")
+Cc: stable@vger.kernel.org
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bio.c |   28 +++++++++++++++++++++++++++-
+ 1 file changed, 27 insertions(+), 1 deletion(-)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -29,6 +29,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/cgroup.h>
+ #include <linux/blk-cgroup.h>
++#include <linux/highmem.h>
+ #include <trace/events/block.h>
+ #include "blk.h"
+@@ -1475,8 +1476,22 @@ void bio_unmap_user(struct bio *bio)
+       bio_put(bio);
+ }
++static void bio_invalidate_vmalloc_pages(struct bio *bio)
++{
++#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
++      if (bio->bi_private && !op_is_write(bio_op(bio))) {
++              unsigned long i, len = 0;
++
++              for (i = 0; i < bio->bi_vcnt; i++)
++                      len += bio->bi_io_vec[i].bv_len;
++              invalidate_kernel_vmap_range(bio->bi_private, len);
++      }
++#endif
++}
++
+ static void bio_map_kern_endio(struct bio *bio)
+ {
++      bio_invalidate_vmalloc_pages(bio);
+       bio_put(bio);
+ }
+@@ -1497,6 +1512,8 @@ struct bio *bio_map_kern(struct request_
+       unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       unsigned long start = kaddr >> PAGE_SHIFT;
+       const int nr_pages = end - start;
++      bool is_vmalloc = is_vmalloc_addr(data);
++      struct page *page;
+       int offset, i;
+       struct bio *bio;
+@@ -1504,6 +1521,11 @@ struct bio *bio_map_kern(struct request_
+       if (!bio)
+               return ERR_PTR(-ENOMEM);
++      if (is_vmalloc) {
++              flush_kernel_vmap_range(data, len);
++              bio->bi_private = data;
++      }
++
+       offset = offset_in_page(kaddr);
+       for (i = 0; i < nr_pages; i++) {
+               unsigned int bytes = PAGE_SIZE - offset;
+@@ -1514,7 +1536,11 @@ struct bio *bio_map_kern(struct request_
+               if (bytes > len)
+                       bytes = len;
+-              if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
++              if (!is_vmalloc)
++                      page = virt_to_page(data);
++              else
++                      page = vmalloc_to_page(data);
++              if (bio_add_pc_page(q, bio, page, bytes,
+                                   offset) < bytes) {
+                       /* we don't support partial mappings */
+                       bio_put(bio);
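
The distinction the patch relies on: a kmalloc()-ed buffer lives in the
kernel's linear mapping, so virt_to_page() can translate its address with
simple arithmetic, while a vmalloc()-ed buffer is only virtually contiguous
and its pages have to be looked up through the page tables with
vmalloc_to_page(). A kernel-style sketch of that choice (the helper name is
hypothetical and not part of the patch):

    #include <linux/mm.h>   /* is_vmalloc_addr(), vmalloc_to_page(), virt_to_page() */

    static struct page *kernel_buf_to_page(void *data)
    {
            if (is_vmalloc_addr(data))
                    return vmalloc_to_page(data);   /* walk the page tables */
            return virt_to_page(data);              /* linear-map arithmetic */
    }
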
diff --git a/queue-5.1/block-fix-potential-overflow-in-blk_report_zones.patch b/queue-5.1/block-fix-potential-overflow-in-blk_report_zones.patch
new file mode 100644 (file)
index 0000000..4a430e8
--- /dev/null
@@ -0,0 +1,66 @@
+From 113ab72ed4794c193509a97d7c6d32a6886e1682 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@wdc.com>
+Date: Wed, 10 Jul 2019 13:53:10 +0900
+Subject: block: Fix potential overflow in blk_report_zones()
+
+From: Damien Le Moal <damien.lemoal@wdc.com>
+
+commit 113ab72ed4794c193509a97d7c6d32a6886e1682 upstream.
+
+For large values of the number of zones reported and/or large zone
+sizes, the sector increment calculated with
+
+blk_queue_zone_sectors(q) * n
+
+in the blk_report_zones() loop can overflow the unsigned int type used for
+the calculation, as both "n" and the blk_queue_zone_sectors() value are
+unsigned int. E.g. for a device with 256 MB zones (524288 sectors),
+overflow happens with 8192 or more zones reported.
+
+Changing the return type of blk_queue_zone_sectors() to sector_t fixes
+this problem and avoids the overflow for all other callers of this
+helper too. The same change is also applied to the bdev_zone_sectors()
+helper.
+
+Fixes: e76239a3748c ("block: add a report_zones method")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-zoned.c      |    2 +-
+ include/linux/blkdev.h |    4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -69,7 +69,7 @@ EXPORT_SYMBOL_GPL(__blk_req_zone_write_u
+ static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
+                                            sector_t nr_sectors)
+ {
+-      unsigned long zone_sectors = blk_queue_zone_sectors(q);
++      sector_t zone_sectors = blk_queue_zone_sectors(q);
+       return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
+ }
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -662,7 +662,7 @@ static inline bool blk_queue_is_zoned(st
+       }
+ }
+-static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
++static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
+ {
+       return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
+ }
+@@ -1400,7 +1400,7 @@ static inline bool bdev_is_zoned(struct
+       return false;
+ }
+-static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
++static inline sector_t bdev_zone_sectors(struct block_device *bdev)
+ {
+       struct request_queue *q = bdev_get_queue(bdev);
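
The overflow described above can be made concrete: 256 MB zones are 524288
sectors (2^19), so once 8192 zones (2^13) have been reported the product is
2^32 sectors, which wraps to 0 in a 32-bit unsigned int; widening one operand
to the 64-bit sector_t preserves the value. A small standalone demonstration
(plain userspace C, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int zone_sectors = 524288; /* 256 MB zones, 512-byte sectors */
            unsigned int n = 8192;              /* zones reported so far */

            unsigned int wrapped = zone_sectors * n;           /* 32-bit multiply wraps */
            uint64_t widened = (uint64_t)zone_sectors * n;     /* sector_t-like 64-bit math */

            printf("32-bit: %u\n", wrapped);                        /* prints 0 */
            printf("64-bit: %llu\n", (unsigned long long)widened);  /* prints 4294967296 */
            return 0;
    }
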
diff --git a/queue-5.1/btrfs-add-missing-inode-version-ctime-and-mtime-updates-when-punching-hole.patch b/queue-5.1/btrfs-add-missing-inode-version-ctime-and-mtime-updates-when-punching-hole.patch
new file mode 100644 (file)
index 0000000..715d99e
--- /dev/null
@@ -0,0 +1,42 @@
+From 179006688a7e888cbff39577189f2e034786d06a Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Wed, 19 Jun 2019 13:05:50 +0100
+Subject: Btrfs: add missing inode version, ctime and mtime updates when punching hole
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit 179006688a7e888cbff39577189f2e034786d06a upstream.
+
+If the range for which we are punching a hole covers only part of a page,
+we end up updating the inode item but we skip the update of the inode's
+iversion, mtime and ctime. Fix that by ensuring we update those properties
+of the inode.
+
+A patch for fstests test case generic/059 that tests this has been sent
+along with this fix.
+
+Fixes: 2aaa66558172b0 ("Btrfs: add hole punching")
+Fixes: e8c1c76e804b18 ("Btrfs: add missing inode update when punching hole")
+CC: stable@vger.kernel.org # 4.4+
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/file.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2713,6 +2713,11 @@ out_only_mutex:
+                * for detecting, at fsync time, if the inode isn't yet in the
+                * log tree or it's there but not up to date.
+                */
++              struct timespec64 now = current_time(inode);
++
++              inode_inc_iversion(inode);
++              inode->i_mtime = now;
++              inode->i_ctime = now;
+               trans = btrfs_start_transaction(root, 1);
+               if (IS_ERR(trans)) {
+                       err = PTR_ERR(trans);
diff --git a/queue-5.1/btrfs-fix-data-loss-after-inode-eviction-renaming-it-and-fsync-it.patch b/queue-5.1/btrfs-fix-data-loss-after-inode-eviction-renaming-it-and-fsync-it.patch
new file mode 100644 (file)
index 0000000..b81df3b
--- /dev/null
@@ -0,0 +1,110 @@
+From d1d832a0b51dd9570429bb4b81b2a6c1759e681a Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Fri, 7 Jun 2019 11:25:24 +0100
+Subject: Btrfs: fix data loss after inode eviction, renaming it, and fsync it
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit d1d832a0b51dd9570429bb4b81b2a6c1759e681a upstream.
+
+When we log an inode, regardless of logging it completely or only that it
+exists, we always update it as logged (logged_trans and last_log_commit
+fields of the inode are updated). This is generally fine and avoids future
+attempts to log it from having to do repeated work that brings no value.
+
+However, if we write data to a file, then evict its inode after all the
+delalloc was flushed (and ordered extents completed), rename the file and
+fsync it, we end up not logging the new extents, since the rename may
+result in logging that the inode exists in case the parent directory was
+logged before. The following reproducer shows and explains how this can
+happen:
+
+  $ mkfs.btrfs -f /dev/sdb
+  $ mount /dev/sdb /mnt
+
+  $ mkdir /mnt/dir
+  $ touch /mnt/dir/foo
+  $ touch /mnt/dir/bar
+
+  # Do a direct IO write instead of a buffered write because with a
+  # buffered write we would need to make sure dealloc gets flushed and
+  # complete before we do the inode eviction later, and we can not do that
+  # from user space with call to things such as sync(2) since that results
+  # in a transaction commit as well.
+  $ xfs_io -d -c "pwrite -S 0xd3 0 4K" /mnt/dir/bar
+
+  # Keep the directory dir in use while we evict inodes. We want our file
+  # bar's inode to be evicted but we don't want our directory's inode to
+  # be evicted (if it were evicted too, we would not be able to reproduce
+  # the issue since the first fsync below, of file foo, would result in a
+  # transaction commit.
+  $ ( cd /mnt/dir; while true; do :; done ) &
+  $ pid=$!
+
+  # Wait a bit to give time for the background process to chdir.
+  $ sleep 0.1
+
+  # Evict all inodes, except the inode for the directory dir because it is
+  # currently in use by our background process.
+  $ echo 2 > /proc/sys/vm/drop_caches
+
+  # fsync file foo, which ends up persisting information about the parent
+  # directory because it is a new inode.
+  $ xfs_io -c fsync /mnt/dir/foo
+
+  # Rename bar, this results in logging that this inode exists (inode item,
+  # names, xattrs) because the parent directory is in the log.
+  $ mv /mnt/dir/bar /mnt/dir/baz
+
+  # Now fsync baz, which ends up doing absolutely nothing because of the
+  # rename operation which logged that the inode exists only.
+  $ xfs_io -c fsync /mnt/dir/baz
+
+  <power failure>
+
+  $ mount /dev/sdb /mnt
+  $ od -t x1 -A d /mnt/dir/baz
+  0000000
+
+    --> Empty file, data we wrote is missing.
+
+Fix this by not updating last_sub_trans of an inode when we are logging
+only that it exists and the inode was not yet logged since it was loaded
+from disk (full_sync bit set); this is enough to make btrfs_inode_in_log()
+return false for this scenario and make us log the inode. The logged_trans
+of the inode is still always set since that alone is used to track if names
+need to be deleted as part of unlink operations.
+
+Fixes: 257c62e1bce03e ("Btrfs: avoid tree log commit when there are no changes")
+CC: stable@vger.kernel.org # 4.4+
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/tree-log.c |   12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -5407,9 +5407,19 @@ log_extents:
+               }
+       }
++      /*
++       * Don't update last_log_commit if we logged that an inode exists after
++       * it was loaded to memory (full_sync bit set).
++       * This is to prevent data loss when we do a write to the inode, then
++       * the inode gets evicted after all delalloc was flushed, then we log
++       * it exists (due to a rename for example) and then fsync it. This last
++       * fsync would do nothing (not logging the extents previously written).
++       */
+       spin_lock(&inode->lock);
+       inode->logged_trans = trans->transid;
+-      inode->last_log_commit = inode->last_sub_trans;
++      if (inode_only != LOG_INODE_EXISTS ||
++          !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
++              inode->last_log_commit = inode->last_sub_trans;
+       spin_unlock(&inode->lock);
+ out_unlock:
+       mutex_unlock(&inode->log_mutex);
diff --git a/queue-5.1/btrfs-fix-fsync-not-persisting-dentry-deletions-due-to-inode-evictions.patch b/queue-5.1/btrfs-fix-fsync-not-persisting-dentry-deletions-due-to-inode-evictions.patch
new file mode 100644 (file)
index 0000000..6159b37
--- /dev/null
@@ -0,0 +1,133 @@
+From 803f0f64d17769071d7287d9e3e3b79a3e1ae937 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Wed, 19 Jun 2019 13:05:39 +0100
+Subject: Btrfs: fix fsync not persisting dentry deletions due to inode evictions
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit 803f0f64d17769071d7287d9e3e3b79a3e1ae937 upstream.
+
+In order to avoid searches on a log tree when unlinking an inode, we check
+if the inode being unlinked was logged in the current transaction, as well
+as the inode of its parent directory. When any of the inodes are logged,
+we proceed to delete directory items and inode reference items from the
+log, to ensure that a subsequent fsync of only the inode being unlinked
+or only of the parent directory when the other is not fsync'ed as well,
+does not result in the entry still existing after a power failure.
+
+That check however is not reliable when one of the inodes involved (the
+one being unlinked or its parent directory's inode) is evicted, since the
+logged_trans field is transient, that is, it is not stored on disk, so it
+is lost when the inode is evicted and loaded into memory again (which is
+set to zero on load). As a consequence the checks currently being done by
+btrfs_del_dir_entries_in_log() and btrfs_del_inode_ref_in_log() always
+return true if the inode was evicted before, regardless of the inode
+having been logged or not before (and in the current transaction), this
+results in the dentry being unlinked still existing after a log replay
+if after the unlink operation only one of the inodes involved is fsync'ed.
+
+Example:
+
+  $ mkfs.btrfs -f /dev/sdb
+  $ mount /dev/sdb /mnt
+
+  $ mkdir /mnt/dir
+  $ touch /mnt/dir/foo
+  $ xfs_io -c fsync /mnt/dir/foo
+
+  # Keep an open file descriptor on our directory while we evict inodes.
+  # We just want to evict the file's inode, the directory's inode must not
+  # be evicted.
+  $ ( cd /mnt/dir; while true; do :; done ) &
+  $ pid=$!
+
+  # Wait a bit to give time to background process to chdir to our test
+  # directory.
+  $ sleep 0.5
+
+  # Trigger eviction of the file's inode.
+  $ echo 2 > /proc/sys/vm/drop_caches
+
+  # Unlink our file and fsync the parent directory. After a power failure
+  # we don't expect to see the file anymore, since we fsync'ed the parent
+  # directory.
+  $ rm -f $SCRATCH_MNT/dir/foo
+  $ xfs_io -c fsync /mnt/dir
+
+  <power failure>
+
+  $ mount /dev/sdb /mnt
+  $ ls /mnt/dir
+  foo
+  $
+   --> file still there, unlink not persisted despite explicit fsync on dir
+
+Fix this by checking if the inode has the full_sync bit set in its runtime
+flags as well, since that bit is set every time an inode is loaded from
+disk, or for other less common cases such as after a shrinking truncate
+or failure to allocate extent maps for holes, and gets cleared after the
+first fsync. Also consider the inode as possibly logged only if it was
+last modified in the current transaction (besides having the full_sync
+flag set).
+
+Fixes: 3a5f1d458ad161 ("Btrfs: Optimize btree walking while logging inodes")
+CC: stable@vger.kernel.org # 4.4+
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/tree-log.c |   28 ++++++++++++++++++++++++++--
+ 1 file changed, 26 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3309,6 +3309,30 @@ int btrfs_free_log_root_tree(struct btrf
+ }
+ /*
++ * Check if an inode was logged in the current transaction. We can't always rely
++ * on an inode's logged_trans value, because it's an in-memory only field and
++ * therefore not persisted. This means that its value is lost if the inode gets
++ * evicted and loaded again from disk (in which case it has a value of 0, and
++ * certainly it is smaller then any possible transaction ID), when that happens
++ * the full_sync flag is set in the inode's runtime flags, so on that case we
++ * assume eviction happened and ignore the logged_trans value, assuming the
++ * worst case, that the inode was logged before in the current transaction.
++ */
++static bool inode_logged(struct btrfs_trans_handle *trans,
++                       struct btrfs_inode *inode)
++{
++      if (inode->logged_trans == trans->transid)
++              return true;
++
++      if (inode->last_trans == trans->transid &&
++          test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
++          !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
++              return true;
++
++      return false;
++}
++
++/*
+  * If both a file and directory are logged, and unlinks or renames are
+  * mixed in, we have a few interesting corners:
+  *
+@@ -3342,7 +3366,7 @@ int btrfs_del_dir_entries_in_log(struct
+       int bytes_del = 0;
+       u64 dir_ino = btrfs_ino(dir);
+-      if (dir->logged_trans < trans->transid)
++      if (!inode_logged(trans, dir))
+               return 0;
+       ret = join_running_log_trans(root);
+@@ -3447,7 +3471,7 @@ int btrfs_del_inode_ref_in_log(struct bt
+       u64 index;
+       int ret;
+-      if (inode->logged_trans < trans->transid)
++      if (!inode_logged(trans, inode))
+               return 0;
+       ret = join_running_log_trans(root);
diff --git a/queue-5.1/coda-pass-the-host-file-in-vma-vm_file-on-mmap.patch b/queue-5.1/coda-pass-the-host-file-in-vma-vm_file-on-mmap.patch
new file mode 100644 (file)
index 0000000..0fb8831
--- /dev/null
@@ -0,0 +1,167 @@
+From 7fa0a1da3dadfd9216df7745a1331fdaa0940d1c Mon Sep 17 00:00:00 2001
+From: Jan Harkes <jaharkes@cs.cmu.edu>
+Date: Tue, 16 Jul 2019 16:28:04 -0700
+Subject: coda: pass the host file in vma->vm_file on mmap
+
+From: Jan Harkes <jaharkes@cs.cmu.edu>
+
+commit 7fa0a1da3dadfd9216df7745a1331fdaa0940d1c upstream.
+
+Patch series "Coda updates".
+
+The following patch series is a collection of various fixes for Coda,
+most of which were collected from linux-fsdevel or linux-kernel but
+which have as yet not found their way upstream.
+
+This patch (of 22):
+
+Various file systems expect that vma->vm_file points at their own file
+handle; several use file_inode(vma->vm_file) to get at their inode or
+use vma->vm_file->private_data.  However, the way Coda wrapped mmap on a
+host file broke this assumption: vm_file was still pointing at the Coda
+file and the host file systems would scribble over Coda's inode and
+private file data.
+
+This patch fixes the incorrect expectation and wraps vm_ops->open and
+vm_ops->close to allow Coda to track when the vm_area_struct is
+destroyed so we still release the reference on the Coda file handle at
+the right time.
+
+Link: http://lkml.kernel.org/r/0e850c6e59c0b147dc2dcd51a3af004c948c3697.1558117389.git.jaharkes@cs.cmu.edu
+Signed-off-by: Jan Harkes <jaharkes@cs.cmu.edu>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Colin Ian King <colin.king@canonical.com>
+Cc: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: David Howells <dhowells@redhat.com>
+Cc: Fabian Frederick <fabf@skynet.be>
+Cc: Mikko Rapeli <mikko.rapeli@iki.fi>
+Cc: Sam Protsenko <semen.protsenko@linaro.org>
+Cc: Yann Droneaud <ydroneaud@opteya.com>
+Cc: Zhouyang Jia <jiazhouyang09@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/coda/file.c |   70 +++++++++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 68 insertions(+), 2 deletions(-)
+
+--- a/fs/coda/file.c
++++ b/fs/coda/file.c
+@@ -27,6 +27,13 @@
+ #include "coda_linux.h"
+ #include "coda_int.h"
++struct coda_vm_ops {
++      atomic_t refcnt;
++      struct file *coda_file;
++      const struct vm_operations_struct *host_vm_ops;
++      struct vm_operations_struct vm_ops;
++};
++
+ static ssize_t
+ coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ {
+@@ -61,6 +68,34 @@ coda_file_write_iter(struct kiocb *iocb,
+       return ret;
+ }
++static void
++coda_vm_open(struct vm_area_struct *vma)
++{
++      struct coda_vm_ops *cvm_ops =
++              container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
++
++      atomic_inc(&cvm_ops->refcnt);
++
++      if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
++              cvm_ops->host_vm_ops->open(vma);
++}
++
++static void
++coda_vm_close(struct vm_area_struct *vma)
++{
++      struct coda_vm_ops *cvm_ops =
++              container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
++
++      if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
++              cvm_ops->host_vm_ops->close(vma);
++
++      if (atomic_dec_and_test(&cvm_ops->refcnt)) {
++              vma->vm_ops = cvm_ops->host_vm_ops;
++              fput(cvm_ops->coda_file);
++              kfree(cvm_ops);
++      }
++}
++
+ static int
+ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
+ {
+@@ -68,6 +103,8 @@ coda_file_mmap(struct file *coda_file, s
+       struct coda_inode_info *cii;
+       struct file *host_file;
+       struct inode *coda_inode, *host_inode;
++      struct coda_vm_ops *cvm_ops;
++      int ret;
+       cfi = CODA_FTOC(coda_file);
+       BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
+@@ -76,6 +113,13 @@ coda_file_mmap(struct file *coda_file, s
+       if (!host_file->f_op->mmap)
+               return -ENODEV;
++      if (WARN_ON(coda_file != vma->vm_file))
++              return -EIO;
++
++      cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
++      if (!cvm_ops)
++              return -ENOMEM;
++
+       coda_inode = file_inode(coda_file);
+       host_inode = file_inode(host_file);
+@@ -89,6 +133,7 @@ coda_file_mmap(struct file *coda_file, s
+        * the container file on us! */
+       else if (coda_inode->i_mapping != host_inode->i_mapping) {
+               spin_unlock(&cii->c_lock);
++              kfree(cvm_ops);
+               return -EBUSY;
+       }
+@@ -97,7 +142,29 @@ coda_file_mmap(struct file *coda_file, s
+       cfi->cfi_mapcount++;
+       spin_unlock(&cii->c_lock);
+-      return call_mmap(host_file, vma);
++      vma->vm_file = get_file(host_file);
++      ret = call_mmap(vma->vm_file, vma);
++
++      if (ret) {
++              /* if call_mmap fails, our caller will put coda_file so we
++               * should drop the reference to the host_file that we got.
++               */
++              fput(host_file);
++              kfree(cvm_ops);
++      } else {
++              /* here we add redirects for the open/close vm_operations */
++              cvm_ops->host_vm_ops = vma->vm_ops;
++              if (vma->vm_ops)
++                      cvm_ops->vm_ops = *vma->vm_ops;
++
++              cvm_ops->vm_ops.open = coda_vm_open;
++              cvm_ops->vm_ops.close = coda_vm_close;
++              cvm_ops->coda_file = coda_file;
++              atomic_set(&cvm_ops->refcnt, 1);
++
++              vma->vm_ops = &cvm_ops->vm_ops;
++      }
++      return ret;
+ }
+ int coda_open(struct inode *coda_inode, struct file *coda_file)
+@@ -207,4 +274,3 @@ const struct file_operations coda_file_o
+       .fsync          = coda_fsync,
+       .splice_read    = generic_file_splice_read,
+ };
+-
diff --git a/queue-5.1/dm-zoned-fix-zone-state-management-race.patch b/queue-5.1/dm-zoned-fix-zone-state-management-race.patch
new file mode 100644 (file)
index 0000000..afd3499
--- /dev/null
@@ -0,0 +1,130 @@
+From 3b8cafdd5436f9298b3bf6eb831df5eef5ee82b6 Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@wdc.com>
+Date: Tue, 16 Jul 2019 14:39:34 +0900
+Subject: dm zoned: fix zone state management race
+
+From: Damien Le Moal <damien.lemoal@wdc.com>
+
+commit 3b8cafdd5436f9298b3bf6eb831df5eef5ee82b6 upstream.
+
+dm-zoned uses the zone flag DMZ_ACTIVE to indicate that a zone of the
+backend device is being actively read or written and so cannot be
+reclaimed. This flag is set as long as the zone atomic reference
+counter is not 0. When this atomic is decremented and reaches 0 (e.g.
+on BIO completion), the active flag is cleared and set again whenever
+the zone is reused and a BIO is issued, with the atomic counter incremented.
+These 2 operations (atomic inc/dec and flag set/clear) are however not
+always executed atomically under the target metadata mutex lock and
+this causes the warning:
+
+WARN_ON(!test_bit(DMZ_ACTIVE, &zone->flags));
+
+in dmz_deactivate_zone() to be displayed. This problem is regularly
+triggered with xfstests generic/209, generic/300, generic/451 and
+xfs/077 with XFS being used as the file system on the dm-zoned target
+device. Similarly, xfstests ext4/303, ext4/304, generic/209 and
+generic/300 trigger the warning with ext4 use.
+
+This problem can be easily fixed by simply removing the DMZ_ACTIVE flag
+and managing the "ACTIVE" state by directly looking at the reference
+counter value. To do so, the functions dmz_activate_zone() and
+dmz_deactivate_zone() are changed to inline functions respectively
+calling atomic_inc() and atomic_dec(), while the dmz_is_active() macro
+is changed to an inline function calling atomic_read().
+
+Fixes: 3b1a94c88b79 ("dm zoned: drive-managed zoned block device target")
+Cc: stable@vger.kernel.org
+Reported-by: Masato Suzuki <masato.suzuki@wdc.com>
+Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-zoned-metadata.c |   24 ------------------------
+ drivers/md/dm-zoned.h          |   28 ++++++++++++++++++++++++----
+ 2 files changed, 24 insertions(+), 28 deletions(-)
+
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -1594,30 +1594,6 @@ struct dm_zone *dmz_get_zone_for_reclaim
+ }
+ /*
+- * Activate a zone (increment its reference count).
+- */
+-void dmz_activate_zone(struct dm_zone *zone)
+-{
+-      set_bit(DMZ_ACTIVE, &zone->flags);
+-      atomic_inc(&zone->refcount);
+-}
+-
+-/*
+- * Deactivate a zone. This decrement the zone reference counter
+- * and clears the active state of the zone once the count reaches 0,
+- * indicating that all BIOs to the zone have completed. Returns
+- * true if the zone was deactivated.
+- */
+-void dmz_deactivate_zone(struct dm_zone *zone)
+-{
+-      if (atomic_dec_and_test(&zone->refcount)) {
+-              WARN_ON(!test_bit(DMZ_ACTIVE, &zone->flags));
+-              clear_bit_unlock(DMZ_ACTIVE, &zone->flags);
+-              smp_mb__after_atomic();
+-      }
+-}
+-
+-/*
+  * Get the zone mapping a chunk, if the chunk is mapped already.
+  * If no mapping exist and the operation is WRITE, a zone is
+  * allocated and used to map the chunk.
+--- a/drivers/md/dm-zoned.h
++++ b/drivers/md/dm-zoned.h
+@@ -115,7 +115,6 @@ enum {
+       DMZ_BUF,
+       /* Zone internal state */
+-      DMZ_ACTIVE,
+       DMZ_RECLAIM,
+       DMZ_SEQ_WRITE_ERR,
+ };
+@@ -128,7 +127,6 @@ enum {
+ #define dmz_is_empty(z)               ((z)->wp_block == 0)
+ #define dmz_is_offline(z)     test_bit(DMZ_OFFLINE, &(z)->flags)
+ #define dmz_is_readonly(z)    test_bit(DMZ_READ_ONLY, &(z)->flags)
+-#define dmz_is_active(z)      test_bit(DMZ_ACTIVE, &(z)->flags)
+ #define dmz_in_reclaim(z)     test_bit(DMZ_RECLAIM, &(z)->flags)
+ #define dmz_seq_write_err(z)  test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags)
+@@ -188,8 +186,30 @@ void dmz_unmap_zone(struct dmz_metadata
+ unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd);
+ unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
+-void dmz_activate_zone(struct dm_zone *zone);
+-void dmz_deactivate_zone(struct dm_zone *zone);
++/*
++ * Activate a zone (increment its reference count).
++ */
++static inline void dmz_activate_zone(struct dm_zone *zone)
++{
++      atomic_inc(&zone->refcount);
++}
++
++/*
++ * Deactivate a zone. This decrement the zone reference counter
++ * indicating that all BIOs to the zone have completed when the count is 0.
++ */
++static inline void dmz_deactivate_zone(struct dm_zone *zone)
++{
++      atomic_dec(&zone->refcount);
++}
++
++/*
++ * Test if a zone is active, that is, has a refcount > 0.
++ */
++static inline bool dmz_is_active(struct dm_zone *zone)
++{
++      return atomic_read(&zone->refcount);
++}
+ int dmz_lock_zone_reclaim(struct dm_zone *zone);
+ void dmz_unlock_zone_reclaim(struct dm_zone *zone);
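
One interleaving consistent with the description (inferred, not taken from a
trace) shows how a separate flag and reference count could disagree with the
old code. Start with refcount = 1 and DMZ_ACTIVE set:

    CPU A: dmz_deactivate_zone()          CPU B: dmz_activate_zone()
      atomic_dec_and_test()  -> count 0
                                            set_bit(DMZ_ACTIVE)  (already set)
                                            atomic_inc()         -> count 1
      WARN_ON(!test_bit(...)) passes
      clear_bit_unlock(DMZ_ACTIVE)          count is 1 but the flag is now clear

The next dmz_deactivate_zone() that drops the count to 0 then trips
WARN_ON(!test_bit(DMZ_ACTIVE)). Deriving "active" directly from
atomic_read(&zone->refcount) leaves a single word to update, so no such
window exists.
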
diff --git a/queue-5.1/drm-edid-parse-cea-blocks-embedded-in-displayid.patch b/queue-5.1/drm-edid-parse-cea-blocks-embedded-in-displayid.patch
new file mode 100644 (file)
index 0000000..fa13f3b
--- /dev/null
@@ -0,0 +1,173 @@
+From e28ad544f462231d3fd081a7316339359efbb481 Mon Sep 17 00:00:00 2001
+From: Andres Rodriguez <andresx7@gmail.com>
+Date: Wed, 19 Jun 2019 14:09:01 -0400
+Subject: drm/edid: parse CEA blocks embedded in DisplayID
+
+From: Andres Rodriguez <andresx7@gmail.com>
+
+commit e28ad544f462231d3fd081a7316339359efbb481 upstream.
+
+DisplayID blocks allow embedding of CEA blocks. The payloads are
+identical to traditional top level CEA extension blocks, but the header
+is slightly different.
+
+This change allows the CEA parser to find a CEA block inside a DisplayID
+block. Additionally, it adds support for parsing the embedded CTA
+header. No further changes are necessary due to payload parity.
+
+This change fixes audio support for the Valve Index HMD.
+
+Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
+Reviewed-by: Dave Airlie <airlied@redhat.com>
+Cc: Jani Nikula <jani.nikula@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v4.15
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190619180901.17901-1-andresx7@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_edid.c  |   81 ++++++++++++++++++++++++++++++++++++++------
+ include/drm/drm_displayid.h |   10 +++++
+ 2 files changed, 80 insertions(+), 11 deletions(-)
+
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -1349,6 +1349,7 @@ MODULE_PARM_DESC(edid_fixup,
+ static void drm_get_displayid(struct drm_connector *connector,
+                             struct edid *edid);
++static int validate_displayid(u8 *displayid, int length, int idx);
+ static int drm_edid_block_checksum(const u8 *raw_edid)
+ {
+@@ -2932,16 +2933,46 @@ static u8 *drm_find_edid_extension(const
+       return edid_ext;
+ }
+-static u8 *drm_find_cea_extension(const struct edid *edid)
+-{
+-      return drm_find_edid_extension(edid, CEA_EXT);
+-}
+ static u8 *drm_find_displayid_extension(const struct edid *edid)
+ {
+       return drm_find_edid_extension(edid, DISPLAYID_EXT);
+ }
++static u8 *drm_find_cea_extension(const struct edid *edid)
++{
++      int ret;
++      int idx = 1;
++      int length = EDID_LENGTH;
++      struct displayid_block *block;
++      u8 *cea;
++      u8 *displayid;
++
++      /* Look for a top level CEA extension block */
++      cea = drm_find_edid_extension(edid, CEA_EXT);
++      if (cea)
++              return cea;
++
++      /* CEA blocks can also be found embedded in a DisplayID block */
++      displayid = drm_find_displayid_extension(edid);
++      if (!displayid)
++              return NULL;
++
++      ret = validate_displayid(displayid, length, idx);
++      if (ret)
++              return NULL;
++
++      idx += sizeof(struct displayid_hdr);
++      for_each_displayid_db(displayid, block, idx, length) {
++              if (block->tag == DATA_BLOCK_CTA) {
++                      cea = (u8 *)block;
++                      break;
++              }
++      }
++
++      return cea;
++}
++
+ /*
+  * Calculate the alternate clock for the CEA mode
+  * (60Hz vs. 59.94Hz etc.)
+@@ -3665,13 +3696,38 @@ cea_revision(const u8 *cea)
+ static int
+ cea_db_offsets(const u8 *cea, int *start, int *end)
+ {
+-      /* Data block offset in CEA extension block */
+-      *start = 4;
+-      *end = cea[2];
+-      if (*end == 0)
+-              *end = 127;
+-      if (*end < 4 || *end > 127)
+-              return -ERANGE;
++      /* DisplayID CTA extension blocks and top-level CEA EDID
++       * block header definitions differ in the following bytes:
++       *   1) Byte 2 of the header specifies length differently,
++       *   2) Byte 3 is only present in the CEA top level block.
++       *
++       * The different definitions for byte 2 follow.
++       *
++       * DisplayID CTA extension block defines byte 2 as:
++       *   Number of payload bytes
++       *
++       * CEA EDID block defines byte 2 as:
++       *   Byte number (decimal) within this block where the 18-byte
++       *   DTDs begin. If no non-DTD data is present in this extension
++       *   block, the value should be set to 04h (the byte after next).
++       *   If set to 00h, there are no DTDs present in this block and
++       *   no non-DTD data.
++       */
++      if (cea[0] == DATA_BLOCK_CTA) {
++              *start = 3;
++              *end = *start + cea[2];
++      } else if (cea[0] == CEA_EXT) {
++              /* Data block offset in CEA extension block */
++              *start = 4;
++              *end = cea[2];
++              if (*end == 0)
++                      *end = 127;
++              if (*end < 4 || *end > 127)
++                      return -ERANGE;
++      } else {
++              return -ENOTSUPP;
++      }
++
+       return 0;
+ }
+@@ -5219,6 +5275,9 @@ static int drm_parse_display_id(struct d
+               case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
+                       /* handled in mode gathering code. */
+                       break;
++              case DATA_BLOCK_CTA:
++                      /* handled in the cea parser code. */
++                      break;
+               default:
+                       DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
+                       break;
+--- a/include/drm/drm_displayid.h
++++ b/include/drm/drm_displayid.h
+@@ -40,6 +40,7 @@
+ #define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
+ #define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
+ #define DATA_BLOCK_TILED_DISPLAY 0x12
++#define DATA_BLOCK_CTA 0x81
+ #define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
+@@ -90,4 +91,13 @@ struct displayid_detailed_timing_block {
+       struct displayid_block base;
+       struct displayid_detailed_timings_1 timings[0];
+ };
++
++#define for_each_displayid_db(displayid, block, idx, length) \
++      for ((block) = (struct displayid_block *)&(displayid)[idx]; \
++           (idx) + sizeof(struct displayid_block) <= (length) && \
++           (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \
++           (block)->num_bytes > 0; \
++           (idx) += (block)->num_bytes + sizeof(struct displayid_block), \
++           (block) = (struct displayid_block *)&(displayid)[idx])
++
+ #endif
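
A concrete reading of the new cea_db_offsets() branches, with illustrative
byte values (not taken from real EDID data): for a CTA data block embedded in
DisplayID (tag 0x81), byte 2 is the payload length, so cea[2] == 0x2a gives
*start = 3 and *end = 3 + 0x2a = 45, i.e. data blocks in bytes 3..44; for a
top-level CEA extension (tag 0x02), byte 2 is the offset of the first 18-byte
DTD, so cea[2] == 0x20 gives *start = 4 and *end = 0x20 = 32, i.e. data
blocks in bytes 4..31.
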
diff --git a/queue-5.1/drm-nouveau-i2c-enable-i2c-pads-busses-during-preinit.patch b/queue-5.1/drm-nouveau-i2c-enable-i2c-pads-busses-during-preinit.patch
new file mode 100644 (file)
index 0000000..4539b61
--- /dev/null
@@ -0,0 +1,78 @@
+From 7cb95eeea6706c790571042a06782e378b2561ea Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Wed, 26 Jun 2019 14:10:27 -0400
+Subject: drm/nouveau/i2c: Enable i2c pads & busses during preinit
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit 7cb95eeea6706c790571042a06782e378b2561ea upstream.
+
+It turns out that while disabling i2c bus access from software when the
+GPU is suspended was a step in the right direction with:
+
+commit 342406e4fbba ("drm/nouveau/i2c: Disable i2c bus access after
+->fini()")
+
+We also ended up accidentally breaking the vbios init scripts on some
+older Tesla GPUs, as apparently said scripts can actually use the i2c
+bus. Since these scripts are executed before initializing any
+subdevices, we end up failing to acquire access to the i2c bus which has
+left a number of cards with their fan controllers uninitialized. Luckily
+this doesn't break hardware - it just means the fan gets stuck at 100%.
+
+This also means that we've always been using our i2c busses before
+initializing them during the init scripts for older GPUs; we just didn't
+notice it until we started preventing them from being used until init.
+It's pretty impressive this never caused us any issues before!
+
+So, fix this by initializing our i2c pad and busses during subdev
+pre-init. We skip initializing aux busses during pre-init, as those are
+guaranteed to only ever be used by nouveau for DP aux transactions.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Tested-by: Marc Meledandri <m.meledandri@gmail.com>
+Fixes: 342406e4fbba ("drm/nouveau/i2c: Disable i2c bus access after ->fini()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c |   20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+@@ -185,6 +185,25 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev
+ }
+ static int
++nvkm_i2c_preinit(struct nvkm_subdev *subdev)
++{
++      struct nvkm_i2c *i2c = nvkm_i2c(subdev);
++      struct nvkm_i2c_bus *bus;
++      struct nvkm_i2c_pad *pad;
++
++      /*
++       * We init our i2c busses as early as possible, since they may be
++       * needed by the vbios init scripts on some cards
++       */
++      list_for_each_entry(pad, &i2c->pad, head)
++              nvkm_i2c_pad_init(pad);
++      list_for_each_entry(bus, &i2c->bus, head)
++              nvkm_i2c_bus_init(bus);
++
++      return 0;
++}
++
++static int
+ nvkm_i2c_init(struct nvkm_subdev *subdev)
+ {
+       struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+@@ -238,6 +257,7 @@ nvkm_i2c_dtor(struct nvkm_subdev *subdev
+ static const struct nvkm_subdev_func
+ nvkm_i2c = {
+       .dtor = nvkm_i2c_dtor,
++      .preinit = nvkm_i2c_preinit,
+       .init = nvkm_i2c_init,
+       .fini = nvkm_i2c_fini,
+       .intr = nvkm_i2c_intr,
diff --git a/queue-5.1/hid-wacom-correct-touch-resolution-x-y-typo.patch b/queue-5.1/hid-wacom-correct-touch-resolution-x-y-typo.patch
new file mode 100644 (file)
index 0000000..eb34c19
--- /dev/null
@@ -0,0 +1,34 @@
+From 68c20cc2164cc5c7c73f8012ae6491afdb1f7f72 Mon Sep 17 00:00:00 2001
+From: Aaron Armstrong Skomra <skomra@gmail.com>
+Date: Fri, 10 May 2019 15:34:18 -0700
+Subject: HID: wacom: correct touch resolution x/y typo
+
+From: Aaron Armstrong Skomra <skomra@gmail.com>
+
+commit 68c20cc2164cc5c7c73f8012ae6491afdb1f7f72 upstream.
+
+This affects the 2nd-gen Intuos Pro Medium and Large
+when using their Bluetooth connection.
+
+Fixes: 4922cd26f03c ("HID: wacom: Support 2nd-gen Intuos Pro's Bluetooth classic interface")
+Cc: <stable@vger.kernel.org> # v4.11+
+Signed-off-by: Aaron Armstrong Skomra <aaron.skomra@wacom.com>
+Reviewed-by: Jason Gerecke <jason.gerecke@wacom.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/wacom_wac.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -3716,7 +3716,7 @@ int wacom_setup_touch_input_capabilities
+                                            0, 5920, 4, 0);
+               }
+               input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
+-              input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
++              input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40);
+               /* fall through */
diff --git a/queue-5.1/hid-wacom-generic-correct-pad-syncing.patch b/queue-5.1/hid-wacom-generic-correct-pad-syncing.patch
new file mode 100644 (file)
index 0000000..8fe4086
--- /dev/null
@@ -0,0 +1,79 @@
+From d4b8efeb46d99a5d02e7f88ac4eaccbe49370770 Mon Sep 17 00:00:00 2001
+From: Aaron Armstrong Skomra <skomra@gmail.com>
+Date: Fri, 10 May 2019 15:34:17 -0700
+Subject: HID: wacom: generic: Correct pad syncing
+
+From: Aaron Armstrong Skomra <skomra@gmail.com>
+
+commit d4b8efeb46d99a5d02e7f88ac4eaccbe49370770 upstream.
+
+Only sync the pad once per report, not once per collection.
+Also avoid syncing the pad on battery reports.
+
+Fixes: f8b6a74719b5 ("HID: wacom: generic: Support multiple tools per report")
+Cc: <stable@vger.kernel.org> # v4.17+
+Signed-off-by: Aaron Armstrong Skomra <aaron.skomra@wacom.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/wacom_wac.c |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2121,14 +2121,12 @@ static void wacom_wac_pad_report(struct
+       bool active = wacom_wac->hid_data.inrange_state != 0;
+       /* report prox for expresskey events */
+-      if ((wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) &&
+-          wacom_wac->hid_data.pad_input_event_flag) {
++      if (wacom_wac->hid_data.pad_input_event_flag) {
+               input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
+               input_sync(input);
+               if (!active)
+                       wacom_wac->hid_data.pad_input_event_flag = false;
+       }
+-
+ }
+ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
+@@ -2704,9 +2702,7 @@ static int wacom_wac_collection(struct h
+       if (report->type != HID_INPUT_REPORT)
+               return -1;
+-      if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
+-              wacom_wac_pad_report(hdev, report, field);
+-      else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
++      if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+               wacom_wac_pen_report(hdev, report);
+       else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+               wacom_wac_finger_report(hdev, report);
+@@ -2720,7 +2716,7 @@ void wacom_wac_report(struct hid_device
+       struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+       struct hid_field *field;
+       bool pad_in_hid_field = false, pen_in_hid_field = false,
+-              finger_in_hid_field = false;
++              finger_in_hid_field = false, true_pad = false;
+       int r;
+       int prev_collection = -1;
+@@ -2736,6 +2732,8 @@ void wacom_wac_report(struct hid_device
+                       pen_in_hid_field = true;
+               if (WACOM_FINGER_FIELD(field))
+                       finger_in_hid_field = true;
++              if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY)
++                      true_pad = true;
+       }
+       wacom_wac_battery_pre_report(hdev, report);
+@@ -2759,6 +2757,9 @@ void wacom_wac_report(struct hid_device
+       }
+       wacom_wac_battery_report(hdev, report);
++
++      if (true_pad && wacom->wacom_wac.pad_input)
++              wacom_wac_pad_report(hdev, report, field);
+ }
+ static int wacom_bpt_touch(struct wacom_wac *wacom)
diff --git a/queue-5.1/hid-wacom-generic-only-switch-the-mode-on-devices-with-leds.patch b/queue-5.1/hid-wacom-generic-only-switch-the-mode-on-devices-with-leds.patch
new file mode 100644 (file)
index 0000000..b39249d
--- /dev/null
@@ -0,0 +1,67 @@
+From d8e9806005f28bbb49899dab2068e3359e22ba35 Mon Sep 17 00:00:00 2001
+From: Aaron Armstrong Skomra <skomra@gmail.com>
+Date: Fri, 10 May 2019 15:31:16 -0700
+Subject: HID: wacom: generic: only switch the mode on devices with LEDs
+
+From: Aaron Armstrong Skomra <skomra@gmail.com>
+
+commit d8e9806005f28bbb49899dab2068e3359e22ba35 upstream.
+
+Currently, the driver will attempt to set the mode on all
+devices with a center button, but some devices with a center
+button lack LEDs, and attempting to set the LEDs on devices
+without LEDs results in the kernel error message of the form:
+
+"leds input8::wacom-0.1: Setting an LED's brightness failed (-32)"
+
+This is because the generic codepath erroneously assumes that the
+BUTTON_CENTER usage indicates that the device has LEDs. The
+previously ignored TOUCH_RING_SETTING usage is a more accurate
+indication of the existence of LEDs on the device.
+
+Fixes: 10c55cacb8b2 ("HID: wacom: generic: support LEDs")
+Cc: <stable@vger.kernel.org> # v4.11+
+Signed-off-by: Aaron Armstrong Skomra <aaron.skomra@wacom.com>
+Reviewed-by: Jason Gerecke <jason.gerecke@wacom.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/wacom_sys.c |    3 +++
+ drivers/hid/wacom_wac.c |    2 --
+ drivers/hid/wacom_wac.h |    1 +
+ 3 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -307,6 +307,9 @@ static void wacom_feature_mapping(struct
+       wacom_hid_usage_quirk(hdev, field, usage);
+       switch (equivalent_usage) {
++      case WACOM_HID_WD_TOUCH_RING_SETTING:
++              wacom->generic_has_leds = true;
++              break;
+       case HID_DG_CONTACTMAX:
+               /* leave touch_max as is if predefined */
+               if (!features->touch_max) {
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1930,8 +1930,6 @@ static void wacom_wac_pad_usage_mapping(
+               features->device_type |= WACOM_DEVICETYPE_PAD;
+               break;
+       case WACOM_HID_WD_BUTTONCENTER:
+-              wacom->generic_has_leds = true;
+-              /* fall through */
+       case WACOM_HID_WD_BUTTONHOME:
+       case WACOM_HID_WD_BUTTONUP:
+       case WACOM_HID_WD_BUTTONDOWN:
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -145,6 +145,7 @@
+ #define WACOM_HID_WD_OFFSETBOTTOM       (WACOM_HID_UP_WACOMDIGITIZER | 0x0d33)
+ #define WACOM_HID_WD_DATAMODE           (WACOM_HID_UP_WACOMDIGITIZER | 0x1002)
+ #define WACOM_HID_WD_DIGITIZERINFO      (WACOM_HID_UP_WACOMDIGITIZER | 0x1013)
++#define WACOM_HID_WD_TOUCH_RING_SETTING (WACOM_HID_UP_WACOMDIGITIZER | 0x1032)
+ #define WACOM_HID_UP_G9                 0xff090000
+ #define WACOM_HID_G9_PEN                (WACOM_HID_UP_G9 | 0x02)
+ #define WACOM_HID_G9_TOUCHSCREEN        (WACOM_HID_UP_G9 | 0x11)
diff --git a/queue-5.1/ib-mlx5-report-correctly-tag-matching-rendezvous-capability.patch b/queue-5.1/ib-mlx5-report-correctly-tag-matching-rendezvous-capability.patch
new file mode 100644 (file)
index 0000000..da936f1
--- /dev/null
@@ -0,0 +1,68 @@
+From 89705e92700170888236555fe91b45e4c1bb0985 Mon Sep 17 00:00:00 2001
+From: Danit Goldberg <danitg@mellanox.com>
+Date: Fri, 5 Jul 2019 19:21:57 +0300
+Subject: IB/mlx5: Report correctly tag matching rendezvous capability
+
+From: Danit Goldberg <danitg@mellanox.com>
+
+commit 89705e92700170888236555fe91b45e4c1bb0985 upstream.
+
+Userspace expects the IB_TM_CAP_RC bit to indicate that the device
+supports RC transport tag matching with rendezvous offload. However, the
+firmware splits this into two capabilities for eager and rendezvous tag
+matching.
+
+Only if the FW supports both modes should userspace be told the tag
+matching capability is available.
+
+Cc: <stable@vger.kernel.org> # 4.13
+Fixes: eb761894351d ("IB/mlx5: Fill XRQ capabilities")
+Signed-off-by: Danit Goldberg <danitg@mellanox.com>
+Reviewed-by: Yishai Hadas <yishaih@mellanox.com>
+Reviewed-by: Artemy Kovalyov <artemyko@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/mlx5/main.c |    8 ++++++--
+ include/rdma/ib_verbs.h           |    4 ++--
+ 2 files changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1041,15 +1041,19 @@ static int mlx5_ib_query_device(struct i
+       }
+       if (MLX5_CAP_GEN(mdev, tag_matching)) {
+-              props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
+               props->tm_caps.max_num_tags =
+                       (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
+-              props->tm_caps.flags = IB_TM_CAP_RC;
+               props->tm_caps.max_ops =
+                       1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+               props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
+       }
++      if (MLX5_CAP_GEN(mdev, tag_matching) &&
++          MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
++              props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
++              props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
++      }
++
+       if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
+               props->cq_caps.max_cq_moderation_count =
+                                               MLX5_MAX_CQ_COUNT;
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -293,8 +293,8 @@ struct ib_rss_caps {
+ };
+ enum ib_tm_cap_flags {
+-      /*  Support tag matching on RC transport */
+-      IB_TM_CAP_RC                = 1 << 0,
++      /*  Support tag matching with rendezvous offload for RC transport */
++      IB_TM_CAP_RNDV_RC = 1 << 0,
+ };
+ struct ib_tm_caps {
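For reference, the capability reported here can be checked from userspace
through libibverbs.  The sketch below is a hedged illustration, assuming a
recent rdma-core that exposes ibv_query_device_ex(), the tm_caps field and
the IBV_TM_CAP_RC flag (those names come from rdma-core, not from this
patch); build with -libverbs.

  /* Hedged sketch: print the tag-matching capability seen by userspace. */
  #include <stdio.h>
  #include <infiniband/verbs.h>

  int main(void)
  {
          int num = 0;
          struct ibv_device **list = ibv_get_device_list(&num);
          struct ibv_context *ctx;
          struct ibv_device_attr_ex attr;

          if (!list || num == 0) {
                  fprintf(stderr, "no RDMA devices found\n");
                  return 1;
          }

          ctx = ibv_open_device(list[0]);
          if (ctx && ibv_query_device_ex(ctx, NULL, &attr) == 0) {
                  /* Set only when the FW supports eager and rendezvous offload. */
                  printf("RC tag matching with rendezvous: %s\n",
                         (attr.tm_caps.flags & IBV_TM_CAP_RC) ? "yes" : "no");
                  printf("max tags: %u, max rndv header: %u\n",
                         attr.tm_caps.max_num_tags,
                         attr.tm_caps.max_rndv_hdr_size);
          }

          if (ctx)
                  ibv_close_device(ctx);
          ibv_free_device_list(list);
          return 0;
  }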
diff --git a/queue-5.1/include-asm-generic-bug.h-fix-cut-here-for-warn_on-for-__warn_taint-architectures.patch b/queue-5.1/include-asm-generic-bug.h-fix-cut-here-for-warn_on-for-__warn_taint-architectures.patch
new file mode 100644 (file)
index 0000000..815dea8
--- /dev/null
@@ -0,0 +1,43 @@
+From 6b15f678fb7d5ef54e089e6ace72f007fe6e9895 Mon Sep 17 00:00:00 2001
+From: Drew Davenport <ddavenport@chromium.org>
+Date: Tue, 16 Jul 2019 16:30:18 -0700
+Subject: include/asm-generic/bug.h: fix "cut here" for WARN_ON for __WARN_TAINT architectures
+
+From: Drew Davenport <ddavenport@chromium.org>
+
+commit 6b15f678fb7d5ef54e089e6ace72f007fe6e9895 upstream.
+
+For architectures using __WARN_TAINT, the WARN_ON macro did not print
+out the "cut here" string.  The other WARN_XXX macros would print "cut
+here" inside __warn_printk, which is not called for WARN_ON since it
+doesn't have a message to print.
+
+Link: http://lkml.kernel.org/r/20190624154831.163888-1-ddavenport@chromium.org
+Fixes: a7bed27af194 ("bug: fix "cut here" location for __WARN_TAINT architectures")
+Signed-off-by: Drew Davenport <ddavenport@chromium.org>
+Acked-by: Kees Cook <keescook@chromium.org>
+Tested-by: Kees Cook <keescook@chromium.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/asm-generic/bug.h |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/include/asm-generic/bug.h
++++ b/include/asm-generic/bug.h
+@@ -104,8 +104,10 @@ extern void warn_slowpath_null(const cha
+       warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
+ #else
+ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
+-#define __WARN()              __WARN_TAINT(TAINT_WARN)
+-#define __WARN_printf(arg...) do { __warn_printk(arg); __WARN(); } while (0)
++#define __WARN() do { \
++      printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \
++} while (0)
++#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg)
+ #define __WARN_printf_taint(taint, arg...)                            \
+       do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
+ #endif
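To see the restored behaviour, a trivial test module is enough: loading it
fires a bare WARN_ON(), which should now be preceded by the "cut here"
marker on __WARN_TAINT architectures as well.  This module is purely a
hypothetical illustration, not part of the patch.

  #include <linux/module.h>
  #include <linux/kernel.h>

  static int __init warn_demo_init(void)
  {
          /* Expect "------------[ cut here ]------------" before the backtrace. */
          WARN_ON(1);
          return 0;
  }

  static void __exit warn_demo_exit(void)
  {
  }

  module_init(warn_demo_init);
  module_exit(warn_demo_exit);
  MODULE_LICENSE("GPL");
  MODULE_DESCRIPTION("WARN_ON cut-here demo (illustrative only)");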
diff --git a/queue-5.1/intel_th-pci-add-ice-lake-nnpi-support.patch b/queue-5.1/intel_th-pci-add-ice-lake-nnpi-support.patch
new file mode 100644 (file)
index 0000000..d9bc0cf
--- /dev/null
@@ -0,0 +1,35 @@
+From 4aa5aed2b6f267592705a526f57518a5d715b769 Mon Sep 17 00:00:00 2001
+From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Date: Fri, 21 Jun 2019 19:19:30 +0300
+Subject: intel_th: pci: Add Ice Lake NNPI support
+
+From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+
+commit 4aa5aed2b6f267592705a526f57518a5d715b769 upstream.
+
+This adds Ice Lake NNPI support to the Intel(R) Trace Hub.
+
+Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: stable <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20190621161930.60785-5-alexander.shishkin@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwtracing/intel_th/pci.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -170,6 +170,11 @@ static const struct pci_device_id intel_
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
++      {
++              /* Ice Lake NNPI */
++              PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
++              .driver_data = (kernel_ulong_t)&intel_th_2x,
++      },
+       { 0 },
+ };
diff --git a/queue-5.1/libnvdimm-pfn-fix-fsdax-mode-namespace-info-block-zero-fields.patch b/queue-5.1/libnvdimm-pfn-fix-fsdax-mode-namespace-info-block-zero-fields.patch
new file mode 100644 (file)
index 0000000..9a0ee2f
--- /dev/null
@@ -0,0 +1,146 @@
+From 7e3e888dfc138089f4c15a81b418e88f0978f744 Mon Sep 17 00:00:00 2001
+From: Dan Williams <dan.j.williams@intel.com>
+Date: Thu, 18 Jul 2019 15:58:36 -0700
+Subject: libnvdimm/pfn: fix fsdax-mode namespace info-block zero-fields
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Dan Williams <dan.j.williams@intel.com>
+
+commit 7e3e888dfc138089f4c15a81b418e88f0978f744 upstream.
+
+At namespace creation time there is the potential for the "expected to
+be zero" fields of a 'pfn' info-block to be filled with indeterminate
+data.  While the kernel buffer is zeroed on allocation it is immediately
+overwritten by nd_pfn_validate() filling it with the current contents of
+the on-media info-block location.  For fields like 'flags' and the
+'padding' it potentially means that future implementations cannot rely on
+those fields being zero.
+
+In preparation to stop using the 'start_pad' and 'end_trunc' fields for
+section alignment, arrange for fields that are not explicitly
+initialized to be guaranteed zero.  Bump the minor version to indicate
+it is safe to assume the 'padding' and 'flags' are zero.  Otherwise,
+this corruption is expected to be benign since all other critical fields
+are explicitly initialized.
+
+Note The cc: stable is about spreading this new policy to as many
+kernels as possible not fixing an issue in those kernels.  It is not
+until the change titled "libnvdimm/pfn: Stop padding pmem namespaces to
+section alignment" where this improper initialization becomes a problem.
+So if someone decides to backport "libnvdimm/pfn: Stop padding pmem
+namespaces to section alignment" (which is not tagged for stable), make
+sure this pre-requisite is flagged.
+
+Link: http://lkml.kernel.org/r/156092356065.979959.6681003754765958296.stgit@dwillia2-desk3.amr.corp.intel.com
+Fixes: 32ab0a3f5170 ("libnvdimm, pmem: 'struct page' for pmem")
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>       [ppc64]
+Cc: <stable@vger.kernel.org>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Jane Chu <jane.chu@oracle.com>
+Cc: Jeff Moyer <jmoyer@redhat.com>
+Cc: Jérôme Glisse <jglisse@redhat.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Logan Gunthorpe <logang@deltatee.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Mike Rapoport <rppt@linux.ibm.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
+Cc: Toshi Kani <toshi.kani@hpe.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Wei Yang <richardw.yang@linux.intel.com>
+Cc: Jason Gunthorpe <jgg@mellanox.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvdimm/dax_devs.c |    2 +-
+ drivers/nvdimm/pfn.h      |    1 +
+ drivers/nvdimm/pfn_devs.c |   18 +++++++++++++++---
+ 3 files changed, 17 insertions(+), 4 deletions(-)
+
+--- a/drivers/nvdimm/dax_devs.c
++++ b/drivers/nvdimm/dax_devs.c
+@@ -126,7 +126,7 @@ int nd_dax_probe(struct device *dev, str
+       nvdimm_bus_unlock(&ndns->dev);
+       if (!dax_dev)
+               return -ENOMEM;
+-      pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
++      pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+       nd_pfn->pfn_sb = pfn_sb;
+       rc = nd_pfn_validate(nd_pfn, DAX_SIG);
+       dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
+--- a/drivers/nvdimm/pfn.h
++++ b/drivers/nvdimm/pfn.h
+@@ -36,6 +36,7 @@ struct nd_pfn_sb {
+       __le32 end_trunc;
+       /* minor-version-2 record the base alignment of the mapping */
+       __le32 align;
++      /* minor-version-3 guarantee the padding and flags are zero */
+       u8 padding[4000];
+       __le64 checksum;
+ };
+--- a/drivers/nvdimm/pfn_devs.c
++++ b/drivers/nvdimm/pfn_devs.c
+@@ -420,6 +420,15 @@ static int nd_pfn_clear_memmap_errors(st
+       return 0;
+ }
++/**
++ * nd_pfn_validate - read and validate info-block
++ * @nd_pfn: fsdax namespace runtime state / properties
++ * @sig: 'devdax' or 'fsdax' signature
++ *
++ * Upon return the info-block buffer contents (->pfn_sb) are
++ * indeterminate when validation fails, and a coherent info-block
++ * otherwise.
++ */
+ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
+ {
+       u64 checksum, offset;
+@@ -565,7 +574,7 @@ int nd_pfn_probe(struct device *dev, str
+       nvdimm_bus_unlock(&ndns->dev);
+       if (!pfn_dev)
+               return -ENOMEM;
+-      pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
++      pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+       nd_pfn = to_nd_pfn(pfn_dev);
+       nd_pfn->pfn_sb = pfn_sb;
+       rc = nd_pfn_validate(nd_pfn, PFN_SIG);
+@@ -702,7 +711,7 @@ static int nd_pfn_init(struct nd_pfn *nd
+       u64 checksum;
+       int rc;
+-      pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
++      pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+       if (!pfn_sb)
+               return -ENOMEM;
+@@ -711,11 +720,14 @@ static int nd_pfn_init(struct nd_pfn *nd
+               sig = DAX_SIG;
+       else
+               sig = PFN_SIG;
++
+       rc = nd_pfn_validate(nd_pfn, sig);
+       if (rc != -ENODEV)
+               return rc;
+       /* no info block, do init */;
++      memset(pfn_sb, 0, sizeof(*pfn_sb));
++
+       nd_region = to_nd_region(nd_pfn->dev.parent);
+       if (nd_region->ro) {
+               dev_info(&nd_pfn->dev,
+@@ -768,7 +780,7 @@ static int nd_pfn_init(struct nd_pfn *nd
+       memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
+       memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
+       pfn_sb->version_major = cpu_to_le16(1);
+-      pfn_sb->version_minor = cpu_to_le16(2);
++      pfn_sb->version_minor = cpu_to_le16(3);
+       pfn_sb->start_pad = cpu_to_le32(start_pad);
+       pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+       pfn_sb->align = cpu_to_le32(nd_pfn->align);
diff --git a/queue-5.1/mm-nvdimm-add-is_ioremap_addr-and-use-that-to-check-ioremap-address.patch b/queue-5.1/mm-nvdimm-add-is_ioremap_addr-and-use-that-to-check-ioremap-address.patch
new file mode 100644 (file)
index 0000000..03d85d9
--- /dev/null
@@ -0,0 +1,79 @@
+From 9bd3bb6703d8c0a5fb8aec8e3287bd55b7341dcd Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
+Date: Thu, 11 Jul 2019 20:52:08 -0700
+Subject: mm/nvdimm: add is_ioremap_addr and use that to check ioremap address
+
+From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+
+commit 9bd3bb6703d8c0a5fb8aec8e3287bd55b7341dcd upstream.
+
+Architectures like powerpc use different address ranges to map the ioremap
+and vmalloc ranges.  The memunmap() check used by the nvdimm layer was
+wrongly using is_vmalloc_addr() to check for the ioremap range, which fails
+for ppc64.  This results in ppc64 not freeing the ioremap mapping.  The
+side effect of this is an unbind failure during module unload with the
+papr_scm nvdimm driver.
+
+Link: http://lkml.kernel.org/r/20190701134038.14165-1-aneesh.kumar@linux.ibm.com
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Fixes: b5beae5e224f ("powerpc/pseries: Add driver for PAPR SCM regions")
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/pgtable.h |   14 ++++++++++++++
+ include/linux/mm.h                 |    5 +++++
+ kernel/iomem.c                     |    2 +-
+ 3 files changed, 20 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/include/asm/pgtable.h
++++ b/arch/powerpc/include/asm/pgtable.h
+@@ -137,6 +137,20 @@ static inline void pte_frag_set(mm_conte
+ }
+ #endif
++#ifdef CONFIG_PPC64
++#define is_ioremap_addr is_ioremap_addr
++static inline bool is_ioremap_addr(const void *x)
++{
++#ifdef CONFIG_MMU
++      unsigned long addr = (unsigned long)x;
++
++      return addr >= IOREMAP_BASE && addr < IOREMAP_END;
++#else
++      return false;
++#endif
++}
++#endif /* CONFIG_PPC64 */
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* _ASM_POWERPC_PGTABLE_H */
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -590,6 +590,11 @@ static inline bool is_vmalloc_addr(const
+       return false;
+ #endif
+ }
++
++#ifndef is_ioremap_addr
++#define is_ioremap_addr(x) is_vmalloc_addr(x)
++#endif
++
+ #ifdef CONFIG_MMU
+ extern int is_vmalloc_or_module_addr(const void *x);
+ #else
+--- a/kernel/iomem.c
++++ b/kernel/iomem.c
+@@ -121,7 +121,7 @@ EXPORT_SYMBOL(memremap);
+ void memunmap(void *addr)
+ {
+-      if (is_vmalloc_addr(addr))
++      if (is_ioremap_addr(addr))
+               iounmap((void __iomem *) addr);
+ }
+ EXPORT_SYMBOL(memunmap);
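The one-line fallback added to include/linux/mm.h is the usual
"architecture may override" macro pattern.  Below is a hedged userspace
sketch of that idiom with made-up address ranges; it illustrates the
pattern only and is not kernel code.

  #include <stdio.h>
  #include <stdbool.h>
  #include <stdint.h>

  /* "Architecture" side: only some configs provide their own predicate. */
  #ifdef ARCH_SPLIT_IOREMAP
  #define IOREMAP_BASE 0xc000000000000000ULL          /* made-up range */
  #define IOREMAP_END  0xd000000000000000ULL
  #define is_ioremap_addr is_ioremap_addr
  static bool is_ioremap_addr(uint64_t addr)
  {
          return addr >= IOREMAP_BASE && addr < IOREMAP_END;
  }
  #endif

  /* Generic side: everyone else falls back to the vmalloc check. */
  static bool is_vmalloc_addr(uint64_t addr)
  {
          return addr >= 0xffff800000000000ULL;        /* made-up range */
  }

  #ifndef is_ioremap_addr
  #define is_ioremap_addr(x) is_vmalloc_addr(x)
  #endif

  int main(void)
  {
          uint64_t addr = 0xc000000000001000ULL;

          printf("is_vmalloc_addr: %d  is_ioremap_addr: %d\n",
                 is_vmalloc_addr(addr), is_ioremap_addr(addr));
          return 0;
  }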
diff --git a/queue-5.1/padata-use-smp_mb-in-padata_reorder-to-avoid-orphaned-padata-jobs.patch b/queue-5.1/padata-use-smp_mb-in-padata_reorder-to-avoid-orphaned-padata-jobs.patch
new file mode 100644 (file)
index 0000000..ca51d31
--- /dev/null
@@ -0,0 +1,117 @@
+From cf144f81a99d1a3928f90b0936accfd3f45c9a0a Mon Sep 17 00:00:00 2001
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+Date: Tue, 16 Jul 2019 12:32:53 -0400
+Subject: padata: use smp_mb in padata_reorder to avoid orphaned padata jobs
+
+From: Daniel Jordan <daniel.m.jordan@oracle.com>
+
+commit cf144f81a99d1a3928f90b0936accfd3f45c9a0a upstream.
+
+Testing padata with the tcrypt module on a 5.2 kernel...
+
+    # modprobe tcrypt alg="pcrypt(rfc4106(gcm(aes)))" type=3
+    # modprobe tcrypt mode=211 sec=1
+
+...produces this splat:
+
+    INFO: task modprobe:10075 blocked for more than 120 seconds.
+          Not tainted 5.2.0-base+ #16
+    modprobe        D    0 10075  10064 0x80004080
+    Call Trace:
+     ? __schedule+0x4dd/0x610
+     ? ring_buffer_unlock_commit+0x23/0x100
+     schedule+0x6c/0x90
+     schedule_timeout+0x3b/0x320
+     ? trace_buffer_unlock_commit_regs+0x4f/0x1f0
+     wait_for_common+0x160/0x1a0
+     ? wake_up_q+0x80/0x80
+     { crypto_wait_req }             # entries in braces added by hand
+     { do_one_aead_op }
+     { test_aead_jiffies }
+     test_aead_speed.constprop.17+0x681/0xf30 [tcrypt]
+     do_test+0x4053/0x6a2b [tcrypt]
+     ? 0xffffffffa00f4000
+     tcrypt_mod_init+0x50/0x1000 [tcrypt]
+     ...
+
+The second modprobe command never finishes because in padata_reorder,
+CPU0's load of reorder_objects is executed before the unlocking store in
+spin_unlock_bh(pd->lock), causing CPU0 to miss CPU1's increment:
+
+CPU0                                 CPU1
+
+padata_reorder                       padata_do_serial
+  LOAD reorder_objects  // 0
+                                       INC reorder_objects  // 1
+                                       padata_reorder
+                                         TRYLOCK pd->lock   // failed
+  UNLOCK pd->lock
+
+CPU0 deletes the timer before returning from padata_reorder and since no
+other job is submitted to padata, modprobe waits indefinitely.
+
+Add a pair of full barriers to guarantee proper ordering:
+
+CPU0                                 CPU1
+
+padata_reorder                       padata_do_serial
+  UNLOCK pd->lock
+  smp_mb()
+  LOAD reorder_objects
+                                       INC reorder_objects
+                                       smp_mb__after_atomic()
+                                       padata_reorder
+                                         TRYLOCK pd->lock
+
+smp_mb__after_atomic is needed so the read part of the trylock operation
+comes after the INC, as Andrea points out.   Thanks also to Andrea for
+help with writing a litmus test.
+
+Fixes: 16295bec6398 ("padata: Generic parallelization/serialization interface")
+Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
+Cc: <stable@vger.kernel.org>
+Cc: Andrea Parri <andrea.parri@amarulasolutions.com>
+Cc: Boqun Feng <boqun.feng@gmail.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Paul E. McKenney <paulmck@linux.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Cc: linux-arch@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/padata.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -267,7 +267,12 @@ static void padata_reorder(struct parall
+        * The next object that needs serialization might have arrived to
+        * the reorder queues in the meantime, we will be called again
+        * from the timer function if no one else cares for it.
++       *
++       * Ensure reorder_objects is read after pd->lock is dropped so we see
++       * an increment from another task in padata_do_serial.  Pairs with
++       * smp_mb__after_atomic in padata_do_serial.
+        */
++      smp_mb();
+       if (atomic_read(&pd->reorder_objects)
+                       && !(pinst->flags & PADATA_RESET))
+               mod_timer(&pd->timer, jiffies + HZ);
+@@ -387,6 +392,13 @@ void padata_do_serial(struct padata_priv
+       list_add_tail(&padata->list, &pqueue->reorder.list);
+       spin_unlock(&pqueue->reorder.lock);
++      /*
++       * Ensure the atomic_inc of reorder_objects above is ordered correctly
++       * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
++       * in padata_reorder.
++       */
++      smp_mb__after_atomic();
++
+       put_cpu();
+       /* If we're running on the wrong CPU, call padata_reorder() via a
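The CPU0/CPU1 diagram above is the classic store-buffering pattern.  The
program below is a hedged userspace analogy using plain C11 atomics and
pthreads (it is not the padata code): without fences, both threads can
miss each other's store, mirroring the missed reorder_objects increment;
run with USE_FENCE=1 and the seq_cst fences play the role of the
smp_mb()/smp_mb__after_atomic() pair, making the bad outcome disappear.
Build with: cc -O2 -pthread sb.c -o sb

  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>
  #include <stdlib.h>

  static atomic_int x, y;      /* x: "lock released"  y: "reorder_objects" */
  static atomic_int go;
  static int r0, r1;
  static int use_fence;

  static void *cpu0(void *arg)                  /* padata_reorder() side   */
  {
          while (!atomic_load(&go))
                  ;
          atomic_store_explicit(&x, 1, memory_order_relaxed);   /* UNLOCK  */
          if (use_fence)
                  atomic_thread_fence(memory_order_seq_cst);    /* smp_mb() */
          r0 = atomic_load_explicit(&y, memory_order_relaxed);  /* LOAD    */
          return arg;
  }

  static void *cpu1(void *arg)                  /* padata_do_serial() side */
  {
          while (!atomic_load(&go))
                  ;
          atomic_store_explicit(&y, 1, memory_order_relaxed);   /* INC     */
          if (use_fence)
                  atomic_thread_fence(memory_order_seq_cst);    /* after_atomic */
          r1 = atomic_load_explicit(&x, memory_order_relaxed);  /* TRYLOCK */
          return arg;
  }

  int main(void)
  {
          const char *env = getenv("USE_FENCE");
          long missed = 0;

          use_fence = env && env[0] == '1';

          for (long i = 0; i < 20000; i++) {
                  pthread_t t0, t1;

                  atomic_store(&x, 0);
                  atomic_store(&y, 0);
                  atomic_store(&go, 0);
                  pthread_create(&t0, NULL, cpu0, NULL);
                  pthread_create(&t1, NULL, cpu1, NULL);
                  atomic_store(&go, 1);
                  pthread_join(t0, NULL);
                  pthread_join(t1, NULL);
                  if (r0 == 0 && r1 == 0)   /* both missed the other's store */
                          missed++;
          }
          printf("fence=%d  both-missed outcomes: %ld\n", use_fence, missed);
          return 0;
  }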
diff --git a/queue-5.1/pci-do-not-poll-for-pme-if-the-device-is-in-d3cold.patch b/queue-5.1/pci-do-not-poll-for-pme-if-the-device-is-in-d3cold.patch
new file mode 100644 (file)
index 0000000..c223467
--- /dev/null
@@ -0,0 +1,56 @@
+From 000dd5316e1c756a1c028f22e01d06a38249dd4d Mon Sep 17 00:00:00 2001
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+Date: Wed, 12 Jun 2019 13:57:39 +0300
+Subject: PCI: Do not poll for PME if the device is in D3cold
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+commit 000dd5316e1c756a1c028f22e01d06a38249dd4d upstream.
+
+PME polling does not take into account that a device that is directly
+connected to the host bridge may go into D3cold as well. This leads to a
+situation where the PME poll thread reads from a config space of a
+device that is in D3cold and gets incorrect information because the
+config space is not accessible.
+
+Here is an example from Intel Ice Lake system where two PCIe root ports
+are in D3cold (I've instrumented the kernel to log the PMCSR register
+contents):
+
+  [   62.971442] pcieport 0000:00:07.1: Check PME status, PMCSR=0xffff
+  [   62.971504] pcieport 0000:00:07.0: Check PME status, PMCSR=0xffff
+
+Since 0xffff is interpreted so that PME is pending, the root ports will
+be runtime resumed. This repeats over and over again essentially
+blocking all runtime power management.
+
+Prevent this from happening by checking whether the device is in D3cold
+before its PME status is read.
+
+Fixes: 71a83bd727cc ("PCI/PM: add runtime PM support to PCIe port")
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Reviewed-by: Lukas Wunner <lukas@wunner.de>
+Cc: 3.6+ <stable@vger.kernel.org> # v3.6+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/pci.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -2052,6 +2052,13 @@ static void pci_pme_list_scan(struct wor
+                        */
+                       if (bridge && bridge->current_state != PCI_D0)
+                               continue;
++                      /*
++                       * If the device is in D3cold it should not be
++                       * polled either.
++                       */
++                      if (pme_dev->dev->current_state == PCI_D3cold)
++                              continue;
++
+                       pci_pme_wakeup(pme_dev->dev, NULL);
+               } else {
+                       list_del(&pme_dev->list);
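The underlying failure mode is the generic "all ones" pattern: config
reads from an inaccessible device return 0xffff, and PMCSR bit 15
(PME_Status) then looks set.  The fragment below is an illustrative,
hedged sketch of that check using standard PCI core helpers; it is not
the patched pci_pme_list_scan() itself.

  #include <linux/pci.h>

  /* Illustrative only: why a device in D3cold must not be PME-polled. */
  static bool demo_pme_looks_pending(struct pci_dev *dev)
  {
          u16 pmcsr;

          if (dev->current_state == PCI_D3cold)   /* what this fix checks */
                  return false;                   /* config space unreachable */

          pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
          if (pmcsr == (u16)~0)                   /* all 1s: read failed */
                  return false;

          return pmcsr & PCI_PM_CTRL_PME_STATUS;
  }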
diff --git a/queue-5.1/pci-hv-fix-a-use-after-free-bug-in-hv_eject_device_work.patch b/queue-5.1/pci-hv-fix-a-use-after-free-bug-in-hv_eject_device_work.patch
new file mode 100644 (file)
index 0000000..e07060f
--- /dev/null
@@ -0,0 +1,82 @@
+From 4df591b20b80cb77920953812d894db259d85bd7 Mon Sep 17 00:00:00 2001
+From: Dexuan Cui <decui@microsoft.com>
+Date: Fri, 21 Jun 2019 23:45:23 +0000
+Subject: PCI: hv: Fix a use-after-free bug in hv_eject_device_work()
+
+From: Dexuan Cui <decui@microsoft.com>
+
+commit 4df591b20b80cb77920953812d894db259d85bd7 upstream.
+
+Fix a use-after-free in hv_eject_device_work().
+
+Fixes: 05f151a73ec2 ("PCI: hv: Fix a memory leak in hv_eject_device_work()")
+Signed-off-by: Dexuan Cui <decui@microsoft.com>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Reviewed-by: Michael Kelley <mikelley@microsoft.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/controller/pci-hyperv.c |   15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -1875,6 +1875,7 @@ static void hv_pci_devices_present(struc
+ static void hv_eject_device_work(struct work_struct *work)
+ {
+       struct pci_eject_response *ejct_pkt;
++      struct hv_pcibus_device *hbus;
+       struct hv_pci_dev *hpdev;
+       struct pci_dev *pdev;
+       unsigned long flags;
+@@ -1885,6 +1886,7 @@ static void hv_eject_device_work(struct
+       } ctxt;
+       hpdev = container_of(work, struct hv_pci_dev, wrk);
++      hbus = hpdev->hbus;
+       WARN_ON(hpdev->state != hv_pcichild_ejecting);
+@@ -1895,8 +1897,7 @@ static void hv_eject_device_work(struct
+        * because hbus->pci_bus may not exist yet.
+        */
+       wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
+-      pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
+-                                         wslot);
++      pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
+       if (pdev) {
+               pci_lock_rescan_remove();
+               pci_stop_and_remove_bus_device(pdev);
+@@ -1904,9 +1905,9 @@ static void hv_eject_device_work(struct
+               pci_unlock_rescan_remove();
+       }
+-      spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
++      spin_lock_irqsave(&hbus->device_list_lock, flags);
+       list_del(&hpdev->list_entry);
+-      spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
++      spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+       if (hpdev->pci_slot)
+               pci_destroy_slot(hpdev->pci_slot);
+@@ -1915,7 +1916,7 @@ static void hv_eject_device_work(struct
+       ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
+       ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
+       ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
+-      vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt,
++      vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
+                        sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
+                        VM_PKT_DATA_INBAND, 0);
+@@ -1924,7 +1925,9 @@ static void hv_eject_device_work(struct
+       /* For the two refs got in new_pcichild_device() */
+       put_pcichild(hpdev);
+       put_pcichild(hpdev);
+-      put_hvpcibus(hpdev->hbus);
++      /* hpdev has been freed. Do not use it any more. */
++
++      put_hvpcibus(hbus);
+ }
+ /**
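The fix is an instance of a general rule: copy out anything you still
need from a reference-counted object before dropping what may be its last
reference.  A hedged userspace sketch of that rule (not Hyper-V code):

  #include <stdio.h>
  #include <stdlib.h>

  struct bus { int id; int refs; };
  struct dev { struct bus *bus; int refs; };

  static void put_bus(struct bus *b)
  {
          if (--b->refs == 0)
                  free(b);
  }

  static void put_dev(struct dev *d)
  {
          if (--d->refs == 0)
                  free(d);        /* child gone: any later d->bus is a UAF */
  }

  static void eject(struct dev *d)
  {
          struct bus *bus = d->bus;       /* cache before the final put */

          put_dev(d);                     /* may free d                 */
          /* d must not be dereferenced past this point */
          printf("releasing bus %d\n", bus->id);
          put_bus(bus);
  }

  int main(void)
  {
          struct bus *b = calloc(1, sizeof(*b));
          struct dev *d = calloc(1, sizeof(*d));

          b->id = 0;
          b->refs = 2;            /* one ref held by d, one by main */
          d->bus = b;
          d->refs = 1;
          eject(d);
          put_bus(b);
          return 0;
  }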
diff --git a/queue-5.1/pci-qcom-ensure-that-perst-is-asserted-for-at-least-100-ms.patch b/queue-5.1/pci-qcom-ensure-that-perst-is-asserted-for-at-least-100-ms.patch
new file mode 100644 (file)
index 0000000..a8bb9d6
--- /dev/null
@@ -0,0 +1,49 @@
+From 64adde31c8e996a6db6f7a1a4131180e363aa9f2 Mon Sep 17 00:00:00 2001
+From: Niklas Cassel <niklas.cassel@linaro.org>
+Date: Wed, 29 May 2019 11:43:52 +0200
+Subject: PCI: qcom: Ensure that PERST is asserted for at least 100 ms
+
+From: Niklas Cassel <niklas.cassel@linaro.org>
+
+commit 64adde31c8e996a6db6f7a1a4131180e363aa9f2 upstream.
+
+Currently, there is only a 1 ms sleep after asserting PERST.
+
+Reading the datasheets for different endpoints, some require PERST to be
+asserted for 10 ms in order for the endpoint to perform a reset, while
+others require it to be asserted for 50 ms.
+
+Several SoCs using this driver use PCIe Mini Card, where we don't know
+which endpoint will be plugged in.
+
+The PCI Express Card Electromechanical Specification r2.0, section
+2.2, "PERST# Signal" specifies:
+
+"On power up, the deassertion of PERST# is delayed 100 ms (TPVPERL) from
+the power rails achieving specified operating limits."
+
+Add a sleep of 100 ms before deasserting PERST, in order to ensure that
+we are compliant with the spec.
+
+Fixes: 82a823833f4e ("PCI: qcom: Add Qualcomm PCIe controller driver")
+Signed-off-by: Niklas Cassel <niklas.cassel@linaro.org>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Acked-by: Stanimir Varbanov <svarbanov@mm-sol.com>
+Cc: stable@vger.kernel.org # 4.5+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/pci/controller/dwc/pcie-qcom.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -178,6 +178,8 @@ static void qcom_ep_reset_assert(struct
+ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
+ {
++      /* Ensure that PERST has been asserted for at least 100 ms */
++      msleep(100);
+       gpiod_set_value_cansleep(pcie->reset, 0);
+       usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+ }
diff --git a/queue-5.1/perf-x86-amd-uncore-do-not-set-threadmask-and-slicemask-for-non-l3-pmcs.patch b/queue-5.1/perf-x86-amd-uncore-do-not-set-threadmask-and-slicemask-for-non-l3-pmcs.patch
new file mode 100644 (file)
index 0000000..d050028
--- /dev/null
@@ -0,0 +1,67 @@
+From 16f4641166b10e199f0d7b68c2c5f004fef0bda3 Mon Sep 17 00:00:00 2001
+From: Kim Phillips <kim.phillips@amd.com>
+Date: Fri, 28 Jun 2019 21:59:20 +0000
+Subject: perf/x86/amd/uncore: Do not set 'ThreadMask' and 'SliceMask' for non-L3 PMCs
+
+From: Kim Phillips <kim.phillips@amd.com>
+
+commit 16f4641166b10e199f0d7b68c2c5f004fef0bda3 upstream.
+
+The following commit:
+
+  d7cbbe49a930 ("perf/x86/amd/uncore: Set ThreadMask and SliceMask for L3 Cache perf events")
+
+enables L3 PMC events for all threads and slices by writing 1's in
+'ChL3PmcCfg' (L3 PMC PERF_CTL) register fields.
+
+Those bitfields overlap with high order event select bits in the Data
+Fabric PMC control register, however.
+
+So when a user requests raw Data Fabric events (-e amd_df/event=0xYYY/),
+the two highest order bits get inadvertently set, changing the counter
+select to events that don't exist, and for which no counts are read.
+
+This patch changes the logic to write the L3 masks only when dealing
+with L3 PMC counters.
+
+AMD Family 16h and below Northbridge (NB) counters were not affected.
+
+Signed-off-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <stable@vger.kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Gary Hook <Gary.Hook@amd.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Martin Liska <mliska@suse.cz>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Pu Wen <puwen@hygon.cn>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Fixes: d7cbbe49a930 ("perf/x86/amd/uncore: Set ThreadMask and SliceMask for L3 Cache perf events")
+Link: https://lkml.kernel.org/r/20190628215906.4276-1-kim.phillips@amd.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/amd/uncore.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/events/amd/uncore.c
++++ b/arch/x86/events/amd/uncore.c
+@@ -209,7 +209,7 @@ static int amd_uncore_event_init(struct
+        * SliceMask and ThreadMask need to be set for certain L3 events in
+        * Family 17h. For other events, the two fields do not affect the count.
+        */
+-      if (l3_mask)
++      if (l3_mask && is_llc_event(event))
+               hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
+       if (event->cpu < 0)
diff --git a/queue-5.1/perf-x86-amd-uncore-set-the-thread-mask-for-f17h-l3-pmcs.patch b/queue-5.1/perf-x86-amd-uncore-set-the-thread-mask-for-f17h-l3-pmcs.patch
new file mode 100644 (file)
index 0000000..44be6b9
--- /dev/null
@@ -0,0 +1,68 @@
+From 2f217d58a8a086d3399fecce39fb358848e799c4 Mon Sep 17 00:00:00 2001
+From: Kim Phillips <kim.phillips@amd.com>
+Date: Fri, 28 Jun 2019 21:59:33 +0000
+Subject: perf/x86/amd/uncore: Set the thread mask for F17h L3 PMCs
+
+From: Kim Phillips <kim.phillips@amd.com>
+
+commit 2f217d58a8a086d3399fecce39fb358848e799c4 upstream.
+
+Fill in the L3 performance event select register ThreadMask
+bitfield, to enable per hardware thread accounting.
+
+Signed-off-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <stable@vger.kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Gary Hook <Gary.Hook@amd.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Martin Liska <mliska@suse.cz>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Pu Wen <puwen@hygon.cn>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Link: https://lkml.kernel.org/r/20190628215906.4276-2-kim.phillips@amd.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/amd/uncore.c |   15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/events/amd/uncore.c
++++ b/arch/x86/events/amd/uncore.c
+@@ -205,15 +205,22 @@ static int amd_uncore_event_init(struct
+       hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
+       hwc->idx = -1;
++      if (event->cpu < 0)
++              return -EINVAL;
++
+       /*
+        * SliceMask and ThreadMask need to be set for certain L3 events in
+        * Family 17h. For other events, the two fields do not affect the count.
+        */
+-      if (l3_mask && is_llc_event(event))
+-              hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
++      if (l3_mask && is_llc_event(event)) {
++              int thread = 2 * (cpu_data(event->cpu).cpu_core_id % 4);
+-      if (event->cpu < 0)
+-              return -EINVAL;
++              if (smp_num_siblings > 1)
++                      thread += cpu_data(event->cpu).apicid & 1;
++
++              hwc->config |= (1ULL << (AMD64_L3_THREAD_SHIFT + thread) &
++                              AMD64_L3_THREAD_MASK) | AMD64_L3_SLICE_MASK;
++      }
+       uncore = event_to_amd_uncore(event);
+       if (!uncore)
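A worked example of the bitfield built above: for cpu_core_id 5 with an
odd APIC ID and SMT enabled, thread = 2 * (5 % 4) + 1 = 3, so bit 59 of
the config is set along with the full slice mask.  The shift and mask
values below (an 8-bit thread field at bit 56, a 4-bit slice field at bit
48) are assumptions for illustration; the authoritative definitions are
the AMD64_L3_* macros in the kernel's perf_event.h.

  #include <stdio.h>
  #include <stdint.h>

  #define L3_SLICE_SHIFT   48
  #define L3_SLICE_MASK    (0xFULL  << L3_SLICE_SHIFT)
  #define L3_THREAD_SHIFT  56
  #define L3_THREAD_MASK   (0xFFULL << L3_THREAD_SHIFT)

  static uint64_t l3_thread_slice_bits(int cpu_core_id, int apicid, int smt)
  {
          int thread = 2 * (cpu_core_id % 4);     /* 4 cores share an L3 (CCX) */

          if (smt)
                  thread += apicid & 1;           /* pick the SMT sibling      */

          return ((1ULL << (L3_THREAD_SHIFT + thread)) & L3_THREAD_MASK) |
                 L3_SLICE_MASK;                   /* count on all slices       */
  }

  int main(void)
  {
          /* core 5, odd APIC ID, SMT on -> thread bit 3, i.e. config bit 59 */
          printf("config bits: 0x%016llx\n",
                 (unsigned long long)l3_thread_slice_bits(5, 11, 1));
          return 0;
  }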
diff --git a/queue-5.1/perf-x86-intel-fix-spurious-nmi-on-fixed-counter.patch b/queue-5.1/perf-x86-intel-fix-spurious-nmi-on-fixed-counter.patch
new file mode 100644 (file)
index 0000000..e4bcb59
--- /dev/null
@@ -0,0 +1,73 @@
+From e4557c1a46b0d32746bd309e1941914b5a6912b4 Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Tue, 25 Jun 2019 07:21:35 -0700
+Subject: perf/x86/intel: Fix spurious NMI on fixed counter
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit e4557c1a46b0d32746bd309e1941914b5a6912b4 upstream.
+
+If a user first samples a PEBS event on a fixed counter, then samples a
+non-PEBS event on the same fixed counter on Icelake, it will trigger a
+spurious NMI. For example:
+
+  perf record -e 'cycles:p' -a
+  perf record -e 'cycles' -a
+
+The error message for spurious NMI:
+
+  [June 21 15:38] Uhhuh. NMI received for unknown reason 30 on CPU 2.
+  [    +0.000000] Do you have a strange power saving mode enabled?
+  [    +0.000000] Dazed and confused, but trying to continue
+
+The bug was introduced by the following commit:
+
+  commit 6f55967ad9d9 ("perf/x86/intel: Fix race in intel_pmu_disable_event()")
+
+The commit moves the intel_pmu_pebs_disable() after intel_pmu_disable_fixed(),
+which returns immediately.  The related bit of the PEBS_ENABLE MSR will never
+be cleared for the fixed counter. Then a non-PEBS event runs on the fixed
+counter, but the bit in PEBS_ENABLE is still set, which triggers spurious NMIs.
+
+Check and disable PEBS for fixed counters after intel_pmu_disable_fixed().
+
+Reported-by: Yi, Ammy <ammy.yi@intel.com>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Cc: <stable@vger.kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Fixes: 6f55967ad9d9 ("perf/x86/intel: Fix race in intel_pmu_disable_event()")
+Link: https://lkml.kernel.org/r/20190625142135.22112-1-kan.liang@linux.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/intel/core.c |    8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2092,12 +2092,10 @@ static void intel_pmu_disable_event(stru
+       cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
+       cpuc->intel_cp_status &= ~(1ull << hwc->idx);
+-      if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
++      if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
+               intel_pmu_disable_fixed(hwc);
+-              return;
+-      }
+-
+-      x86_pmu_disable_event(event);
++      else
++              x86_pmu_disable_event(event);
+       /*
+        * Needs to be called after x86_pmu_disable_event,
diff --git a/queue-5.1/rdma-srp-accept-again-source-addresses-that-do-not-have-a-port-number.patch b/queue-5.1/rdma-srp-accept-again-source-addresses-that-do-not-have-a-port-number.patch
new file mode 100644 (file)
index 0000000..4b0576f
--- /dev/null
@@ -0,0 +1,89 @@
+From bcef5b7215681250c4bf8961dfe15e9e4fef97d0 Mon Sep 17 00:00:00 2001
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Wed, 29 May 2019 09:38:31 -0700
+Subject: RDMA/srp: Accept again source addresses that do not have a port number
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+commit bcef5b7215681250c4bf8961dfe15e9e4fef97d0 upstream.
+
+The function srp_parse_in() is used both for parsing source address
+specifications and for target address specifications. Target addresses
+must have a port number. Having to specify a port number for source
+addresses is inconvenient. Make sure that srp_parse_in() again supports
+parsing addresses with no port number.
+
+Cc: <stable@vger.kernel.org>
+Fixes: c62adb7def71 ("IB/srp: Fix IPv6 address parsing")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/srp/ib_srp.c |   21 +++++++++++++++------
+ 1 file changed, 15 insertions(+), 6 deletions(-)
+
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -3481,13 +3481,14 @@ static const match_table_t srp_opt_token
+  * @net:         [in]  Network namespace.
+  * @sa:                  [out] Address family, IP address and port number.
+  * @addr_port_str: [in]  IP address and port number.
++ * @has_port:    [out] Whether or not @addr_port_str includes a port number.
+  *
+  * Parse the following address formats:
+  * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
+  * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
+  */
+ static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
+-                      const char *addr_port_str)
++                      const char *addr_port_str, bool *has_port)
+ {
+       char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
+       char *port_str;
+@@ -3496,9 +3497,12 @@ static int srp_parse_in(struct net *net,
+       if (!addr)
+               return -ENOMEM;
+       port_str = strrchr(addr, ':');
+-      if (!port_str)
+-              return -EINVAL;
+-      *port_str++ = '\0';
++      if (port_str && strchr(port_str, ']'))
++              port_str = NULL;
++      if (port_str)
++              *port_str++ = '\0';
++      if (has_port)
++              *has_port = port_str != NULL;
+       ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
+       if (ret && addr[0]) {
+               addr_end = addr + strlen(addr) - 1;
+@@ -3520,6 +3524,7 @@ static int srp_parse_options(struct net
+       char *p;
+       substring_t args[MAX_OPT_ARGS];
+       unsigned long long ull;
++      bool has_port;
+       int opt_mask = 0;
+       int token;
+       int ret = -EINVAL;
+@@ -3618,7 +3623,8 @@ static int srp_parse_options(struct net
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+-                      ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
++                      ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
++                                         NULL);
+                       if (ret < 0) {
+                               pr_warn("bad source parameter '%s'\n", p);
+                               kfree(p);
+@@ -3634,7 +3640,10 @@ static int srp_parse_options(struct net
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+-                      ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
++                      ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
++                                         &has_port);
++                      if (!has_port)
++                              ret = -EINVAL;
+                       if (ret < 0) {
+                               pr_warn("bad dest parameter '%s'\n", p);
+                               kfree(p);
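The restored rule is easiest to see outside the driver: the last ':' only
separates a port when no ']' follows it, so a bracketed IPv6 source
address without a port now parses as "no port" instead of failing.  The
helper below is an illustrative userspace rendering of that rule, not
srp_parse_in() itself.

  #include <stdio.h>
  #include <string.h>
  #include <stdbool.h>

  static bool split_port(char *addr, char **port)
  {
          char *p = strrchr(addr, ':');

          if (p && strchr(p, ']'))        /* ':' sits inside [...]: no port */
                  p = NULL;
          if (p)
                  *p++ = '\0';
          *port = p;
          return p != NULL;
  }

  int main(void)
  {
          char a[] = "192.168.1.1:5555";
          char b[] = "[fe80::1%2]:5555";
          char c[] = "[fe80::1%2]";       /* source address, no port */
          char *port;

          printf("%-20s has_port=%d\n", a, split_port(a, &port));
          printf("%-20s has_port=%d\n", b, split_port(b, &port));
          printf("%-20s has_port=%d\n", c, split_port(c, &port));
          return 0;
  }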
diff --git a/queue-5.1/resource-fix-locking-in-find_next_iomem_res.patch b/queue-5.1/resource-fix-locking-in-find_next_iomem_res.patch
new file mode 100644 (file)
index 0000000..02f0524
--- /dev/null
@@ -0,0 +1,75 @@
+From 49f17c26c123b60fd1c74629eef077740d16ffc2 Mon Sep 17 00:00:00 2001
+From: Nadav Amit <namit@vmware.com>
+Date: Thu, 18 Jul 2019 15:57:31 -0700
+Subject: resource: fix locking in find_next_iomem_res()
+
+From: Nadav Amit <namit@vmware.com>
+
+commit 49f17c26c123b60fd1c74629eef077740d16ffc2 upstream.
+
+Since resources can be removed, locking should ensure that the resource
+is not removed while accessing it.  However, find_next_iomem_res() does
+not hold the lock while copying the data of the resource.
+
+Keep holding the lock while the data is copied.  While at it, change the
+return value to a more informative value.  It is disregarded by the
+callers.
+
+[akpm@linux-foundation.org: fix find_next_iomem_res() documentation]
+Link: http://lkml.kernel.org/r/20190613045903.4922-2-namit@vmware.com
+Fixes: ff3cc952d3f00 ("resource: Add remove_resource interface")
+Signed-off-by: Nadav Amit <namit@vmware.com>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Dan Williams <dan.j.williams@intel.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Toshi Kani <toshi.kani@hpe.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Bjorn Helgaas <bhelgaas@google.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/resource.c |   20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -325,7 +325,7 @@ EXPORT_SYMBOL(release_resource);
+  *
+  * If a resource is found, returns 0 and @*res is overwritten with the part
+  * of the resource that's within [@start..@end]; if none is found, returns
+- * -1 or -EINVAL for other invalid parameters.
++ * -ENODEV.  Returns -EINVAL for invalid parameters.
+  *
+  * This function walks the whole tree and not just first level children
+  * unless @first_lvl is true.
+@@ -364,16 +364,16 @@ static int find_next_iomem_res(resource_
+                       break;
+       }
+-      read_unlock(&resource_lock);
+-      if (!p)
+-              return -1;
++      if (p) {
++              /* copy data */
++              res->start = max(start, p->start);
++              res->end = min(end, p->end);
++              res->flags = p->flags;
++              res->desc = p->desc;
++      }
+-      /* copy data */
+-      res->start = max(start, p->start);
+-      res->end = min(end, p->end);
+-      res->flags = p->flags;
+-      res->desc = p->desc;
+-      return 0;
++      read_unlock(&resource_lock);
++      return p ? 0 : -ENODEV;
+ }
+ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
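The same rule in a generic, hedged userspace form (this is not
kernel/resource.c): the lookup copies the fields it needs while still
holding the lock that keeps the entry alive, and returns an errno-style
code rather than -1.  Link with -pthread.

  #include <pthread.h>
  #include <errno.h>

  struct res { unsigned long start, end; struct res *next; };

  static struct res *res_list;
  static pthread_rwlock_t res_lock = PTHREAD_RWLOCK_INITIALIZER;

  static int find_res(unsigned long start, unsigned long end, struct res *out)
  {
          struct res *p;
          int ret = -ENODEV;

          pthread_rwlock_rdlock(&res_lock);
          for (p = res_list; p; p = p->next) {
                  if (p->end < start || p->start > end)
                          continue;
                  /* copy while still locked: p may be freed after unlock */
                  out->start = p->start > start ? p->start : start;
                  out->end   = p->end   < end   ? p->end   : end;
                  ret = 0;
                  break;
          }
          pthread_rwlock_unlock(&res_lock);
          return ret;
  }

  int main(void)
  {
          static struct res r = { .start = 0x1000, .end = 0x1fff };
          struct res out = { 0 };

          res_list = &r;
          return find_res(0x1800, 0x2fff, &out) == 0 ? 0 : 1;
  }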
diff --git a/queue-5.1/rt2x00usb-fix-rx-queue-hang.patch b/queue-5.1/rt2x00usb-fix-rx-queue-hang.patch
new file mode 100644 (file)
index 0000000..d8eb4f6
--- /dev/null
@@ -0,0 +1,73 @@
+From 41a531ffa4c5aeb062f892227c00fabb3b4a9c91 Mon Sep 17 00:00:00 2001
+From: Soeren Moch <smoch@web.de>
+Date: Mon, 1 Jul 2019 12:53:13 +0200
+Subject: rt2x00usb: fix rx queue hang
+
+From: Soeren Moch <smoch@web.de>
+
+commit 41a531ffa4c5aeb062f892227c00fabb3b4a9c91 upstream.
+
+Since commit ed194d136769 ("usb: core: remove local_irq_save() around
+ ->complete() handler") the handler rt2x00usb_interrupt_rxdone() is
+not running with interrupts disabled anymore. So this completion handler
+is not guaranteed to run completely before workqueue processing starts
+for the same queue entry.
+Be sure to set all other flags in the entry correctly before marking
+this entry ready for workqueue processing. This way we cannot miss error
+conditions that need to be signalled from the completion handler to the
+worker thread.
+Note that rt2x00usb_work_rxdone() processes all available entries, not
+only such for which queue_work() was called.
+
+This patch is similar to what commit df71c9cfceea ("rt2x00: fix order
+of entry flags modification") did for TX processing.
+
+This fixes a regression on a RT5370 based wifi stick in AP mode, which
+suddenly stopped data transmission after some period of heavy load. Also
+stopping the hanging hostapd resulted in the error message "ieee80211
+phy0: rt2x00queue_flush_queue: Warning - Queue 14 failed to flush".
+Other operation modes are probably affected as well, this just was
+the used testcase.
+
+Fixes: ed194d136769 ("usb: core: remove local_irq_save() around ->complete() handler")
+Cc: stable@vger.kernel.org # 4.20+
+Signed-off-by: Soeren Moch <smoch@web.de>
+Acked-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/ralink/rt2x00/rt2x00usb.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+@@ -367,15 +367,10 @@ static void rt2x00usb_interrupt_rxdone(s
+       struct queue_entry *entry = (struct queue_entry *)urb->context;
+       struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+-      if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
++      if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+               return;
+       /*
+-       * Report the frame as DMA done
+-       */
+-      rt2x00lib_dmadone(entry);
+-
+-      /*
+        * Check if the received data is simply too small
+        * to be actually valid, or if the urb is signaling
+        * a problem.
+@@ -384,6 +379,11 @@ static void rt2x00usb_interrupt_rxdone(s
+               set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+       /*
++       * Report the frame as DMA done
++       */
++      rt2x00lib_dmadone(entry);
++
++      /*
+        * Schedule the delayed work for reading the RX status
+        * from the device.
+        */
diff --git a/queue-5.1/series b/queue-5.1/series
index c10cd986ec8e706e7b7f3637a9b0c9d369b29c32..4f02838b7dfe24cf72c74cb550dadf5b0049a2b4 100644 (file)
--- a/queue-5.1/series
@@ -310,3 +310,35 @@ signal-correct-namespace-fixups-of-si_pid-and-si_uid.patch
 fs-proc-proc_sysctl.c-fix-the-default-values-of-i_uid-i_gid-on-proc-sys-inodes.patch
 i3c-fix-i2c-and-i3c-scl-rate-by-bus-mode.patch
 kconfig-fix-missing-choice-values-in-auto.conf.patch
+arm-dts-gemini-set-dir-685-spi-cs-as-active-low.patch
+drm-nouveau-i2c-enable-i2c-pads-busses-during-preinit.patch
+padata-use-smp_mb-in-padata_reorder-to-avoid-orphaned-padata-jobs.patch
+dm-zoned-fix-zone-state-management-race.patch
+xen-events-fix-binding-user-event-channels-to-cpus.patch
+9p-xen-add-cleanup-path-in-p9_trans_xen_init.patch
+9p-virtio-add-cleanup-path-in-p9_virtio_init.patch
+rt2x00usb-fix-rx-queue-hang.patch
+x86-boot-fix-memory-leak-in-default_get_smp_config.patch
+perf-x86-intel-fix-spurious-nmi-on-fixed-counter.patch
+perf-x86-amd-uncore-do-not-set-threadmask-and-slicemask-for-non-l3-pmcs.patch
+perf-x86-amd-uncore-set-the-thread-mask-for-f17h-l3-pmcs.patch
+drm-edid-parse-cea-blocks-embedded-in-displayid.patch
+block-allow-mapping-of-vmalloc-ed-buffers.patch
+block-fix-potential-overflow-in-blk_report_zones.patch
+rdma-srp-accept-again-source-addresses-that-do-not-have-a-port-number.patch
+intel_th-pci-add-ice-lake-nnpi-support.patch
+pci-hv-fix-a-use-after-free-bug-in-hv_eject_device_work.patch
+pci-do-not-poll-for-pme-if-the-device-is-in-d3cold.patch
+pci-qcom-ensure-that-perst-is-asserted-for-at-least-100-ms.patch
+btrfs-fix-data-loss-after-inode-eviction-renaming-it-and-fsync-it.patch
+btrfs-fix-fsync-not-persisting-dentry-deletions-due-to-inode-evictions.patch
+btrfs-add-missing-inode-version-ctime-and-mtime-updates-when-punching-hole.patch
+ib-mlx5-report-correctly-tag-matching-rendezvous-capability.patch
+hid-wacom-generic-only-switch-the-mode-on-devices-with-leds.patch
+hid-wacom-generic-correct-pad-syncing.patch
+hid-wacom-correct-touch-resolution-x-y-typo.patch
+mm-nvdimm-add-is_ioremap_addr-and-use-that-to-check-ioremap-address.patch
+libnvdimm-pfn-fix-fsdax-mode-namespace-info-block-zero-fields.patch
+coda-pass-the-host-file-in-vma-vm_file-on-mmap.patch
+include-asm-generic-bug.h-fix-cut-here-for-warn_on-for-__warn_taint-architectures.patch
+resource-fix-locking-in-find_next_iomem_res.patch
diff --git a/queue-5.1/x86-boot-fix-memory-leak-in-default_get_smp_config.patch b/queue-5.1/x86-boot-fix-memory-leak-in-default_get_smp_config.patch
new file mode 100644 (file)
index 0000000..a275f19
--- /dev/null
@@ -0,0 +1,59 @@
+From e74bd96989dd42a51a73eddb4a5510a6f5e42ac3 Mon Sep 17 00:00:00 2001
+From: David Rientjes <rientjes@google.com>
+Date: Tue, 9 Jul 2019 19:44:03 -0700
+Subject: x86/boot: Fix memory leak in default_get_smp_config()
+
+From: David Rientjes <rientjes@google.com>
+
+commit e74bd96989dd42a51a73eddb4a5510a6f5e42ac3 upstream.
+
+When default_get_smp_config() is called with early == 1 and mpf->feature1
+is non-zero, mpf is leaked because the return path does not do
+early_memunmap().
+
+Fix this and share a common exit routine.
+
+Fixes: 5997efb96756 ("x86/boot: Use memremap() to map the MPF and MPC data")
+Reported-by: Cfir Cohen <cfir@google.com>
+Signed-off-by: David Rientjes <rientjes@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1907091942570.28240@chino.kir.corp.google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/mpparse.c |   10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/mpparse.c
++++ b/arch/x86/kernel/mpparse.c
+@@ -546,17 +546,15 @@ void __init default_get_smp_config(unsig
+                        * local APIC has default address
+                        */
+                       mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
+-                      return;
++                      goto out;
+               }
+               pr_info("Default MP configuration #%d\n", mpf->feature1);
+               construct_default_ISA_mptable(mpf->feature1);
+       } else if (mpf->physptr) {
+-              if (check_physptr(mpf, early)) {
+-                      early_memunmap(mpf, sizeof(*mpf));
+-                      return;
+-              }
++              if (check_physptr(mpf, early))
++                      goto out;
+       } else
+               BUG();
+@@ -565,7 +563,7 @@ void __init default_get_smp_config(unsig
+       /*
+        * Only use the first configuration found.
+        */
+-
++out:
+       early_memunmap(mpf, sizeof(*mpf));
+ }
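
For illustration only, here is a minimal, self-contained C sketch of the shared-exit pattern the patch above introduces: every early exit jumps to a single "out:" label so the mapping is released on all paths. The names fake_map(), fake_unmap() and parse_config() are hypothetical stand-ins for early_memremap()/early_memunmap() and default_get_smp_config(); this is not the kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for early_memremap()/early_memunmap(). */
static void *fake_map(size_t len)  { return malloc(len); }
static void fake_unmap(void *p)    { free(p); }

/*
 * Every early exit jumps to the single "out:" label, so the mapping is
 * released exactly once on all paths -- the pattern the fix applies.
 */
static int parse_config(int early, int feature)
{
	void *mpf = fake_map(64);

	if (!mpf)
		return -1;

	if (early)
		goto out;	/* a bare "return" here would leak mpf */

	if (feature)
		printf("configuration #%d\n", feature);
out:
	fake_unmap(mpf);
	return 0;
}

int main(void)
{
	return parse_config(1, 0);
}
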
diff --git a/queue-5.1/xen-events-fix-binding-user-event-channels-to-cpus.patch b/queue-5.1/xen-events-fix-binding-user-event-channels-to-cpus.patch
new file mode 100644
index 0000000..6c8768a
--- /dev/null
+++ b/queue-5.1/xen-events-fix-binding-user-event-channels-to-cpus.patch
@@ -0,0 +1,93 @@
+From bce5963bcb4f9934faa52be323994511d59fd13c Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Fri, 21 Jun 2019 20:47:03 +0200
+Subject: xen/events: fix binding user event channels to cpus
+
+From: Juergen Gross <jgross@suse.com>
+
+commit bce5963bcb4f9934faa52be323994511d59fd13c upstream.
+
+When binding an interdomain event channel to a vcpu via
+IOCTL_EVTCHN_BIND_INTERDOMAIN, not only does the event channel need to
+be bound, but the affinity of the associated IRQ must be changed, too.
+Otherwise the IRQ and the event channel won't be moved to another vcpu
+in case the original vcpu they were bound to goes offline.
+
+Cc: <stable@vger.kernel.org> # 4.13
+Fixes: c48f64ab472389df ("xen-evtchn: Bind dyn evtchn:qemu-dm interrupt to next online VCPU")
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/events/events_base.c |   12 ++++++++++--
+ drivers/xen/evtchn.c             |    2 +-
+ include/xen/events.h             |    3 ++-
+ 3 files changed, 13 insertions(+), 4 deletions(-)
+
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -1293,7 +1293,7 @@ void rebind_evtchn_irq(int evtchn, int i
+ }
+ /* Rebind an evtchn so that it gets delivered to a specific cpu */
+-int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
++static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
+ {
+       struct evtchn_bind_vcpu bind_vcpu;
+       int masked;
+@@ -1327,7 +1327,6 @@ int xen_rebind_evtchn_to_cpu(int evtchn,
+       return 0;
+ }
+-EXPORT_SYMBOL_GPL(xen_rebind_evtchn_to_cpu);
+ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
+                           bool force)
+@@ -1341,6 +1340,15 @@ static int set_affinity_irq(struct irq_d
+       return ret;
+ }
++/* To be called with desc->lock held. */
++int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
++{
++      struct irq_data *d = irq_desc_get_irq_data(desc);
++
++      return set_affinity_irq(d, cpumask_of(tcpu), false);
++}
++EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
++
+ static void enable_dynirq(struct irq_data *data)
+ {
+       int evtchn = evtchn_from_irq(data->irq);
+--- a/drivers/xen/evtchn.c
++++ b/drivers/xen/evtchn.c
+@@ -447,7 +447,7 @@ static void evtchn_bind_interdom_next_vc
+       this_cpu_write(bind_last_selected_cpu, selected_cpu);
+       /* unmask expects irqs to be disabled */
+-      xen_rebind_evtchn_to_cpu(evtchn, selected_cpu);
++      xen_set_affinity_evtchn(desc, selected_cpu);
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+--- a/include/xen/events.h
++++ b/include/xen/events.h
+@@ -3,6 +3,7 @@
+ #define _XEN_EVENTS_H
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ #ifdef CONFIG_PCI_MSI
+ #include <linux/msi.h>
+ #endif
+@@ -59,7 +60,7 @@ void evtchn_put(unsigned int evtchn);
+ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
+ void rebind_evtchn_irq(int evtchn, int irq);
+-int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu);
++int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);
+ static inline void notify_remote_via_evtchn(int port)
+ {