]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.11
authorSasha Levin <sashal@kernel.org>
Mon, 17 May 2021 03:14:55 +0000 (23:14 -0400)
committerSasha Levin <sashal@kernel.org>
Mon, 17 May 2021 03:14:55 +0000 (23:14 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
32 files changed:
queue-5.11/acpi-scan-fix-a-memory-leak-in-an-error-handling-pat.patch [new file with mode: 0644]
queue-5.11/blk-mq-plug-request-for-shared-sbitmap.patch [new file with mode: 0644]
queue-5.11/blk-mq-swap-two-calls-in-blk_mq_exit_queue.patch [new file with mode: 0644]
queue-5.11/dax-add-a-wakeup-mode-parameter-to-put_unlocked_entr.patch [new file with mode: 0644]
queue-5.11/dax-add-an-enum-for-specifying-dax-wakup-mode.patch [new file with mode: 0644]
queue-5.11/dax-wake-up-all-waiters-after-invalidating-dax-entry.patch [new file with mode: 0644]
queue-5.11/drm-msm-dp-initialize-audio_comp-when-audio-starts.patch [new file with mode: 0644]
queue-5.11/drm-msm-fix-llc-not-being-enabled-for-mmu500-targets.patch [new file with mode: 0644]
queue-5.11/f2fs-compress-fix-race-condition-of-overwrite-vs-tru.patch [new file with mode: 0644]
queue-5.11/f2fs-compress-fix-to-assign-cc.cluster_idx-correctly.patch [new file with mode: 0644]
queue-5.11/f2fs-compress-fix-to-free-compress-page-correctly.patch [new file with mode: 0644]
queue-5.11/hwmon-ltc2992-put-fwnode-in-error-case-during-probe.patch [new file with mode: 0644]
queue-5.11/hwmon-occ-fix-poll-rate-limiting.patch [new file with mode: 0644]
queue-5.11/iio-core-return-enodev-if-ioctl-is-unknown.patch [new file with mode: 0644]
queue-5.11/iio-hid-sensors-select-iio_triggered_buffer-under-hi.patch [new file with mode: 0644]
queue-5.11/iio-light-gp2ap002-fix-rumtime-pm-imbalance-on-error.patch [new file with mode: 0644]
queue-5.11/iio-proximity-pulsedlight-fix-rumtime-pm-imbalance-o.patch [new file with mode: 0644]
queue-5.11/kvm-lapic-accurately-guarantee-busy-wait-for-timer-t.patch [new file with mode: 0644]
queue-5.11/kvm-x86-cancel-pvclock_gtod_work-on-module-removal.patch [new file with mode: 0644]
queue-5.11/kvm-x86-prevent-deadlock-against-tk_core.seq.patch [new file with mode: 0644]
queue-5.11/kyber-fix-out-of-bounds-access-when-preempted.patch [new file with mode: 0644]
queue-5.11/nbd-fix-null-pointer-in-flush_workqueue.patch [new file with mode: 0644]
queue-5.11/nvmet-add-lba-to-sect-conversion-helpers.patch [new file with mode: 0644]
queue-5.11/nvmet-fix-inline-bio-check-for-bdev-ns.patch [new file with mode: 0644]
queue-5.11/nvmet-fix-inline-bio-check-for-passthru.patch [new file with mode: 0644]
queue-5.11/nvmet-rdma-fix-null-deref-when-send-is-completed-wit.patch [new file with mode: 0644]
queue-5.11/perf-tools-fix-dynamic-libbpf-link.patch [new file with mode: 0644]
queue-5.11/series
queue-5.11/usb-dwc3-gadget-free-gadget-structure-only-after-fre.patch [new file with mode: 0644]
queue-5.11/usb-fotg210-hcd-fix-an-error-message.patch [new file with mode: 0644]
queue-5.11/usb-musb-fix-an-error-message.patch [new file with mode: 0644]
queue-5.11/xen-unpopulated-alloc-fix-error-return-code-in-fill_.patch [new file with mode: 0644]

diff --git a/queue-5.11/acpi-scan-fix-a-memory-leak-in-an-error-handling-pat.patch b/queue-5.11/acpi-scan-fix-a-memory-leak-in-an-error-handling-pat.patch
new file mode 100644 (file)
index 0000000..d7030fb
--- /dev/null
@@ -0,0 +1,36 @@
+From 179ebe935d440229bb1775ca86ecf91d03311efe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 8 May 2021 09:23:09 +0200
+Subject: ACPI: scan: Fix a memory leak in an error handling path
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 0c8bd174f0fc131bc9dfab35cd8784f59045da87 ]
+
+If 'acpi_device_set_name()' fails, we must free
+'acpi_device_bus_id->bus_id' or there is a (potential) memory leak.
+
+Fixes: eb50aaf960e3 ("ACPI: scan: Use unique number for instance_no")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/scan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 239eeeafc62f..32a9bd878852 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -705,6 +705,7 @@ int acpi_device_add(struct acpi_device *device,
+               result = acpi_device_set_name(device, acpi_device_bus_id);
+               if (result) {
++                      kfree_const(acpi_device_bus_id->bus_id);
+                       kfree(acpi_device_bus_id);
+                       goto err_unlock;
+               }
+-- 
+2.30.2
+
diff --git a/queue-5.11/blk-mq-plug-request-for-shared-sbitmap.patch b/queue-5.11/blk-mq-plug-request-for-shared-sbitmap.patch
new file mode 100644 (file)
index 0000000..4ae42ad
--- /dev/null
@@ -0,0 +1,53 @@
+From 5d4b75bd83687f1ec7642ad3ec0dc279cb5d28b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 May 2021 10:20:52 +0800
+Subject: blk-mq: plug request for shared sbitmap
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit 03f26d8f11403295de445b6e4e0e57ac57755791 ]
+
+In case of shared sbitmap, request won't be held in plug list any more
+since commit 32bc15afed04 ("blk-mq: Facilitate a shared sbitmap per
+tagset"), this way makes request merge from flush plug list & batching
+submission not possible, so cause performance regression.
+
+Yanhui reports performance regression when running sequential IO
+test(libaio, 16 jobs, 8 depth for each job) in VM, and the VM disk
+is emulated with image stored on xfs/megaraid_sas.
+
+Fix the issue by recovering original behavior to allow to hold request
+in plug list.
+
+Cc: Yanhui Ma <yama@redhat.com>
+Cc: John Garry <john.garry@huawei.com>
+Cc: Bart Van Assche <bvanassche@acm.org>
+Cc: kashyap.desai@broadcom.com
+Fixes: 32bc15afed04 ("blk-mq: Facilitate a shared sbitmap per tagset")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20210514022052.1047665-1-ming.lei@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index f285a9123a8b..2cd922579b2f 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2189,8 +2189,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
+               /* Bypass scheduler for flush requests */
+               blk_insert_flush(rq);
+               blk_mq_run_hw_queue(data.hctx, true);
+-      } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
+-                              !blk_queue_nonrot(q))) {
++      } else if (plug && (q->nr_hw_queues == 1 ||
++                 blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
++                 q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
+               /*
+                * Use plugging if we have a ->commit_rqs() hook as well, as
+                * we know the driver uses bd->last in a smart fashion.
+-- 
+2.30.2
+
diff --git a/queue-5.11/blk-mq-swap-two-calls-in-blk_mq_exit_queue.patch b/queue-5.11/blk-mq-swap-two-calls-in-blk_mq_exit_queue.patch
new file mode 100644 (file)
index 0000000..9c95fd6
--- /dev/null
@@ -0,0 +1,51 @@
+From 3c3829f49b8fc7a0fe97caf9b24f1f46a14a2880 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 May 2021 10:15:29 -0700
+Subject: blk-mq: Swap two calls in blk_mq_exit_queue()
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 630ef623ed26c18a457cdc070cf24014e50129c2 ]
+
+If a tag set is shared across request queues (e.g. SCSI LUNs) then the
+block layer core keeps track of the number of active request queues in
+tags->active_queues. blk_mq_tag_busy() and blk_mq_tag_idle() update that
+atomic counter if the hctx flag BLK_MQ_F_TAG_QUEUE_SHARED is set. Make
+sure that blk_mq_exit_queue() calls blk_mq_tag_idle() before that flag is
+cleared by blk_mq_del_queue_tag_set().
+
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Ming Lei <ming.lei@redhat.com>
+Cc: Hannes Reinecke <hare@suse.com>
+Fixes: 0d2602ca30e4 ("blk-mq: improve support for shared tags maps")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20210513171529.7977-1-bvanassche@acm.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 2cd922579b2f..88c843fa8d13 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -3244,10 +3244,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
+ /* tags can _not_ be used after returning from blk_mq_exit_queue */
+ void blk_mq_exit_queue(struct request_queue *q)
+ {
+-      struct blk_mq_tag_set   *set = q->tag_set;
++      struct blk_mq_tag_set *set = q->tag_set;
+-      blk_mq_del_queue_tag_set(q);
++      /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
+       blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
++      /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
++      blk_mq_del_queue_tag_set(q);
+ }
+ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+-- 
+2.30.2
+
diff --git a/queue-5.11/dax-add-a-wakeup-mode-parameter-to-put_unlocked_entr.patch b/queue-5.11/dax-add-a-wakeup-mode-parameter-to-put_unlocked_entr.patch
new file mode 100644 (file)
index 0000000..f70e733
--- /dev/null
@@ -0,0 +1,84 @@
+From 917c1a3ed5d8a7f00fa0ad7e44e08f8b0d50e98c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Apr 2021 15:03:13 -0400
+Subject: dax: Add a wakeup mode parameter to put_unlocked_entry()
+
+From: Vivek Goyal <vgoyal@redhat.com>
+
+[ Upstream commit 4c3d043d271d4d629aa2328796cdfc96b37d3b3c ]
+
+As of now put_unlocked_entry() always wakes up next waiter. In next
+patches we want to wake up all waiters at one callsite. Hence, add a
+parameter to the function.
+
+This patch does not introduce any change of behavior.
+
+Reviewed-by: Greg Kurz <groug@kaod.org>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Suggested-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
+Link: https://lore.kernel.org/r/20210428190314.1865312-3-vgoyal@redhat.com
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dax.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/fs/dax.c b/fs/dax.c
+index 5ecee51c44ee..56eb1c759ca5 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -275,11 +275,11 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+       finish_wait(wq, &ewait.wait);
+ }
+-static void put_unlocked_entry(struct xa_state *xas, void *entry)
++static void put_unlocked_entry(struct xa_state *xas, void *entry,
++                             enum dax_wake_mode mode)
+ {
+-      /* If we were the only waiter woken, wake the next one */
+       if (entry && !dax_is_conflict(entry))
+-              dax_wake_entry(xas, entry, WAKE_NEXT);
++              dax_wake_entry(xas, entry, mode);
+ }
+ /*
+@@ -633,7 +633,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
+                       entry = get_unlocked_entry(&xas, 0);
+               if (entry)
+                       page = dax_busy_page(entry);
+-              put_unlocked_entry(&xas, entry);
++              put_unlocked_entry(&xas, entry, WAKE_NEXT);
+               if (page)
+                       break;
+               if (++scanned % XA_CHECK_SCHED)
+@@ -675,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
+       mapping->nrexceptional--;
+       ret = 1;
+ out:
+-      put_unlocked_entry(&xas, entry);
++      put_unlocked_entry(&xas, entry, WAKE_NEXT);
+       xas_unlock_irq(&xas);
+       return ret;
+ }
+@@ -954,7 +954,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
+       return ret;
+  put_unlocked:
+-      put_unlocked_entry(xas, entry);
++      put_unlocked_entry(xas, entry, WAKE_NEXT);
+       return ret;
+ }
+@@ -1695,7 +1695,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
+       /* Did we race with someone splitting entry or so? */
+       if (!entry || dax_is_conflict(entry) ||
+           (order == 0 && !dax_is_pte_entry(entry))) {
+-              put_unlocked_entry(&xas, entry);
++              put_unlocked_entry(&xas, entry, WAKE_NEXT);
+               xas_unlock_irq(&xas);
+               trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
+                                                     VM_FAULT_NOPAGE);
+-- 
+2.30.2
+
diff --git a/queue-5.11/dax-add-an-enum-for-specifying-dax-wakup-mode.patch b/queue-5.11/dax-add-an-enum-for-specifying-dax-wakup-mode.patch
new file mode 100644 (file)
index 0000000..9f26130
--- /dev/null
@@ -0,0 +1,106 @@
+From ea07bb891210c12049d1253a4526bce584f17fb3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Apr 2021 15:03:12 -0400
+Subject: dax: Add an enum for specifying dax wakup mode
+
+From: Vivek Goyal <vgoyal@redhat.com>
+
+[ Upstream commit 698ab77aebffe08b312fbcdddeb0e8bd08b78717 ]
+
+Dan mentioned that he is not very fond of passing around a boolean true/false
+to specify if only next waiter should be woken up or all waiters should be
+woken up. He instead prefers that we introduce an enum and make it very
+explicit at the callsite itself. Easier to read code.
+
+This patch should not introduce any change of behavior.
+
+Reviewed-by: Greg Kurz <groug@kaod.org>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Suggested-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
+Link: https://lore.kernel.org/r/20210428190314.1865312-2-vgoyal@redhat.com
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dax.c | 23 +++++++++++++++++------
+ 1 file changed, 17 insertions(+), 6 deletions(-)
+
+diff --git a/fs/dax.c b/fs/dax.c
+index b3d27fdc6775..5ecee51c44ee 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -144,6 +144,16 @@ struct wait_exceptional_entry_queue {
+       struct exceptional_entry_key key;
+ };
++/**
++ * enum dax_wake_mode: waitqueue wakeup behaviour
++ * @WAKE_ALL: wake all waiters in the waitqueue
++ * @WAKE_NEXT: wake only the first waiter in the waitqueue
++ */
++enum dax_wake_mode {
++      WAKE_ALL,
++      WAKE_NEXT,
++};
++
+ static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
+               void *entry, struct exceptional_entry_key *key)
+ {
+@@ -182,7 +192,8 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
+  * The important information it's conveying is whether the entry at
+  * this index used to be a PMD entry.
+  */
+-static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
++static void dax_wake_entry(struct xa_state *xas, void *entry,
++                         enum dax_wake_mode mode)
+ {
+       struct exceptional_entry_key key;
+       wait_queue_head_t *wq;
+@@ -196,7 +207,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
+        * must be in the waitqueue and the following check will see them.
+        */
+       if (waitqueue_active(wq))
+-              __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
++              __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
+ }
+ /*
+@@ -268,7 +279,7 @@ static void put_unlocked_entry(struct xa_state *xas, void *entry)
+ {
+       /* If we were the only waiter woken, wake the next one */
+       if (entry && !dax_is_conflict(entry))
+-              dax_wake_entry(xas, entry, false);
++              dax_wake_entry(xas, entry, WAKE_NEXT);
+ }
+ /*
+@@ -286,7 +297,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
+       old = xas_store(xas, entry);
+       xas_unlock_irq(xas);
+       BUG_ON(!dax_is_locked(old));
+-      dax_wake_entry(xas, entry, false);
++      dax_wake_entry(xas, entry, WAKE_NEXT);
+ }
+ /*
+@@ -524,7 +535,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
+               dax_disassociate_entry(entry, mapping, false);
+               xas_store(xas, NULL);   /* undo the PMD join */
+-              dax_wake_entry(xas, entry, true);
++              dax_wake_entry(xas, entry, WAKE_ALL);
+               mapping->nrexceptional--;
+               entry = NULL;
+               xas_set(xas, index);
+@@ -937,7 +948,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
+       xas_lock_irq(xas);
+       xas_store(xas, entry);
+       xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
+-      dax_wake_entry(xas, entry, false);
++      dax_wake_entry(xas, entry, WAKE_NEXT);
+       trace_dax_writeback_one(mapping->host, index, count);
+       return ret;
+-- 
+2.30.2
+
diff --git a/queue-5.11/dax-wake-up-all-waiters-after-invalidating-dax-entry.patch b/queue-5.11/dax-wake-up-all-waiters-after-invalidating-dax-entry.patch
new file mode 100644 (file)
index 0000000..15e0658
--- /dev/null
@@ -0,0 +1,81 @@
+From 5bd590f1a41a820b4140ab14e32ee15dab0135f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Apr 2021 15:03:14 -0400
+Subject: dax: Wake up all waiters after invalidating dax entry
+
+From: Vivek Goyal <vgoyal@redhat.com>
+
+[ Upstream commit 237388320deffde7c2d65ed8fc9eef670dc979b3 ]
+
+I am seeing missed wakeups which ultimately lead to a deadlock when I am
+using virtiofs with DAX enabled and running "make -j". I had to mount
+virtiofs as rootfs and also reduce to dax window size to 256M to reproduce
+the problem consistently.
+
+So here is the problem. put_unlocked_entry() wakes up waiters only
+if entry is not null as well as !dax_is_conflict(entry). But if I
+call multiple instances of invalidate_inode_pages2() in parallel,
+then I can run into a situation where there are waiters on
+this index but nobody will wake these waiters.
+
+invalidate_inode_pages2()
+  invalidate_inode_pages2_range()
+    invalidate_exceptional_entry2()
+      dax_invalidate_mapping_entry_sync()
+        __dax_invalidate_entry() {
+                xas_lock_irq(&xas);
+                entry = get_unlocked_entry(&xas, 0);
+                ...
+                ...
+                dax_disassociate_entry(entry, mapping, trunc);
+                xas_store(&xas, NULL);
+                ...
+                ...
+                put_unlocked_entry(&xas, entry);
+                xas_unlock_irq(&xas);
+        }
+
+Say a fault is in progress and it has locked entry at offset say "0x1c".
+Now say three instances of invalidate_inode_pages2() are in progress
+(A, B, C) and they all try to invalidate entry at offset "0x1c". Given
+dax entry is locked, all three instances A, B, C will wait in wait queue.
+
+When dax fault finishes, say A is woken up. It will store NULL entry
+at index "0x1c" and wake up B. When B comes along it will find "entry=0"
+at page offset 0x1c and it will call put_unlocked_entry(&xas, 0). And
+this means put_unlocked_entry() will not wake up next waiter, given
+the current code. And that means C continues to wait and is not woken
+up.
+
+This patch fixes the issue by waking up all waiters when a dax entry
+has been invalidated. This seems to fix the deadlock I am facing
+and I can make forward progress.
+
+Reported-by: Sergio Lopez <slp@redhat.com>
+Fixes: ac401cc78242 ("dax: New fault locking")
+Reviewed-by: Jan Kara <jack@suse.cz>
+Suggested-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
+Link: https://lore.kernel.org/r/20210428190314.1865312-4-vgoyal@redhat.com
+Signed-off-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dax.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/dax.c b/fs/dax.c
+index 56eb1c759ca5..df5485b4bddf 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -675,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
+       mapping->nrexceptional--;
+       ret = 1;
+ out:
+-      put_unlocked_entry(&xas, entry, WAKE_NEXT);
++      put_unlocked_entry(&xas, entry, WAKE_ALL);
+       xas_unlock_irq(&xas);
+       return ret;
+ }
+-- 
+2.30.2
+
diff --git a/queue-5.11/drm-msm-dp-initialize-audio_comp-when-audio-starts.patch b/queue-5.11/drm-msm-dp-initialize-audio_comp-when-audio-starts.patch
new file mode 100644 (file)
index 0000000..647f875
--- /dev/null
@@ -0,0 +1,98 @@
+From e92bbae90f42071ddc1b7d4fd28c952305925bb9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Apr 2021 16:37:36 -0700
+Subject: drm/msm/dp: initialize audio_comp when audio starts
+
+From: Kuogee Hsieh <khsieh@codeaurora.org>
+
+[ Upstream commit f2f46b878777e0d3f885c7ddad48f477b4dea247 ]
+
+Initialize audio_comp when audio starts and wait for audio_comp at
+dp_display_disable(). This will take care of both dongle unplugged
+and display off (suspend) cases.
+
+Changes in v2:
+-- add dp_display_signal_audio_start()
+
+Changes in v3:
+-- restore dp_display_handle_plugged_change() at dp_hpd_unplug_handle().
+
+Changes in v4:
+-- none
+
+Signed-off-by: Kuogee Hsieh <khsieh@codeaurora.org>
+Reviewed-by: Stephen Boyd <swboyd@chromium.org>
+Tested-by: Stephen Boyd <swboyd@chromium.org>
+Fixes: c703d5789590 ("drm/msm/dp: trigger unplug event in msm_dp_display_disable")
+Link: https://lore.kernel.org/r/1619048258-8717-3-git-send-email-khsieh@codeaurora.org
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/dp/dp_audio.c   |  1 +
+ drivers/gpu/drm/msm/dp/dp_display.c | 11 +++++++++--
+ drivers/gpu/drm/msm/dp/dp_display.h |  1 +
+ 3 files changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
+index 82a8673ab8da..d7e4a39a904e 100644
+--- a/drivers/gpu/drm/msm/dp/dp_audio.c
++++ b/drivers/gpu/drm/msm/dp/dp_audio.c
+@@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev,
+       dp_audio_setup_acr(audio);
+       dp_audio_safe_to_exit_level(audio);
+       dp_audio_enable(audio, true);
++      dp_display_signal_audio_start(dp_display);
+       dp_display->audio_enabled = true;
+ end:
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
+index 81f6794a2510..e0cf26935a35 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.c
++++ b/drivers/gpu/drm/msm/dp/dp_display.c
+@@ -178,6 +178,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
+       return 0;
+ }
++void dp_display_signal_audio_start(struct msm_dp *dp_display)
++{
++      struct dp_display_private *dp;
++
++      dp = container_of(dp_display, struct dp_display_private, dp_display);
++
++      reinit_completion(&dp->audio_comp);
++}
++
+ void dp_display_signal_audio_complete(struct msm_dp *dp_display)
+ {
+       struct dp_display_private *dp;
+@@ -651,7 +660,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+       dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
+       /* signal the disconnect event early to ensure proper teardown */
+-      reinit_completion(&dp->audio_comp);
+       dp_display_handle_plugged_change(g_dp_display, false);
+       dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
+@@ -891,7 +899,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
+       /* wait only if audio was enabled */
+       if (dp_display->audio_enabled) {
+               /* signal the disconnect event */
+-              reinit_completion(&dp->audio_comp);
+               dp_display_handle_plugged_change(dp_display, false);
+               if (!wait_for_completion_timeout(&dp->audio_comp,
+                               HZ * 5))
+diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
+index 6092ba1ed85e..5173c89eedf7 100644
+--- a/drivers/gpu/drm/msm/dp/dp_display.h
++++ b/drivers/gpu/drm/msm/dp/dp_display.h
+@@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display,
+ int dp_display_request_irq(struct msm_dp *dp_display);
+ bool dp_display_check_video_test(struct msm_dp *dp_display);
+ int dp_display_get_test_bpp(struct msm_dp *dp_display);
++void dp_display_signal_audio_start(struct msm_dp *dp_display);
+ void dp_display_signal_audio_complete(struct msm_dp *dp_display);
+ #endif /* _DP_DISPLAY_H_ */
+-- 
+2.30.2
+
diff --git a/queue-5.11/drm-msm-fix-llc-not-being-enabled-for-mmu500-targets.patch b/queue-5.11/drm-msm-fix-llc-not-being-enabled-for-mmu500-targets.patch
new file mode 100644 (file)
index 0000000..108279d
--- /dev/null
@@ -0,0 +1,51 @@
+From 16e0779afc71d07f6b1e4d88bfbecb7213008ec9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 23 Apr 2021 21:49:26 -0400
+Subject: drm/msm: fix LLC not being enabled for mmu500 targets
+
+From: Jonathan Marek <jonathan@marek.ca>
+
+[ Upstream commit 4b95d371fb001185af84d177e69a23d55bd0167a ]
+
+mmu500 targets don't have a "cx_mem" region, set llc_mmio to NULL in that
+case to avoid the IS_ERR() condition in a6xx_llc_activate().
+
+Fixes: 3d247123b5a1 ("drm/msm/a6xx: Add support for using system cache on MMU500 based targets")
+Signed-off-by: Jonathan Marek <jonathan@marek.ca>
+Link: https://lore.kernel.org/r/20210424014927.1661-1-jonathan@marek.ca
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index b6e8ff2782da..50ddc5834cab 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1152,10 +1152,6 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
+ {
+       struct device_node *phandle;
+-      a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
+-      if (IS_ERR(a6xx_gpu->llc_mmio))
+-              return;
+-
+       /*
+        * There is a different programming path for targets with an mmu500
+        * attached, so detect if that is the case
+@@ -1165,6 +1161,11 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
+               of_device_is_compatible(phandle, "arm,mmu-500"));
+       of_node_put(phandle);
++      if (a6xx_gpu->have_mmu500)
++              a6xx_gpu->llc_mmio = NULL;
++      else
++              a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
++
+       a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
+       a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
+-- 
+2.30.2
+
diff --git a/queue-5.11/f2fs-compress-fix-race-condition-of-overwrite-vs-tru.patch b/queue-5.11/f2fs-compress-fix-race-condition-of-overwrite-vs-tru.patch
new file mode 100644 (file)
index 0000000..8557eb6
--- /dev/null
@@ -0,0 +1,148 @@
+From e0cc0d096a7718921c6eb0c77ae65b0e76d1c21f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 May 2021 17:30:31 +0800
+Subject: f2fs: compress: fix race condition of overwrite vs truncate
+
+From: Chao Yu <yuchao0@huawei.com>
+
+[ Upstream commit a949dc5f2c5cfe0c910b664650f45371254c0744 ]
+
+pos_fsstress testcase complains a panic as below:
+
+------------[ cut here ]------------
+kernel BUG at fs/f2fs/compress.c:1082!
+invalid opcode: 0000 [#1] SMP PTI
+CPU: 4 PID: 2753477 Comm: kworker/u16:2 Tainted: G           OE     5.12.0-rc1-custom #1
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
+Workqueue: writeback wb_workfn (flush-252:16)
+RIP: 0010:prepare_compress_overwrite+0x4c0/0x760 [f2fs]
+Call Trace:
+ f2fs_prepare_compress_overwrite+0x5f/0x80 [f2fs]
+ f2fs_write_cache_pages+0x468/0x8a0 [f2fs]
+ f2fs_write_data_pages+0x2a4/0x2f0 [f2fs]
+ do_writepages+0x38/0xc0
+ __writeback_single_inode+0x44/0x2a0
+ writeback_sb_inodes+0x223/0x4d0
+ __writeback_inodes_wb+0x56/0xf0
+ wb_writeback+0x1dd/0x290
+ wb_workfn+0x309/0x500
+ process_one_work+0x220/0x3c0
+ worker_thread+0x53/0x420
+ kthread+0x12f/0x150
+ ret_from_fork+0x22/0x30
+
+The root cause is truncate() may race with overwrite as below,
+so that one reference count left in page can not guarantee the
+page attaching in mapping tree all the time, after truncation,
+later find_lock_page() may return NULL pointer.
+
+- prepare_compress_overwrite
+ - f2fs_pagecache_get_page
+ - unlock_page
+                                       - f2fs_setattr
+                                        - truncate_setsize
+                                         - truncate_inode_page
+                                          - delete_from_page_cache
+ - find_lock_page
+
+Fix this by avoiding referencing updated page.
+
+Fixes: 4c8ff7095bef ("f2fs: support data compression")
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/compress.c | 35 ++++++++++++-----------------------
+ 1 file changed, 12 insertions(+), 23 deletions(-)
+
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index ac12adc17f5e..8093d06116b4 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -123,19 +123,6 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
+       f2fs_drop_rpages(cc, len, true);
+ }
+-static void f2fs_put_rpages_mapping(struct address_space *mapping,
+-                              pgoff_t start, int len)
+-{
+-      int i;
+-
+-      for (i = 0; i < len; i++) {
+-              struct page *page = find_get_page(mapping, start + i);
+-
+-              put_page(page);
+-              put_page(page);
+-      }
+-}
+-
+ static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
+               struct writeback_control *wbc, bool redirty, int unlock)
+ {
+@@ -1008,7 +995,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
+               }
+               if (PageUptodate(page))
+-                      unlock_page(page);
++                      f2fs_put_page(page, 1);
+               else
+                       f2fs_compress_ctx_add_page(cc, page);
+       }
+@@ -1018,32 +1005,34 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
+               ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
+                                       &last_block_in_bio, false, true);
++              f2fs_put_rpages(cc);
+               f2fs_destroy_compress_ctx(cc);
+               if (ret)
+-                      goto release_pages;
++                      goto out;
+               if (bio)
+                       f2fs_submit_bio(sbi, bio, DATA);
+               ret = f2fs_init_compress_ctx(cc);
+               if (ret)
+-                      goto release_pages;
++                      goto out;
+       }
+       for (i = 0; i < cc->cluster_size; i++) {
+               f2fs_bug_on(sbi, cc->rpages[i]);
+               page = find_lock_page(mapping, start_idx + i);
+-              f2fs_bug_on(sbi, !page);
++              if (!page) {
++                      /* page can be truncated */
++                      goto release_and_retry;
++              }
+               f2fs_wait_on_page_writeback(page, DATA, true, true);
+-
+               f2fs_compress_ctx_add_page(cc, page);
+-              f2fs_put_page(page, 0);
+               if (!PageUptodate(page)) {
++release_and_retry:
++                      f2fs_put_rpages(cc);
+                       f2fs_unlock_rpages(cc, i + 1);
+-                      f2fs_put_rpages_mapping(mapping, start_idx,
+-                                      cc->cluster_size);
+                       f2fs_destroy_compress_ctx(cc);
+                       goto retry;
+               }
+@@ -1075,10 +1064,10 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
+       }
+ unlock_pages:
++      f2fs_put_rpages(cc);
+       f2fs_unlock_rpages(cc, i);
+-release_pages:
+-      f2fs_put_rpages_mapping(mapping, start_idx, i);
+       f2fs_destroy_compress_ctx(cc);
++out:
+       return ret;
+ }
+-- 
+2.30.2
+
diff --git a/queue-5.11/f2fs-compress-fix-to-assign-cc.cluster_idx-correctly.patch b/queue-5.11/f2fs-compress-fix-to-assign-cc.cluster_idx-correctly.patch
new file mode 100644 (file)
index 0000000..e02be81
--- /dev/null
@@ -0,0 +1,145 @@
+From f83d2e85e1db99f9d188b5324e9fea860a7ab008 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 May 2021 17:30:32 +0800
+Subject: f2fs: compress: fix to assign cc.cluster_idx correctly
+
+From: Chao Yu <yuchao0@huawei.com>
+
+[ Upstream commit 8bfbfb0ddd706b1ce2e89259ecc45f192c0ec2bf ]
+
+In f2fs_destroy_compress_ctx(), after f2fs_destroy_compress_ctx(),
+cc.cluster_idx will be cleared w/ NULL_CLUSTER, f2fs_cluster_blocks()
+may check wrong cluster metadata, fix it.
+
+Fixes: 4c8ff7095bef ("f2fs: support data compression")
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/compress.c | 17 +++++++++--------
+ fs/f2fs/data.c     |  6 +++---
+ fs/f2fs/f2fs.h     |  2 +-
+ 3 files changed, 13 insertions(+), 12 deletions(-)
+
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 8093d06116b4..3a503e5a8c11 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -151,13 +151,14 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
+       return cc->rpages ? 0 : -ENOMEM;
+ }
+-void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
++void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
+ {
+       page_array_free(cc->inode, cc->rpages, cc->cluster_size);
+       cc->rpages = NULL;
+       cc->nr_rpages = 0;
+       cc->nr_cpages = 0;
+-      cc->cluster_idx = NULL_CLUSTER;
++      if (!reuse)
++              cc->cluster_idx = NULL_CLUSTER;
+ }
+ void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
+@@ -1006,7 +1007,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
+               ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
+                                       &last_block_in_bio, false, true);
+               f2fs_put_rpages(cc);
+-              f2fs_destroy_compress_ctx(cc);
++              f2fs_destroy_compress_ctx(cc, true);
+               if (ret)
+                       goto out;
+               if (bio)
+@@ -1033,7 +1034,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
+ release_and_retry:
+                       f2fs_put_rpages(cc);
+                       f2fs_unlock_rpages(cc, i + 1);
+-                      f2fs_destroy_compress_ctx(cc);
++                      f2fs_destroy_compress_ctx(cc, true);
+                       goto retry;
+               }
+       }
+@@ -1066,7 +1067,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
+ unlock_pages:
+       f2fs_put_rpages(cc);
+       f2fs_unlock_rpages(cc, i);
+-      f2fs_destroy_compress_ctx(cc);
++      f2fs_destroy_compress_ctx(cc, true);
+ out:
+       return ret;
+ }
+@@ -1102,7 +1103,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
+               set_cluster_dirty(&cc);
+       f2fs_put_rpages_wbc(&cc, NULL, false, 1);
+-      f2fs_destroy_compress_ctx(&cc);
++      f2fs_destroy_compress_ctx(&cc, false);
+       return first_index;
+ }
+@@ -1321,7 +1322,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
+       f2fs_put_rpages(cc);
+       page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+       cc->cpages = NULL;
+-      f2fs_destroy_compress_ctx(cc);
++      f2fs_destroy_compress_ctx(cc, false);
+       return 0;
+ out_destroy_crypt:
+@@ -1483,7 +1484,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
+       err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
+       f2fs_put_rpages_wbc(cc, wbc, false, 0);
+ destroy_out:
+-      f2fs_destroy_compress_ctx(cc);
++      f2fs_destroy_compress_ctx(cc, false);
+       return err;
+ }
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 4d3ebf094f6d..3802ad227a1e 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2405,7 +2405,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
+                                                       max_nr_pages,
+                                                       &last_block_in_bio,
+                                                       rac != NULL, false);
+-                              f2fs_destroy_compress_ctx(&cc);
++                              f2fs_destroy_compress_ctx(&cc, false);
+                               if (ret)
+                                       goto set_error_page;
+                       }
+@@ -2450,7 +2450,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
+                                                       max_nr_pages,
+                                                       &last_block_in_bio,
+                                                       rac != NULL, false);
+-                              f2fs_destroy_compress_ctx(&cc);
++                              f2fs_destroy_compress_ctx(&cc, false);
+                       }
+               }
+ #endif
+@@ -3154,7 +3154,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
+               }
+       }
+       if (f2fs_compressed_file(inode))
+-              f2fs_destroy_compress_ctx(&cc);
++              f2fs_destroy_compress_ctx(&cc, false);
+ #endif
+       if (retry) {
+               index = 0;
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index c9d54652a518..43e76529d674 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3894,7 +3894,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic);
+ void f2fs_decompress_end_io(struct page **rpages,
+                       unsigned int cluster_size, bool err, bool verity);
+ int f2fs_init_compress_ctx(struct compress_ctx *cc);
+-void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
++void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
+ void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
+ int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
+ void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
+-- 
+2.30.2
+
diff --git a/queue-5.11/f2fs-compress-fix-to-free-compress-page-correctly.patch b/queue-5.11/f2fs-compress-fix-to-free-compress-page-correctly.patch
new file mode 100644 (file)
index 0000000..3c71d65
--- /dev/null
@@ -0,0 +1,37 @@
+From 001afd7a38a19cc8e75470720cdc8926cebd4a56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 May 2021 17:00:43 +0800
+Subject: f2fs: compress: fix to free compress page correctly
+
+From: Chao Yu <yuchao0@huawei.com>
+
+[ Upstream commit a12cc5b423d4f36dc1a1ea3911e49cf9dff43898 ]
+
+In error path of f2fs_write_compressed_pages(), it needs to call
+f2fs_compress_free_page() to release temporary page.
+
+Fixes: 5e6bbde95982 ("f2fs: introduce mempool for {,de}compress intermediate page allocation")
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/compress.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 7a774c9e4cb8..ac12adc17f5e 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -1343,7 +1343,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
+       for (i = 0; i < cc->nr_cpages; i++) {
+               if (!cc->cpages[i])
+                       continue;
+-              f2fs_put_page(cc->cpages[i], 1);
++              f2fs_compress_free_page(cc->cpages[i]);
++              cc->cpages[i] = NULL;
+       }
+ out_put_cic:
+       kmem_cache_free(cic_entry_slab, cic);
+-- 
+2.30.2
+
diff --git a/queue-5.11/hwmon-ltc2992-put-fwnode-in-error-case-during-probe.patch b/queue-5.11/hwmon-ltc2992-put-fwnode-in-error-case-during-probe.patch
new file mode 100644 (file)
index 0000000..92b4f27
--- /dev/null
@@ -0,0 +1,50 @@
+From 1d2a9fdef41bc0af3df4132a53888ae93aea0110 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 May 2021 13:01:36 +0300
+Subject: hwmon: (ltc2992) Put fwnode in error case during ->probe()
+
+From: Andy Shevchenko <andy.shevchenko@gmail.com>
+
+[ Upstream commit 8370e5b093080c03cf89f7ebf0bef6984545429e ]
+
+In each iteration fwnode_for_each_available_child_node() bumps a reference
+counting of a loop variable followed by dropping in on a next iteration,
+
+Since in error case the loop is broken, we have to drop a reference count
+by ourselves. Do it for port_fwnode in error case during ->probe().
+
+Fixes: b0bd407e94b0 ("hwmon: (ltc2992) Add support")
+Cc: Alexandru Tachici <alexandru.tachici@analog.com>
+Signed-off-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Link: https://lore.kernel.org/r/20210510100136.3303142-1-andy.shevchenko@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/ltc2992.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
+index 4382105bf142..2a4bed0ab226 100644
+--- a/drivers/hwmon/ltc2992.c
++++ b/drivers/hwmon/ltc2992.c
+@@ -900,11 +900,15 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
+       fwnode_for_each_available_child_node(fwnode, child) {
+               ret = fwnode_property_read_u32(child, "reg", &addr);
+-              if (ret < 0)
++              if (ret < 0) {
++                      fwnode_handle_put(child);
+                       return ret;
++              }
+-              if (addr > 1)
++              if (addr > 1) {
++                      fwnode_handle_put(child);
+                       return -EINVAL;
++              }
+               ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
+               if (!ret)
+-- 
+2.30.2
+
diff --git a/queue-5.11/hwmon-occ-fix-poll-rate-limiting.patch b/queue-5.11/hwmon-occ-fix-poll-rate-limiting.patch
new file mode 100644 (file)
index 0000000..67c4bf0
--- /dev/null
@@ -0,0 +1,64 @@
+From 94a9b925171fc87fdf32086b9bd0ab9df09d6fcd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Apr 2021 10:13:36 -0500
+Subject: hwmon: (occ) Fix poll rate limiting
+
+From: Eddie James <eajames@linux.ibm.com>
+
+[ Upstream commit 5216dff22dc2bbbbe6f00335f9fd2879670e753b ]
+
+The poll rate limiter time was initialized at zero. This breaks the
+comparison in time_after if jiffies is large. Switch to storing the
+next update time rather than the previous time, and initialize the
+time when the device is probed.
+
+Fixes: c10e753d43eb ("hwmon (occ): Add sensor types and versions")
+Signed-off-by: Eddie James <eajames@linux.ibm.com>
+Link: https://lore.kernel.org/r/20210429151336.18980-1-eajames@linux.ibm.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/occ/common.c | 5 +++--
+ drivers/hwmon/occ/common.h | 2 +-
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
+index 7a5e539b567b..580e63d7daa0 100644
+--- a/drivers/hwmon/occ/common.c
++++ b/drivers/hwmon/occ/common.c
+@@ -217,9 +217,9 @@ int occ_update_response(struct occ *occ)
+               return rc;
+       /* limit the maximum rate of polling the OCC */
+-      if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
++      if (time_after(jiffies, occ->next_update)) {
+               rc = occ_poll(occ);
+-              occ->last_update = jiffies;
++              occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
+       } else {
+               rc = occ->last_error;
+       }
+@@ -1164,6 +1164,7 @@ int occ_setup(struct occ *occ, const char *name)
+               return rc;
+       }
++      occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
+       occ_parse_poll_response(occ);
+       rc = occ_setup_sensor_attrs(occ);
+diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
+index 67e6968b8978..e6df719770e8 100644
+--- a/drivers/hwmon/occ/common.h
++++ b/drivers/hwmon/occ/common.h
+@@ -99,7 +99,7 @@ struct occ {
+       u8 poll_cmd_data;               /* to perform OCC poll command */
+       int (*send_cmd)(struct occ *occ, u8 *cmd);
+-      unsigned long last_update;
++      unsigned long next_update;
+       struct mutex lock;              /* lock OCC access */
+       struct device *hwmon;
+-- 
+2.30.2
+
diff --git a/queue-5.11/iio-core-return-enodev-if-ioctl-is-unknown.patch b/queue-5.11/iio-core-return-enodev-if-ioctl-is-unknown.patch
new file mode 100644 (file)
index 0000000..5021cf8
--- /dev/null
@@ -0,0 +1,60 @@
+From 02aae57ee9f0c8daf77ee589e4c45273cee0e8d3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 May 2021 17:43:50 +0300
+Subject: iio: core: return ENODEV if ioctl is unknown
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alexandru Ardelean <aardelean@deviqon.com>
+
+[ Upstream commit af0670b0bf1b116fd729b1b1011cf814bc34e12e ]
+
+When the ioctl() mechanism was introduced in IIO core to centralize the
+registration of all ioctls in one place via commit 8dedcc3eee3ac ("iio:
+core: centralize ioctl() calls to the main chardev"), the return code was
+changed from ENODEV to EINVAL, when the ioctl code isn't known.
+
+This was done by accident.
+
+This change reverts back to the old behavior, where if the ioctl() code
+isn't known, ENODEV is returned (vs EINVAL).
+
+This was brought into perspective by this patch:
+  https://lore.kernel.org/linux-iio/20210428150815.136150-1-paul@crapouillou.net/
+
+Fixes: 8dedcc3eee3ac ("iio: core: centralize ioctl() calls to the main chardev")
+Signed-off-by: Alexandru Ardelean <aardelean@deviqon.com>
+Reviewed-by: Nuno Sá <nuno.sa@analog.com>
+Tested-by: Paul Cercueil <paul@crapouillou.net>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/industrialio-core.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index c2e4c267c36b..afba32b57814 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -1698,7 +1698,6 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+       if (!indio_dev->info)
+               goto out_unlock;
+-      ret = -EINVAL;
+       list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
+               ret = h->ioctl(indio_dev, filp, cmd, arg);
+               if (ret != IIO_IOCTL_UNHANDLED)
+@@ -1706,7 +1705,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+       }
+       if (ret == IIO_IOCTL_UNHANDLED)
+-              ret = -EINVAL;
++              ret = -ENODEV;
+ out_unlock:
+       mutex_unlock(&indio_dev->info_exist_lock);
+-- 
+2.30.2
+
diff --git a/queue-5.11/iio-hid-sensors-select-iio_triggered_buffer-under-hi.patch b/queue-5.11/iio-hid-sensors-select-iio_triggered_buffer-under-hi.patch
new file mode 100644 (file)
index 0000000..2781134
--- /dev/null
@@ -0,0 +1,174 @@
+From 1042d64ef71025d9cbd3503971f444ecdc0c16b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Apr 2021 11:49:55 +0300
+Subject: iio: hid-sensors: select IIO_TRIGGERED_BUFFER under
+ HID_SENSOR_IIO_TRIGGER
+
+From: Alexandru Ardelean <aardelean@deviqon.com>
+
+[ Upstream commit 7061803522ee7876df1ca18cdd1e1551f761352d ]
+
+During commit 067fda1c065ff ("iio: hid-sensors: move triggered buffer
+setup into hid_sensor_setup_trigger"), the
+iio_triggered_buffer_{setup,cleanup}() functions got moved under the
+hid-sensor-trigger module.
+
+The above change works fine, if any of the sensors get built. However, when
+only the common hid-sensor-trigger module gets built (and none of the
+drivers), then the IIO_TRIGGERED_BUFFER symbol isn't selected/enforced.
+
+Previously, each driver would enforce/select the IIO_TRIGGERED_BUFFER
+symbol. With this change the HID_SENSOR_IIO_TRIGGER (for the
+hid-sensor-trigger module) will enforce that IIO_TRIGGERED_BUFFER gets
+selected.
+
+All HID sensor drivers select the HID_SENSOR_IIO_TRIGGER symbol. So, this
+change removes the IIO_TRIGGERED_BUFFER enforcement from each driver.
+
+Fixes: 067fda1c065ff ("iio: hid-sensors: move triggered buffer setup into hid_sensor_setup_trigger")
+Reported-by: Thomas Deutschmann <whissi@gentoo.org>
+Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Signed-off-by: Alexandru Ardelean <aardelean@deviqon.com>
+Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Link: https://lore.kernel.org/r/20210414084955.260117-1-aardelean@deviqon.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/accel/Kconfig              | 1 -
+ drivers/iio/common/hid-sensors/Kconfig | 1 +
+ drivers/iio/gyro/Kconfig               | 1 -
+ drivers/iio/humidity/Kconfig           | 1 -
+ drivers/iio/light/Kconfig              | 2 --
+ drivers/iio/magnetometer/Kconfig       | 1 -
+ drivers/iio/orientation/Kconfig        | 2 --
+ drivers/iio/pressure/Kconfig           | 1 -
+ drivers/iio/temperature/Kconfig        | 1 -
+ 9 files changed, 1 insertion(+), 10 deletions(-)
+
+diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
+index 2e0c62c39155..8acf277b8b25 100644
+--- a/drivers/iio/accel/Kconfig
++++ b/drivers/iio/accel/Kconfig
+@@ -211,7 +211,6 @@ config DMARD10
+ config HID_SENSOR_ACCEL_3D
+       depends on HID_SENSOR_HUB
+       select IIO_BUFFER
+-      select IIO_TRIGGERED_BUFFER
+       select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
+       tristate "HID Accelerometers 3D"
+diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
+index 24d492567336..2a3dd3b907be 100644
+--- a/drivers/iio/common/hid-sensors/Kconfig
++++ b/drivers/iio/common/hid-sensors/Kconfig
+@@ -19,6 +19,7 @@ config HID_SENSOR_IIO_TRIGGER
+       tristate "Common module (trigger) for all HID Sensor IIO drivers"
+       depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER
+       select IIO_TRIGGER
++      select IIO_TRIGGERED_BUFFER
+       help
+         Say yes here to build trigger support for HID sensors.
+         Triggers will be send if all requested attributes were read.
+diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
+index 5824f2edf975..20b5ac7ab66a 100644
+--- a/drivers/iio/gyro/Kconfig
++++ b/drivers/iio/gyro/Kconfig
+@@ -111,7 +111,6 @@ config FXAS21002C_SPI
+ config HID_SENSOR_GYRO_3D
+       depends on HID_SENSOR_HUB
+       select IIO_BUFFER
+-      select IIO_TRIGGERED_BUFFER
+       select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
+       tristate "HID Gyroscope 3D"
+diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
+index 6549fcf6db69..2de5494e7c22 100644
+--- a/drivers/iio/humidity/Kconfig
++++ b/drivers/iio/humidity/Kconfig
+@@ -52,7 +52,6 @@ config HID_SENSOR_HUMIDITY
+       tristate "HID Environmental humidity sensor"
+       depends on HID_SENSOR_HUB
+       select IIO_BUFFER
+-      select IIO_TRIGGERED_BUFFER
+       select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
+       help
+diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
+index 33ad4dd0b5c7..917f9becf9c7 100644
+--- a/drivers/iio/light/Kconfig
++++ b/drivers/iio/light/Kconfig
+@@ -256,7 +256,6 @@ config ISL29125
+ config HID_SENSOR_ALS
+       depends on HID_SENSOR_HUB
+       select IIO_BUFFER
+-      select IIO_TRIGGERED_BUFFER
+       select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
+       tristate "HID ALS"
+@@ -270,7 +269,6 @@ config HID_SENSOR_ALS
+ config HID_SENSOR_PROX
+       depends on HID_SENSOR_HUB
+       select IIO_BUFFER
+-      select IIO_TRIGGERED_BUFFER
+       select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
+       tristate "HID PROX"
+diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
+index 1697a8c03506..7e9489a35571 100644
+--- a/drivers/iio/magnetometer/Kconfig
++++ b/drivers/iio/magnetometer/Kconfig
+@@ -95,7 +95,6 @@ config MAG3110
+ config HID_SENSOR_MAGNETOMETER_3D
+       depends on HID_SENSOR_HUB
+       select IIO_BUFFER
+-      select IIO_TRIGGERED_BUFFER
+       select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
+       tristate "HID Magenetometer 3D"
+diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig
+index a505583cc2fd..396cbbb867f4 100644
+--- a/drivers/iio/orientation/Kconfig
++++ b/drivers/iio/orientation/Kconfig
+@@ -9,7 +9,6 @@ menu "Inclinometer sensors"
+ config HID_SENSOR_INCLINOMETER_3D
+       depends on HID_SENSOR_HUB
+       select IIO_BUFFER
+-      select IIO_TRIGGERED_BUFFER
+       select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
+       tristate "HID Inclinometer 3D"
+@@ -20,7 +19,6 @@ config HID_SENSOR_INCLINOMETER_3D
+ config HID_SENSOR_DEVICE_ROTATION
+       depends on HID_SENSOR_HUB
+       select IIO_BUFFER
+-      select IIO_TRIGGERED_BUFFER
+       select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
+       tristate "HID Device Rotation"
+diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
+index 689b978db4f9..fc0d3cfca418 100644
+--- a/drivers/iio/pressure/Kconfig
++++ b/drivers/iio/pressure/Kconfig
+@@ -79,7 +79,6 @@ config DPS310
+ config HID_SENSOR_PRESS
+       depends on HID_SENSOR_HUB
+       select IIO_BUFFER
+-      select IIO_TRIGGERED_BUFFER
+       select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
+       tristate "HID PRESS"
+diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
+index f1f2a1499c9e..4df60082c1fa 100644
+--- a/drivers/iio/temperature/Kconfig
++++ b/drivers/iio/temperature/Kconfig
+@@ -45,7 +45,6 @@ config HID_SENSOR_TEMP
+       tristate "HID Environmental temperature sensor"
+       depends on HID_SENSOR_HUB
+       select IIO_BUFFER
+-      select IIO_TRIGGERED_BUFFER
+       select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
+       help
+-- 
+2.30.2
+
diff --git a/queue-5.11/iio-light-gp2ap002-fix-rumtime-pm-imbalance-on-error.patch b/queue-5.11/iio-light-gp2ap002-fix-rumtime-pm-imbalance-on-error.patch
new file mode 100644 (file)
index 0000000..d38a93f
--- /dev/null
@@ -0,0 +1,51 @@
+From c22ad1833354d3cd32bed4154ee9875db95918a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Apr 2021 11:49:27 +0800
+Subject: iio: light: gp2ap002: Fix rumtime PM imbalance on error
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit 8edb79af88efc6e49e735f9baf61d9f0748b881f ]
+
+When devm_request_threaded_irq() fails, we should decrease the
+runtime PM counter to keep the counter balanced. But when
+iio_device_register() fails, we need not to decrease it because
+we have already decreased it before.
+
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
+Fixes: 97d642e23037 ("iio: light: Add a driver for Sharp GP2AP002x00F")
+Link: https://lore.kernel.org/r/20210407034927.16882-1-dinghao.liu@zju.edu.cn
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/light/gp2ap002.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
+index 7ba7aa59437c..040d8429a6e0 100644
+--- a/drivers/iio/light/gp2ap002.c
++++ b/drivers/iio/light/gp2ap002.c
+@@ -583,7 +583,7 @@ static int gp2ap002_probe(struct i2c_client *client,
+                                       "gp2ap002", indio_dev);
+       if (ret) {
+               dev_err(dev, "unable to request IRQ\n");
+-              goto out_disable_vio;
++              goto out_put_pm;
+       }
+       gp2ap002->irq = client->irq;
+@@ -613,8 +613,9 @@ static int gp2ap002_probe(struct i2c_client *client,
+       return 0;
+-out_disable_pm:
++out_put_pm:
+       pm_runtime_put_noidle(dev);
++out_disable_pm:
+       pm_runtime_disable(dev);
+ out_disable_vio:
+       regulator_disable(gp2ap002->vio);
+-- 
+2.30.2
+
diff --git a/queue-5.11/iio-proximity-pulsedlight-fix-rumtime-pm-imbalance-o.patch b/queue-5.11/iio-proximity-pulsedlight-fix-rumtime-pm-imbalance-o.patch
new file mode 100644 (file)
index 0000000..f9c1b24
--- /dev/null
@@ -0,0 +1,37 @@
+From 9cfcc15208c7ee4c85ac94424f6fba32e1c64a57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Apr 2021 13:32:02 +0800
+Subject: iio: proximity: pulsedlight: Fix rumtime PM imbalance on error
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit a2fa9242e89f27696515699fe0f0296bf1ac1815 ]
+
+When lidar_write_control() fails, a pairing PM usage counter
+decrement is needed to keep the counter balanced.
+
+Fixes: 4ac4e086fd8c5 ("iio: pulsedlight-lidar-lite: add runtime PM")
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Link: https://lore.kernel.org/r/20210412053204.4889-1-dinghao.liu@zju.edu.cn
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/proximity/pulsedlight-lidar-lite-v2.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+index c685f10b5ae4..cc206bfa09c7 100644
+--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
++++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+@@ -160,6 +160,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
+       ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
+       if (ret < 0) {
+               dev_err(&client->dev, "cannot send start measurement command");
++              pm_runtime_put_noidle(&client->dev);
+               return ret;
+       }
+-- 
+2.30.2
+
diff --git a/queue-5.11/kvm-lapic-accurately-guarantee-busy-wait-for-timer-t.patch b/queue-5.11/kvm-lapic-accurately-guarantee-busy-wait-for-timer-t.patch
new file mode 100644 (file)
index 0000000..df88107
--- /dev/null
@@ -0,0 +1,45 @@
+From cfca1ef08d26f38bbbb3d039355acb92b35b1994 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Apr 2021 19:08:02 +0800
+Subject: KVM: LAPIC: Accurately guarantee busy wait for timer to expire when
+ using hv_timer
+
+From: Wanpeng Li <wanpengli@tencent.com>
+
+[ Upstream commit d981dd15498b188636ec5a7d8ad485e650f63d8d ]
+
+Commit ee66e453db13d (KVM: lapic: Busy wait for timer to expire when
+using hv_timer) tries to set ktime->expired_tscdeadline by checking
+ktime->hv_timer_in_use since lapic timer oneshot/periodic modes which
+are emulated by vmx preemption timer also get advanced, they leverage
+the same vmx preemption timer logic with tsc-deadline mode. However,
+ktime->hv_timer_in_use is cleared before apic_timer_expired() handling,
+let's delay this clearing in preemption-disabled region.
+
+Fixes: ee66e453db13d ("KVM: lapic: Busy wait for timer to expire when using hv_timer")
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
+Message-Id: <1619608082-4187-1-git-send-email-wanpengli@tencent.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/lapic.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 570fa298083c..70eb00f4317f 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1908,8 +1908,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
+       if (!apic->lapic_timer.hv_timer_in_use)
+               goto out;
+       WARN_ON(rcuwait_active(&vcpu->wait));
+-      cancel_hv_timer(apic);
+       apic_timer_expired(apic, false);
++      cancel_hv_timer(apic);
+       if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
+               advance_periodic_target_expiration(apic);
+-- 
+2.30.2
+
diff --git a/queue-5.11/kvm-x86-cancel-pvclock_gtod_work-on-module-removal.patch b/queue-5.11/kvm-x86-cancel-pvclock_gtod_work-on-module-removal.patch
new file mode 100644 (file)
index 0000000..040aa60
--- /dev/null
@@ -0,0 +1,49 @@
+From cf6d5dc9234d22525f24f3eb53efcf4e93028112 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 May 2021 23:48:17 +0200
+Subject: KVM: x86: Cancel pvclock_gtod_work on module removal
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit 594b27e677b35f9734b1969d175ebc6146741109 ]
+
+Nothing prevents the following:
+
+  pvclock_gtod_notify()
+    queue_work(system_long_wq, &pvclock_gtod_work);
+  ...
+  remove_module(kvm);
+  ...
+  work_queue_run()
+    pvclock_gtod_work()        <- UAF
+
+Ditto for any other operation on that workqueue list head which touches
+pvclock_gtod_work after module removal.
+
+Cancel the work in kvm_arch_exit() to prevent that.
+
+Fixes: 16e8d74d2da9 ("KVM: x86: notifier for clocksource changes")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Message-Id: <87czu4onry.ffs@nanos.tec.linutronix.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 3b3f96e87b8c..b010ad6cbd14 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8005,6 +8005,7 @@ void kvm_arch_exit(void)
+       cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
+ #ifdef CONFIG_X86_64
+       pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
++      cancel_work_sync(&pvclock_gtod_work);
+ #endif
+       kvm_x86_ops.hardware_enable = NULL;
+       kvm_mmu_module_exit();
+-- 
+2.30.2
+
diff --git a/queue-5.11/kvm-x86-prevent-deadlock-against-tk_core.seq.patch b/queue-5.11/kvm-x86-prevent-deadlock-against-tk_core.seq.patch
new file mode 100644 (file)
index 0000000..9e7dbd3
--- /dev/null
@@ -0,0 +1,88 @@
+From 0dfe4e15a8839df44fda6f0796d517cd5293b7c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 May 2021 15:21:37 +0200
+Subject: KVM: x86: Prevent deadlock against tk_core.seq
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit 3f804f6d201ca93adf4c3df04d1bfd152c1129d6 ]
+
+syzbot reported a possible deadlock in pvclock_gtod_notify():
+
+CPU 0                                              CPU 1
+write_seqcount_begin(&tk_core.seq);
+  pvclock_gtod_notify()                            spin_lock(&pool->lock);
+    queue_work(..., &pvclock_gtod_work)            ktime_get()
+     spin_lock(&pool->lock);                 do {
+                                               seq = read_seqcount_begin(tk_core.seq)
+                                               ...
+                                             } while (read_seqcount_retry(&tk_core.seq, seq);
+
+While this is unlikely to happen, it's possible.
+
+Delegate queue_work() to irq_work() which postpones it until the
+tk_core.seq write held region is left and interrupts are reenabled.
+
+Fixes: 16e8d74d2da9 ("KVM: x86: notifier for clocksource changes")
+Reported-by: syzbot+6beae4000559d41d80f8@syzkaller.appspotmail.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Message-Id: <87h7jgm1zy.ffs@nanos.tec.linutronix.de>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 22 ++++++++++++++++++----
+ 1 file changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index b010ad6cbd14..8105e9ae1ff8 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7872,6 +7872,18 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
+ static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
++/*
++ * Indirection to move queue_work() out of the tk_core.seq write held
++ * region to prevent possible deadlocks against time accessors which
++ * are invoked with work related locks held.
++ */
++static void pvclock_irq_work_fn(struct irq_work *w)
++{
++      queue_work(system_long_wq, &pvclock_gtod_work);
++}
++
++static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
++
+ /*
+  * Notification about pvclock gtod data update.
+  */
+@@ -7883,13 +7895,14 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
+       update_pvclock_gtod(tk);
+-      /* disable master clock if host does not trust, or does not
+-       * use, TSC based clocksource.
++      /*
++       * Disable master clock if host does not trust, or does not use,
++       * TSC based clocksource. Delegate queue_work() to irq_work as
++       * this is invoked with tk_core.seq write held.
+        */
+       if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
+           atomic_read(&kvm_guest_has_master_clock) != 0)
+-              queue_work(system_long_wq, &pvclock_gtod_work);
+-
++              irq_work_queue(&pvclock_irq_work);
+       return 0;
+ }
+@@ -8005,6 +8018,7 @@ void kvm_arch_exit(void)
+       cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
+ #ifdef CONFIG_X86_64
+       pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
++      irq_work_sync(&pvclock_irq_work);
+       cancel_work_sync(&pvclock_gtod_work);
+ #endif
+       kvm_x86_ops.hardware_enable = NULL;
+-- 
+2.30.2
+
diff --git a/queue-5.11/kyber-fix-out-of-bounds-access-when-preempted.patch b/queue-5.11/kyber-fix-out-of-bounds-access-when-preempted.patch
new file mode 100644 (file)
index 0000000..2eacfec
--- /dev/null
@@ -0,0 +1,162 @@
+From 3c11c9fd8baef003841022574c4d937bbafe9dad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 May 2021 17:05:35 -0700
+Subject: kyber: fix out of bounds access when preempted
+
+From: Omar Sandoval <osandov@fb.com>
+
+[ Upstream commit efed9a3337e341bd0989161b97453b52567bc59d ]
+
+__blk_mq_sched_bio_merge() gets the ctx and hctx for the current CPU and
+passes the hctx to ->bio_merge(). kyber_bio_merge() then gets the ctx
+for the current CPU again and uses that to get the corresponding Kyber
+context in the passed hctx. However, the thread may be preempted between
+the two calls to blk_mq_get_ctx(), and the ctx returned the second time
+may no longer correspond to the passed hctx. This "works" accidentally
+most of the time, but it can cause us to read garbage if the second ctx
+came from an hctx with more ctx's than the first one (i.e., if
+ctx->index_hw[hctx->type] > hctx->nr_ctx).
+
+This manifested as this UBSAN array index out of bounds error reported
+by Jakub:
+
+UBSAN: array-index-out-of-bounds in ../kernel/locking/qspinlock.c:130:9
+index 13106 is out of range for type 'long unsigned int [128]'
+Call Trace:
+ dump_stack+0xa4/0xe5
+ ubsan_epilogue+0x5/0x40
+ __ubsan_handle_out_of_bounds.cold.13+0x2a/0x34
+ queued_spin_lock_slowpath+0x476/0x480
+ do_raw_spin_lock+0x1c2/0x1d0
+ kyber_bio_merge+0x112/0x180
+ blk_mq_submit_bio+0x1f5/0x1100
+ submit_bio_noacct+0x7b0/0x870
+ submit_bio+0xc2/0x3a0
+ btrfs_map_bio+0x4f0/0x9d0
+ btrfs_submit_data_bio+0x24e/0x310
+ submit_one_bio+0x7f/0xb0
+ submit_extent_page+0xc4/0x440
+ __extent_writepage_io+0x2b8/0x5e0
+ __extent_writepage+0x28d/0x6e0
+ extent_write_cache_pages+0x4d7/0x7a0
+ extent_writepages+0xa2/0x110
+ do_writepages+0x8f/0x180
+ __writeback_single_inode+0x99/0x7f0
+ writeback_sb_inodes+0x34e/0x790
+ __writeback_inodes_wb+0x9e/0x120
+ wb_writeback+0x4d2/0x660
+ wb_workfn+0x64d/0xa10
+ process_one_work+0x53a/0xa80
+ worker_thread+0x69/0x5b0
+ kthread+0x20b/0x240
+ ret_from_fork+0x1f/0x30
+
+Only Kyber uses the hctx, so fix it by passing the request_queue to
+->bio_merge() instead. BFQ and mq-deadline just use that, and Kyber can
+map the queues itself to avoid the mismatch.
+
+Fixes: a6088845c2bf ("block: kyber: make kyber more friendly with merging")
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Omar Sandoval <osandov@fb.com>
+Link: https://lore.kernel.org/r/c7598605401a48d5cfeadebb678abd10af22b83f.1620691329.git.osandov@fb.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/bfq-iosched.c      | 3 +--
+ block/blk-mq-sched.c     | 8 +++++---
+ block/kyber-iosched.c    | 5 +++--
+ block/mq-deadline.c      | 3 +--
+ include/linux/elevator.h | 2 +-
+ 5 files changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 5720978e4d09..c91dca641eb4 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2210,10 +2210,9 @@ static void bfq_remove_request(struct request_queue *q,
+ }
+-static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
++static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
+               unsigned int nr_segs)
+ {
+-      struct request_queue *q = hctx->queue;
+       struct bfq_data *bfqd = q->elevator->elevator_data;
+       struct request *free = NULL;
+       /*
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index deff4e826e23..d93b45834776 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -348,14 +348,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+               unsigned int nr_segs)
+ {
+       struct elevator_queue *e = q->elevator;
+-      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+-      struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
++      struct blk_mq_ctx *ctx;
++      struct blk_mq_hw_ctx *hctx;
+       bool ret = false;
+       enum hctx_type type;
+       if (e && e->type->ops.bio_merge)
+-              return e->type->ops.bio_merge(hctx, bio, nr_segs);
++              return e->type->ops.bio_merge(q, bio, nr_segs);
++      ctx = blk_mq_get_ctx(q);
++      hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+       type = hctx->type;
+       if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
+           list_empty_careful(&ctx->rq_lists[type]))
+diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
+index dc89199bc8c6..7f9ef773bf44 100644
+--- a/block/kyber-iosched.c
++++ b/block/kyber-iosched.c
+@@ -562,11 +562,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+       }
+ }
+-static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
++static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
+               unsigned int nr_segs)
+ {
++      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
++      struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+       struct kyber_hctx_data *khd = hctx->sched_data;
+-      struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
+       struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
+       unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
+       struct list_head *rq_list = &kcq->rq_list[sched_domain];
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index 800ac902809b..2b9635d0dcba 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -461,10 +461,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
+       return ELEVATOR_NO_MERGE;
+ }
+-static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
++static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
+               unsigned int nr_segs)
+ {
+-      struct request_queue *q = hctx->queue;
+       struct deadline_data *dd = q->elevator->elevator_data;
+       struct request *free = NULL;
+       bool ret;
+diff --git a/include/linux/elevator.h b/include/linux/elevator.h
+index bacc40a0bdf3..bc26b4e11f62 100644
+--- a/include/linux/elevator.h
++++ b/include/linux/elevator.h
+@@ -34,7 +34,7 @@ struct elevator_mq_ops {
+       void (*depth_updated)(struct blk_mq_hw_ctx *);
+       bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
+-      bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
++      bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
+       int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
+       void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
+       void (*requests_merged)(struct request_queue *, struct request *, struct request *);
+-- 
+2.30.2
+
diff --git a/queue-5.11/nbd-fix-null-pointer-in-flush_workqueue.patch b/queue-5.11/nbd-fix-null-pointer-in-flush_workqueue.patch
new file mode 100644 (file)
index 0000000..e84b52a
--- /dev/null
@@ -0,0 +1,86 @@
+From 9ee055f61313b892c8d53edae5f8d29503773ed1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 May 2021 19:43:30 +0800
+Subject: nbd: Fix NULL pointer in flush_workqueue
+
+From: Sun Ke <sunke32@huawei.com>
+
+[ Upstream commit 79ebe9110fa458d58f1fceb078e2068d7ad37390 ]
+
+Open /dev/nbdX first, the config_refs will be 1 and
+the pointers in nbd_device are still null. Disconnect
+/dev/nbdX, then reference a null recv_workq. The
+protection by config_refs in nbd_genl_disconnect is useless.
+
+[  656.366194] BUG: kernel NULL pointer dereference, address: 0000000000000020
+[  656.368943] #PF: supervisor write access in kernel mode
+[  656.369844] #PF: error_code(0x0002) - not-present page
+[  656.370717] PGD 10cc87067 P4D 10cc87067 PUD 1074b4067 PMD 0
+[  656.371693] Oops: 0002 [#1] SMP
+[  656.372242] CPU: 5 PID: 7977 Comm: nbd-client Not tainted 5.11.0-rc5-00040-g76c057c84d28 #1
+[  656.373661] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20190727_073836-buildvm-ppc64le-16.ppc.fedoraproject.org-3.fc31 04/01/2014
+[  656.375904] RIP: 0010:mutex_lock+0x29/0x60
+[  656.376627] Code: 00 0f 1f 44 00 00 55 48 89 fd 48 83 05 6f d7 fe 08 01 e8 7a c3 ff ff 48 83 05 6a d7 fe 08 01 31 c0 65 48 8b 14 25 00 6d 01 00 <f0> 48 0f b1 55 d
+[  656.378934] RSP: 0018:ffffc900005eb9b0 EFLAGS: 00010246
+[  656.379350] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
+[  656.379915] RDX: ffff888104cf2600 RSI: ffffffffaae8f452 RDI: 0000000000000020
+[  656.380473] RBP: 0000000000000020 R08: 0000000000000000 R09: ffff88813bd6b318
+[  656.381039] R10: 00000000000000c7 R11: fefefefefefefeff R12: ffff888102710b40
+[  656.381599] R13: ffffc900005eb9e0 R14: ffffffffb2930680 R15: ffff88810770ef00
+[  656.382166] FS:  00007fdf117ebb40(0000) GS:ffff88813bd40000(0000) knlGS:0000000000000000
+[  656.382806] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[  656.383261] CR2: 0000000000000020 CR3: 0000000100c84000 CR4: 00000000000006e0
+[  656.383819] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[  656.384370] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[  656.384927] Call Trace:
+[  656.385111]  flush_workqueue+0x92/0x6c0
+[  656.385395]  nbd_disconnect_and_put+0x81/0xd0
+[  656.385716]  nbd_genl_disconnect+0x125/0x2a0
+[  656.386034]  genl_family_rcv_msg_doit.isra.0+0x102/0x1b0
+[  656.386422]  genl_rcv_msg+0xfc/0x2b0
+[  656.386685]  ? nbd_ioctl+0x490/0x490
+[  656.386954]  ? genl_family_rcv_msg_doit.isra.0+0x1b0/0x1b0
+[  656.387354]  netlink_rcv_skb+0x62/0x180
+[  656.387638]  genl_rcv+0x34/0x60
+[  656.387874]  netlink_unicast+0x26d/0x590
+[  656.388162]  netlink_sendmsg+0x398/0x6c0
+[  656.388451]  ? netlink_rcv_skb+0x180/0x180
+[  656.388750]  ____sys_sendmsg+0x1da/0x320
+[  656.389038]  ? ____sys_recvmsg+0x130/0x220
+[  656.389334]  ___sys_sendmsg+0x8e/0xf0
+[  656.389605]  ? ___sys_recvmsg+0xa2/0xf0
+[  656.389889]  ? handle_mm_fault+0x1671/0x21d0
+[  656.390201]  __sys_sendmsg+0x6d/0xe0
+[  656.390464]  __x64_sys_sendmsg+0x23/0x30
+[  656.390751]  do_syscall_64+0x45/0x70
+[  656.391017]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+To fix it, just add if (nbd->recv_workq) to nbd_disconnect_and_put().
+
+Fixes: e9e006f5fcf2 ("nbd: fix max number of supported devs")
+Signed-off-by: Sun Ke <sunke32@huawei.com>
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Link: https://lore.kernel.org/r/20210512114331.1233964-2-sunke32@huawei.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/nbd.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 0f3bab47c0d6..b21eb58d6a45 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -2000,7 +2000,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
+        * config ref and try to destroy the workqueue from inside the work
+        * queue.
+        */
+-      flush_workqueue(nbd->recv_workq);
++      if (nbd->recv_workq)
++              flush_workqueue(nbd->recv_workq);
+       if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
+                              &nbd->config->runtime_flags))
+               nbd_config_put(nbd);
+-- 
+2.30.2
+
diff --git a/queue-5.11/nvmet-add-lba-to-sect-conversion-helpers.patch b/queue-5.11/nvmet-add-lba-to-sect-conversion-helpers.patch
new file mode 100644 (file)
index 0000000..2c42878
--- /dev/null
@@ -0,0 +1,79 @@
+From 64a0e699a7ffe6d35c6b6d8f537a7571389a6191 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Jan 2021 20:26:16 -0800
+Subject: nvmet: add lba to sect conversion helpers
+
+From: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+
+[ Upstream commit 193fcf371f9e3705c14a0bf1d4bfc44af0f7c124 ]
+
+In this preparation patch, we add helpers to convert lbas to sectors &
+sectors to lba. This is needed to eliminate code duplication in the ZBD
+backend.
+
+Use these helpers in the block device backend.
+
+Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/io-cmd-bdev.c |  8 +++-----
+ drivers/nvme/target/nvmet.h       | 10 ++++++++++
+ 2 files changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
+index 125dde3f410e..23095bdfce06 100644
+--- a/drivers/nvme/target/io-cmd-bdev.c
++++ b/drivers/nvme/target/io-cmd-bdev.c
+@@ -256,8 +256,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
+       if (is_pci_p2pdma_page(sg_page(req->sg)))
+               op |= REQ_NOMERGE;
+-      sector = le64_to_cpu(req->cmd->rw.slba);
+-      sector <<= (req->ns->blksize_shift - 9);
++      sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+       if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+               bio = &req->b.inline_bio;
+@@ -345,7 +344,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
+       int ret;
+       ret = __blkdev_issue_discard(ns->bdev,
+-                      le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
++                      nvmet_lba_to_sect(ns, range->slba),
+                       le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
+                       GFP_KERNEL, 0, bio);
+       if (ret && ret != -EOPNOTSUPP) {
+@@ -414,8 +413,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
+       if (!nvmet_check_transfer_len(req, 0))
+               return;
+-      sector = le64_to_cpu(write_zeroes->slba) <<
+-              (req->ns->blksize_shift - 9);
++      sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
+       nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
+               (req->ns->blksize_shift - 9));
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index 592763732065..8776dd1a0490 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -603,4 +603,14 @@ static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
+       return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
+ }
++static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
++{
++      return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
++}
++
++static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
++{
++      return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
++}
++
+ #endif /* _NVMET_H */
+-- 
+2.30.2
+
diff --git a/queue-5.11/nvmet-fix-inline-bio-check-for-bdev-ns.patch b/queue-5.11/nvmet-fix-inline-bio-check-for-bdev-ns.patch
new file mode 100644 (file)
index 0000000..ee29eda
--- /dev/null
@@ -0,0 +1,82 @@
+From 737d71a9d0e44a644137ab0bd973689b1436c04d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 May 2021 18:51:35 -0700
+Subject: nvmet: fix inline bio check for bdev-ns
+
+From: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+
+[ Upstream commit 608a969046e6e0567d05a166be66c77d2dd8220b ]
+
+When handling rw commands, for inline bio case we only consider
+transfer size. This works well when req->sg_cnt fits into the
+req->inline_bvec, but it will result in the warning in
+__bio_add_page() when req->sg_cnt > NVMET_MAX_INLINE_BVEC.
+
+Consider an I/O size 32768 and first page is not aligned to the page
+boundary, then I/O is split in following manner :-
+
+[ 2206.256140] nvmet: sg->length 3440 sg->offset 656
+[ 2206.256144] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256148] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256152] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256155] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256159] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256163] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256166] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256170] nvmet: sg->length 656 sg->offset 0
+
+Now the req->transfer_size == NVMET_MAX_INLINE_DATA_LEN i.e. 32768, but
+the req->sg_cnt is (9) > NVMET_MAX_INLINE_BIOVEC which is (8).
+This will result in the following warning message :-
+
+nvmet_bdev_execute_rw()
+       bio_add_page()
+               __bio_add_page()
+                       WARN_ON_ONCE(bio_full(bio, len));
+
+This scenario is very hard to reproduce on the nvme-loop transport only
+with rw commands issued with the passthru IOCTL interface from the host
+application and the data buffer is allocated with the malloc() and not
+the posix_memalign().
+
+Fixes: 73383adfad24 ("nvmet: don't split large I/Os unconditionally")
+Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/io-cmd-bdev.c | 2 +-
+ drivers/nvme/target/nvmet.h       | 6 ++++++
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
+index 23095bdfce06..6a9626ff0713 100644
+--- a/drivers/nvme/target/io-cmd-bdev.c
++++ b/drivers/nvme/target/io-cmd-bdev.c
+@@ -258,7 +258,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
+       sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+-      if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
++      if (nvmet_use_inline_bvec(req)) {
+               bio = &req->b.inline_bio;
+               bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+       } else {
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index 8776dd1a0490..7f8712de77e0 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -613,4 +613,10 @@ static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
+       return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
+ }
++static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
++{
++      return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
++             req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
++}
++
+ #endif /* _NVMET_H */
+-- 
+2.30.2
+
diff --git a/queue-5.11/nvmet-fix-inline-bio-check-for-passthru.patch b/queue-5.11/nvmet-fix-inline-bio-check-for-passthru.patch
new file mode 100644 (file)
index 0000000..49ca437
--- /dev/null
@@ -0,0 +1,66 @@
+From f8807ac73f7a86ecaa78d9372166b3bcbe43e926 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 May 2021 18:51:36 -0700
+Subject: nvmet: fix inline bio check for passthru
+
+From: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+
+[ Upstream commit ab96de5def854d8fc51280b6a20597e64b14ac31 ]
+
+When handling passthru commands, for inline bio allocation we only
+consider the transfer size. This works well when req->sg_cnt fits into
+the req->inline_bvec, but it will result in the early return from
+bio_add_hw_page() when req->sg_cnt > NVMET_MAX_INLINE_BVEC.
+
+Consider an I/O of size 32768 and first buffer is not aligned to the
+page boundary, then I/O is split in following manner :-
+
+[ 2206.256140] nvmet: sg->length 3440 sg->offset 656
+[ 2206.256144] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256148] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256152] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256155] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256159] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256163] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256166] nvmet: sg->length 4096 sg->offset 0
+[ 2206.256170] nvmet: sg->length 656 sg->offset 0
+
+Now the req->transfer_size == NVMET_MAX_INLINE_DATA_LEN i.e. 32768, but
+the req->sg_cnt is (9) > NVMET_MAX_INLINE_BIOVEC which is (8).
+This will result in early return in the following code path :-
+
+nvmet_bdev_execute_rw()
+       bio_add_pc_page()
+               bio_add_hw_page()
+                       if (bio_full(bio, len))
+                               return 0;
+
+Use previously introduced helper nvmet_use_inline_bvec() to consider
+req->sg_cnt when using inline bio. This only affects nvme-loop
+transport.
+
+Fixes: dab3902b19a0 ("nvmet: use inline bio for passthru fast path")
+Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/passthru.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
+index b9776fc8f08f..df6f64870cec 100644
+--- a/drivers/nvme/target/passthru.c
++++ b/drivers/nvme/target/passthru.c
+@@ -194,7 +194,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
+       if (req->sg_cnt > BIO_MAX_PAGES)
+               return -EINVAL;
+-      if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
++      if (nvmet_use_inline_bvec(req)) {
+               bio = &req->p.inline_bio;
+               bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+       } else {
+-- 
+2.30.2
+
diff --git a/queue-5.11/nvmet-rdma-fix-null-deref-when-send-is-completed-wit.patch b/queue-5.11/nvmet-rdma-fix-null-deref-when-send-is-completed-wit.patch
new file mode 100644 (file)
index 0000000..af89c0a
--- /dev/null
@@ -0,0 +1,82 @@
+From d2a7d63b4eb2f73d99a5a4622e893888ae0f2ce6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 May 2021 10:08:19 +0300
+Subject: nvmet-rdma: Fix NULL deref when SEND is completed with error
+
+From: Michal Kalderon <michal.kalderon@marvell.com>
+
+[ Upstream commit 8cc365f9559b86802afc0208389f5c8d46b4ad61 ]
+
+When running some traffic and taking down the link on peer, a
+retry counter exceeded error is received. This leads to
+nvmet_rdma_error_comp which tried accessing the cq_context to
+obtain the queue. The cq_context is no longer valid after the
+fix to use shared CQ mechanism and should be obtained similar
+to how it is obtained in other functions from the wc->qp.
+
+[ 905.786331] nvmet_rdma: SEND for CQE 0x00000000e3337f90 failed with status transport retry counter exceeded (12).
+[ 905.832048] BUG: unable to handle kernel NULL pointer dereference at 0000000000000048
+[ 905.839919] PGD 0 P4D 0
+[ 905.842464] Oops: 0000 1 SMP NOPTI
+[ 905.846144] CPU: 13 PID: 1557 Comm: kworker/13:1H Kdump: loaded Tainted: G OE --------- - - 4.18.0-304.el8.x86_64 #1
+[ 905.872135] RIP: 0010:nvmet_rdma_error_comp+0x5/0x1b [nvmet_rdma]
+[ 905.878259] Code: 19 4f c0 e8 89 b3 a5 f6 e9 5b e0 ff ff 0f b7 75 14 4c 89 ea 48 c7 c7 08 1a 4f c0 e8 71 b3 a5 f6 e9 4b e0 ff ff 0f 1f 44 00 00 <48> 8b 47 48 48 85 c0 74 08 48 89 c7 e9 98 bf 49 00 e9 c3 e3 ff ff
+[ 905.897135] RSP: 0018:ffffab601c45fe28 EFLAGS: 00010246
+[ 905.902387] RAX: 0000000000000065 RBX: ffff9e729ea2f800 RCX: 0000000000000000
+[ 905.909558] RDX: 0000000000000000 RSI: ffff9e72df9567c8 RDI: 0000000000000000
+[ 905.916731] RBP: ffff9e729ea2b400 R08: 000000000000074d R09: 0000000000000074
+[ 905.923903] R10: 0000000000000000 R11: ffffab601c45fcc0 R12: 0000000000000010
+[ 905.931074] R13: 0000000000000000 R14: 0000000000000010 R15: ffff9e729ea2f400
+[ 905.938247] FS: 0000000000000000(0000) GS:ffff9e72df940000(0000) knlGS:0000000000000000
+[ 905.938249] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 905.950067] nvmet_rdma: SEND for CQE 0x00000000c7356cca failed with status transport retry counter exceeded (12).
+[ 905.961855] CR2: 0000000000000048 CR3: 000000678d010004 CR4: 00000000007706e0
+[ 905.961855] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 905.961856] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 905.961857] PKRU: 55555554
+[ 906.010315] Call Trace:
+[ 906.012778] __ib_process_cq+0x89/0x170 [ib_core]
+[ 906.017509] ib_cq_poll_work+0x26/0x80 [ib_core]
+[ 906.022152] process_one_work+0x1a7/0x360
+[ 906.026182] ? create_worker+0x1a0/0x1a0
+[ 906.030123] worker_thread+0x30/0x390
+[ 906.033802] ? create_worker+0x1a0/0x1a0
+[ 906.037744] kthread+0x116/0x130
+[ 906.040988] ? kthread_flush_work_fn+0x10/0x10
+[ 906.045456] ret_from_fork+0x1f/0x40
+
+Fixes: ca0f1a8055be2 ("nvmet-rdma: use new shared CQ mechanism")
+Signed-off-by: Shai Malin <smalin@marvell.com>
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/rdma.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index 6c1f3ab7649c..7d607f435e36 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -700,7 +700,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+       struct nvmet_rdma_rsp *rsp =
+               container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+-      struct nvmet_rdma_queue *queue = cq->cq_context;
++      struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+       nvmet_rdma_release_rsp(rsp);
+@@ -786,7 +786,7 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+       struct nvmet_rdma_rsp *rsp =
+               container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
+-      struct nvmet_rdma_queue *queue = cq->cq_context;
++      struct nvmet_rdma_queue *queue = wc->qp->qp_context;
+       struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+       u16 status;
+-- 
+2.30.2
+
diff --git a/queue-5.11/perf-tools-fix-dynamic-libbpf-link.patch b/queue-5.11/perf-tools-fix-dynamic-libbpf-link.patch
new file mode 100644 (file)
index 0000000..f74f18b
--- /dev/null
@@ -0,0 +1,74 @@
+From f2c67c17af082096697c551268c2c1909d28802d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 8 May 2021 22:50:20 +0200
+Subject: perf tools: Fix dynamic libbpf link
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+[ Upstream commit ad1237c30d975535a669746496cbed136aa5a045 ]
+
+Justin reported broken build with LIBBPF_DYNAMIC=1.
+
+When linking libbpf dynamically we need to use perf's
+hashmap object, because it's not exported in libbpf.so
+(only in libbpf.a).
+
+Following build is now passing:
+
+  $ make LIBBPF_DYNAMIC=1
+    BUILD:   Doing 'make -j8' parallel build
+    ...
+  $ ldd perf | grep libbpf
+        libbpf.so.0 => /lib64/libbpf.so.0 (0x00007fa7630db000)
+
+Fixes: eee19501926d ("perf tools: Grab a copy of libbpf's hashmap")
+Reported-by: Justin M. Forbes <jforbes@redhat.com>
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Ian Rogers <irogers@google.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Michael Petlan <mpetlan@redhat.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lore.kernel.org/lkml/20210508205020.617984-1-jolsa@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/Makefile.config | 1 +
+ tools/perf/util/Build      | 7 +++++++
+ 2 files changed, 8 insertions(+)
+
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index ce8516e4de34..2abbd75fbf2e 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -530,6 +530,7 @@ ifndef NO_LIBELF
+       ifdef LIBBPF_DYNAMIC
+         ifeq ($(feature-libbpf), 1)
+           EXTLIBS += -lbpf
++          $(call detected,CONFIG_LIBBPF_DYNAMIC)
+         else
+           dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
+         endif
+diff --git a/tools/perf/util/Build b/tools/perf/util/Build
+index e2563d0154eb..0cf27354aa45 100644
+--- a/tools/perf/util/Build
++++ b/tools/perf/util/Build
+@@ -140,7 +140,14 @@ perf-$(CONFIG_LIBELF) += symbol-elf.o
+ perf-$(CONFIG_LIBELF) += probe-file.o
+ perf-$(CONFIG_LIBELF) += probe-event.o
++ifdef CONFIG_LIBBPF_DYNAMIC
++  hashmap := 1
++endif
+ ifndef CONFIG_LIBBPF
++  hashmap := 1
++endif
++
++ifdef hashmap
+ perf-y += hashmap.o
+ endif
+-- 
+2.30.2
+
index 7dcd05bc762d74af0daf515d4ac09678fb55a37a..adc69c5eda2ad03a89e1f867e6dd5850542b46e0 100644 (file)
@@ -247,3 +247,34 @@ drm-radeon-dpm-disable-sclk-switching-on-oland-when-two-4k-60hz-monitors-are-con
 drm-amd-display-initialize-attribute-for-hdcp_srm-sysfs-file.patch
 drm-i915-avoid-div-by-zero-on-gen2.patch
 kvm-exit-halt-polling-on-need_resched-as-well.patch
+drm-msm-fix-llc-not-being-enabled-for-mmu500-targets.patch
+kvm-lapic-accurately-guarantee-busy-wait-for-timer-t.patch
+drm-msm-dp-initialize-audio_comp-when-audio-starts.patch
+kvm-x86-cancel-pvclock_gtod_work-on-module-removal.patch
+kvm-x86-prevent-deadlock-against-tk_core.seq.patch
+dax-add-an-enum-for-specifying-dax-wakup-mode.patch
+dax-add-a-wakeup-mode-parameter-to-put_unlocked_entr.patch
+dax-wake-up-all-waiters-after-invalidating-dax-entry.patch
+xen-unpopulated-alloc-fix-error-return-code-in-fill_.patch
+perf-tools-fix-dynamic-libbpf-link.patch
+usb-dwc3-gadget-free-gadget-structure-only-after-fre.patch
+iio-light-gp2ap002-fix-rumtime-pm-imbalance-on-error.patch
+iio-proximity-pulsedlight-fix-rumtime-pm-imbalance-o.patch
+iio-hid-sensors-select-iio_triggered_buffer-under-hi.patch
+iio-core-return-enodev-if-ioctl-is-unknown.patch
+usb-fotg210-hcd-fix-an-error-message.patch
+hwmon-occ-fix-poll-rate-limiting.patch
+usb-musb-fix-an-error-message.patch
+hwmon-ltc2992-put-fwnode-in-error-case-during-probe.patch
+acpi-scan-fix-a-memory-leak-in-an-error-handling-pat.patch
+kyber-fix-out-of-bounds-access-when-preempted.patch
+nvmet-add-lba-to-sect-conversion-helpers.patch
+nvmet-fix-inline-bio-check-for-bdev-ns.patch
+nvmet-fix-inline-bio-check-for-passthru.patch
+nvmet-rdma-fix-null-deref-when-send-is-completed-wit.patch
+f2fs-compress-fix-to-free-compress-page-correctly.patch
+f2fs-compress-fix-race-condition-of-overwrite-vs-tru.patch
+f2fs-compress-fix-to-assign-cc.cluster_idx-correctly.patch
+nbd-fix-null-pointer-in-flush_workqueue.patch
+blk-mq-plug-request-for-shared-sbitmap.patch
+blk-mq-swap-two-calls-in-blk_mq_exit_queue.patch
diff --git a/queue-5.11/usb-dwc3-gadget-free-gadget-structure-only-after-fre.patch b/queue-5.11/usb-dwc3-gadget-free-gadget-structure-only-after-fre.patch
new file mode 100644 (file)
index 0000000..1861abb
--- /dev/null
@@ -0,0 +1,64 @@
+From 7bcce31d56168f033cc27785ad1429995991fc16 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 1 May 2021 02:35:58 -0700
+Subject: usb: dwc3: gadget: Free gadget structure only after freeing endpoints
+
+From: Jack Pham <jackp@codeaurora.org>
+
+[ Upstream commit bb9c74a5bd1462499fe5ccb1e3c5ac40dcfa9139 ]
+
+As part of commit e81a7018d93a ("usb: dwc3: allocate gadget structure
+dynamically") the dwc3_gadget_release() was added which will free
+the dwc->gadget structure upon the device's removal when
+usb_del_gadget_udc() is called in dwc3_gadget_exit().
+
+However, simply freeing the gadget results a dangling pointer
+situation: the endpoints created in dwc3_gadget_init_endpoints()
+have their dep->endpoint.ep_list members chained off the list_head
+anchored at dwc->gadget->ep_list.  Thus when dwc->gadget is freed,
+the first dwc3_ep in the list now has a dangling prev pointer and
+likewise for the next pointer of the dwc3_ep at the tail of the list.
+The dwc3_gadget_free_endpoints() that follows will result in a
+use-after-free when it calls list_del().
+
+This was caught by enabling KASAN and performing a driver unbind.
+The recent commit 568262bf5492 ("usb: dwc3: core: Add shutdown
+callback for dwc3") also exposes this as a panic during shutdown.
+
+There are a few possibilities to fix this.  One could be to perform
+a list_del() of the gadget->ep_list itself which removes it from
+the rest of the dwc3_ep chain.
+
+Another approach is what this patch does, by splitting up the
+usb_del_gadget_udc() call into its separate "del" and "put"
+components.  This allows dwc3_gadget_free_endpoints() to be
+called before the gadget is finally freed with usb_put_gadget().
+
+Fixes: e81a7018d93a ("usb: dwc3: allocate gadget structure dynamically")
+Reviewed-by: Peter Chen <peter.chen@kernel.org>
+Signed-off-by: Jack Pham <jackp@codeaurora.org>
+Link: https://lore.kernel.org/r/20210501093558.7375-1-jackp@codeaurora.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/dwc3/gadget.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 84d1487e9f06..dab9b5fd15a9 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -3948,8 +3948,9 @@ int dwc3_gadget_init(struct dwc3 *dwc)
+ void dwc3_gadget_exit(struct dwc3 *dwc)
+ {
+-      usb_del_gadget_udc(dwc->gadget);
++      usb_del_gadget(dwc->gadget);
+       dwc3_gadget_free_endpoints(dwc);
++      usb_put_gadget(dwc->gadget);
+       dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
+                         dwc->bounce_addr);
+       kfree(dwc->setup_buf);
+-- 
+2.30.2
+
diff --git a/queue-5.11/usb-fotg210-hcd-fix-an-error-message.patch b/queue-5.11/usb-fotg210-hcd-fix-an-error-message.patch
new file mode 100644 (file)
index 0000000..e218c9a
--- /dev/null
@@ -0,0 +1,53 @@
+From e16bf961644a72110a06dbeb99a86af03375c1e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 May 2021 22:39:10 +0200
+Subject: usb: fotg210-hcd: Fix an error message
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit a60a34366e0d09ca002c966dd7c43a68c28b1f82 ]
+
+'retval' is known to be -ENODEV here.
+This is a hard-coded default error code which is not useful in the error
+message. Moreover, another error message is printed at the end of the
+error handling path. The corresponding error code (-ENOMEM) is more
+informative.
+
+So remove simplify the first error message.
+
+While at it, also remove the useless initialization of 'retval'.
+
+Fixes: 7d50195f6c50 ("usb: host: Faraday fotg210-hcd driver")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Link: https://lore.kernel.org/r/94531bcff98e46d4f9c20183a90b7f47f699126c.1620333419.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/host/fotg210-hcd.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
+index 5617ef30530a..f0e4a315cc81 100644
+--- a/drivers/usb/host/fotg210-hcd.c
++++ b/drivers/usb/host/fotg210-hcd.c
+@@ -5568,7 +5568,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
+       struct usb_hcd *hcd;
+       struct resource *res;
+       int irq;
+-      int retval = -ENODEV;
++      int retval;
+       struct fotg210_hcd *fotg210;
+       if (usb_disabled())
+@@ -5588,7 +5588,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
+       hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
+                       dev_name(dev));
+       if (!hcd) {
+-              dev_err(dev, "failed to create hcd with err %d\n", retval);
++              dev_err(dev, "failed to create hcd\n");
+               retval = -ENOMEM;
+               goto fail_create_hcd;
+       }
+-- 
+2.30.2
+
diff --git a/queue-5.11/usb-musb-fix-an-error-message.patch b/queue-5.11/usb-musb-fix-an-error-message.patch
new file mode 100644 (file)
index 0000000..3fdc22f
--- /dev/null
@@ -0,0 +1,38 @@
+From 7290d9f8945d6251940aaddd5ddf477ff5573f43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 May 2021 22:26:29 +0200
+Subject: usb: musb: Fix an error message
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit d9ff1096a840dddea3d5cfa2149ff7da9f499fb2 ]
+
+'ret' is known to be 0 here.
+Initialize 'ret' with the expected error code before using it.
+
+Fixes: 0990366bab3c ("usb: musb: Add support for MediaTek musb controller")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Link: https://lore.kernel.org/r/69f514dc7134e3c917cad208e73cc650cb9e2bd6.1620159879.git.christophe.jaillet@wanadoo.fr
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/musb/mediatek.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
+index eebeadd26946..6b92d037d8fc 100644
+--- a/drivers/usb/musb/mediatek.c
++++ b/drivers/usb/musb/mediatek.c
+@@ -518,8 +518,8 @@ static int mtk_musb_probe(struct platform_device *pdev)
+       glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+       if (IS_ERR(glue->xceiv)) {
+-              dev_err(dev, "fail to getting usb-phy %d\n", ret);
+               ret = PTR_ERR(glue->xceiv);
++              dev_err(dev, "fail to getting usb-phy %d\n", ret);
+               goto err_unregister_usb_phy;
+       }
+-- 
+2.30.2
+
diff --git a/queue-5.11/xen-unpopulated-alloc-fix-error-return-code-in-fill_.patch b/queue-5.11/xen-unpopulated-alloc-fix-error-return-code-in-fill_.patch
new file mode 100644 (file)
index 0000000..0ac8971
--- /dev/null
@@ -0,0 +1,42 @@
+From 257e336476857075e3c5ffebf9f794ab53118dda Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 8 May 2021 10:19:13 +0800
+Subject: xen/unpopulated-alloc: fix error return code in fill_list()
+
+From: Zhen Lei <thunder.leizhen@huawei.com>
+
+[ Upstream commit dbc03e81586fc33e4945263fd6e09e22eb4b980f ]
+
+Fix to return a negative error code from the error handling case instead
+of 0, as done elsewhere in this function.
+
+Fixes: a4574f63edc6 ("mm/memremap_pages: convert to 'struct range'")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Link: https://lore.kernel.org/r/20210508021913.1727-1-thunder.leizhen@huawei.com
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/unpopulated-alloc.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
+index e64e6befc63b..87e6b7db892f 100644
+--- a/drivers/xen/unpopulated-alloc.c
++++ b/drivers/xen/unpopulated-alloc.c
+@@ -39,8 +39,10 @@ static int fill_list(unsigned int nr_pages)
+       }
+       pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
+-      if (!pgmap)
++      if (!pgmap) {
++              ret = -ENOMEM;
+               goto err_pgmap;
++      }
+       pgmap->type = MEMORY_DEVICE_GENERIC;
+       pgmap->range = (struct range) {
+-- 
+2.30.2
+