From 1beee6a95bbf190b86ab33f964ea30c25e0f24db Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Sun, 16 May 2021 23:14:55 -0400 Subject: [PATCH] Fixes for 5.12 Signed-off-by: Sasha Levin --- ...memory-leak-in-an-error-handling-pat.patch | 36 ++++ ...k-mq-plug-request-for-shared-sbitmap.patch | 53 ++++++ ...-swap-two-calls-in-blk_mq_exit_queue.patch | 51 +++++ ...-mode-parameter-to-put_unlocked_entr.patch | 84 +++++++++ ...n-enum-for-specifying-dax-wakup-mode.patch | 106 +++++++++++ ...waiters-after-invalidating-dax-entry.patch | 81 ++++++++ ...tialize-audio_comp-when-audio-starts.patch | 98 ++++++++++ ...not-being-enabled-for-mmu500-targets.patch | 51 +++++ ...x-race-condition-of-overwrite-vs-tru.patch | 148 +++++++++++++++ ...x-to-assign-cc.cluster_idx-correctly.patch | 145 +++++++++++++++ ...-fix-to-free-compress-page-correctly.patch | 37 ++++ ...ut-fwnode-in-error-case-during-probe.patch | 50 +++++ .../hwmon-occ-fix-poll-rate-limiting.patch | 64 +++++++ ...re-return-enodev-if-ioctl-is-unknown.patch | 60 ++++++ ...select-iio_triggered_buffer-under-hi.patch | 174 ++++++++++++++++++ ...02-fix-rumtime-pm-imbalance-on-error.patch | 51 +++++ ...lsedlight-fix-rumtime-pm-imbalance-o.patch | 37 ++++ ...tely-guarantee-busy-wait-for-timer-t.patch | 45 +++++ ...ve-ghcb-unmapping-to-fix-rcu-warning.patch | 82 +++++++++ ...-pvclock_gtod_work-on-module-removal.patch | 49 +++++ ...prevent-deadlock-against-tk_core.seq.patch | 88 +++++++++ ...-out-of-bounds-access-when-preempted.patch | 162 ++++++++++++++++ ...-fix-null-pointer-in-flush_workqueue.patch | 86 +++++++++ ...met-fix-inline-bio-check-for-bdev-ns.patch | 82 +++++++++ ...et-fix-inline-bio-check-for-passthru.patch | 66 +++++++ ...ull-deref-when-send-is-completed-wit.patch | 82 +++++++++ .../perf-tools-fix-dynamic-libbpf-link.patch | 74 ++++++++ ...-nmi-record-implicitly-soft-masked-c.patch | 98 ++++++++++ ...learing-of-has_idle_cores-flag-in-se.patch | 50 +++++ queue-5.12/series | 34 ++++ ...free-gadget-structure-only-after-fre.patch | 64 +++++++ ...usb-fotg210-hcd-fix-an-error-message.patch | 53 ++++++ .../usb-musb-fix-an-error-message.patch | 38 ++++ ...ix-wrong-handling-for-not_supported-.patch | 79 ++++++++ ...alloc-fix-error-return-code-in-fill_.patch | 42 +++++ 35 files changed, 2600 insertions(+) create mode 100644 queue-5.12/acpi-scan-fix-a-memory-leak-in-an-error-handling-pat.patch create mode 100644 queue-5.12/blk-mq-plug-request-for-shared-sbitmap.patch create mode 100644 queue-5.12/blk-mq-swap-two-calls-in-blk_mq_exit_queue.patch create mode 100644 queue-5.12/dax-add-a-wakeup-mode-parameter-to-put_unlocked_entr.patch create mode 100644 queue-5.12/dax-add-an-enum-for-specifying-dax-wakup-mode.patch create mode 100644 queue-5.12/dax-wake-up-all-waiters-after-invalidating-dax-entry.patch create mode 100644 queue-5.12/drm-msm-dp-initialize-audio_comp-when-audio-starts.patch create mode 100644 queue-5.12/drm-msm-fix-llc-not-being-enabled-for-mmu500-targets.patch create mode 100644 queue-5.12/f2fs-compress-fix-race-condition-of-overwrite-vs-tru.patch create mode 100644 queue-5.12/f2fs-compress-fix-to-assign-cc.cluster_idx-correctly.patch create mode 100644 queue-5.12/f2fs-compress-fix-to-free-compress-page-correctly.patch create mode 100644 queue-5.12/hwmon-ltc2992-put-fwnode-in-error-case-during-probe.patch create mode 100644 queue-5.12/hwmon-occ-fix-poll-rate-limiting.patch create mode 100644 queue-5.12/iio-core-return-enodev-if-ioctl-is-unknown.patch create mode 100644 queue-5.12/iio-hid-sensors-select-iio_triggered_buffer-under-hi.patch 
create mode 100644 queue-5.12/iio-light-gp2ap002-fix-rumtime-pm-imbalance-on-error.patch create mode 100644 queue-5.12/iio-proximity-pulsedlight-fix-rumtime-pm-imbalance-o.patch create mode 100644 queue-5.12/kvm-lapic-accurately-guarantee-busy-wait-for-timer-t.patch create mode 100644 queue-5.12/kvm-svm-move-ghcb-unmapping-to-fix-rcu-warning.patch create mode 100644 queue-5.12/kvm-x86-cancel-pvclock_gtod_work-on-module-removal.patch create mode 100644 queue-5.12/kvm-x86-prevent-deadlock-against-tk_core.seq.patch create mode 100644 queue-5.12/kyber-fix-out-of-bounds-access-when-preempted.patch create mode 100644 queue-5.12/nbd-fix-null-pointer-in-flush_workqueue.patch create mode 100644 queue-5.12/nvmet-fix-inline-bio-check-for-bdev-ns.patch create mode 100644 queue-5.12/nvmet-fix-inline-bio-check-for-passthru.patch create mode 100644 queue-5.12/nvmet-rdma-fix-null-deref-when-send-is-completed-wit.patch create mode 100644 queue-5.12/perf-tools-fix-dynamic-libbpf-link.patch create mode 100644 queue-5.12/powerpc-64s-make-nmi-record-implicitly-soft-masked-c.patch create mode 100644 queue-5.12/sched-fair-fix-clearing-of-has_idle_cores-flag-in-se.patch create mode 100644 queue-5.12/usb-dwc3-gadget-free-gadget-structure-only-after-fre.patch create mode 100644 queue-5.12/usb-fotg210-hcd-fix-an-error-message.patch create mode 100644 queue-5.12/usb-musb-fix-an-error-message.patch create mode 100644 queue-5.12/usb-typec-tcpm-fix-wrong-handling-for-not_supported-.patch create mode 100644 queue-5.12/xen-unpopulated-alloc-fix-error-return-code-in-fill_.patch diff --git a/queue-5.12/acpi-scan-fix-a-memory-leak-in-an-error-handling-pat.patch b/queue-5.12/acpi-scan-fix-a-memory-leak-in-an-error-handling-pat.patch new file mode 100644 index 00000000000..52a7ab94715 --- /dev/null +++ b/queue-5.12/acpi-scan-fix-a-memory-leak-in-an-error-handling-pat.patch @@ -0,0 +1,36 @@ +From 0eedbdf87f63b33286fb5245934e76a163fc6112 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 8 May 2021 09:23:09 +0200 +Subject: ACPI: scan: Fix a memory leak in an error handling path + +From: Christophe JAILLET + +[ Upstream commit 0c8bd174f0fc131bc9dfab35cd8784f59045da87 ] + +If 'acpi_device_set_name()' fails, we must free +'acpi_device_bus_id->bus_id' or there is a (potential) memory leak. + +Fixes: eb50aaf960e3 ("ACPI: scan: Use unique number for instance_no") +Signed-off-by: Christophe JAILLET +Reviewed-by: Andy Shevchenko +Signed-off-by: Rafael J. 
Wysocki +Signed-off-by: Sasha Levin +--- + drivers/acpi/scan.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 6efe7edd7b1e..345777bf7af9 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -701,6 +701,7 @@ int acpi_device_add(struct acpi_device *device, + + result = acpi_device_set_name(device, acpi_device_bus_id); + if (result) { ++ kfree_const(acpi_device_bus_id->bus_id); + kfree(acpi_device_bus_id); + goto err_unlock; + } +-- +2.30.2 + diff --git a/queue-5.12/blk-mq-plug-request-for-shared-sbitmap.patch b/queue-5.12/blk-mq-plug-request-for-shared-sbitmap.patch new file mode 100644 index 00000000000..eea4308aa62 --- /dev/null +++ b/queue-5.12/blk-mq-plug-request-for-shared-sbitmap.patch @@ -0,0 +1,53 @@ +From 0629b59e21b77d081d0651064657d0852f6f3bc3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 14 May 2021 10:20:52 +0800 +Subject: blk-mq: plug request for shared sbitmap + +From: Ming Lei + +[ Upstream commit 03f26d8f11403295de445b6e4e0e57ac57755791 ] + +In case of shared sbitmap, request won't be held in plug list any more +sine commit 32bc15afed04 ("blk-mq: Facilitate a shared sbitmap per +tagset"), this way makes request merge from flush plug list & batching +submission not possible, so cause performance regression. + +Yanhui reports performance regression when running sequential IO +test(libaio, 16 jobs, 8 depth for each job) in VM, and the VM disk +is emulated with image stored on xfs/megaraid_sas. + +Fix the issue by recovering original behavior to allow to hold request +in plug list. + +Cc: Yanhui Ma +Cc: John Garry +Cc: Bart Van Assche +Cc: kashyap.desai@broadcom.com +Fixes: 32bc15afed04 ("blk-mq: Facilitate a shared sbitmap per tagset") +Signed-off-by: Ming Lei +Link: https://lore.kernel.org/r/20210514022052.1047665-1-ming.lei@redhat.com +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + block/blk-mq.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/block/blk-mq.c b/block/blk-mq.c +index d4d7c1caa439..c0b740be62ad 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -2216,8 +2216,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) + /* Bypass scheduler for flush requests */ + blk_insert_flush(rq); + blk_mq_run_hw_queue(data.hctx, true); +- } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs || +- !blk_queue_nonrot(q))) { ++ } else if (plug && (q->nr_hw_queues == 1 || ++ blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) || ++ q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) { + /* + * Use plugging if we have a ->commit_rqs() hook as well, as + * we know the driver uses bd->last in a smart fashion. +-- +2.30.2 + diff --git a/queue-5.12/blk-mq-swap-two-calls-in-blk_mq_exit_queue.patch b/queue-5.12/blk-mq-swap-two-calls-in-blk_mq_exit_queue.patch new file mode 100644 index 00000000000..906f729ee51 --- /dev/null +++ b/queue-5.12/blk-mq-swap-two-calls-in-blk_mq_exit_queue.patch @@ -0,0 +1,51 @@ +From 03e9d562a37205cb95d3d6f10321cf71c92a41f2 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 13 May 2021 10:15:29 -0700 +Subject: blk-mq: Swap two calls in blk_mq_exit_queue() + +From: Bart Van Assche + +[ Upstream commit 630ef623ed26c18a457cdc070cf24014e50129c2 ] + +If a tag set is shared across request queues (e.g. SCSI LUNs) then the +block layer core keeps track of the number of active request queues in +tags->active_queues. blk_mq_tag_busy() and blk_mq_tag_idle() update that +atomic counter if the hctx flag BLK_MQ_F_TAG_QUEUE_SHARED is set. 
Make +sure that blk_mq_exit_queue() calls blk_mq_tag_idle() before that flag is +cleared by blk_mq_del_queue_tag_set(). + +Cc: Christoph Hellwig +Cc: Ming Lei +Cc: Hannes Reinecke +Fixes: 0d2602ca30e4 ("blk-mq: improve support for shared tags maps") +Signed-off-by: Bart Van Assche +Reviewed-by: Ming Lei +Link: https://lore.kernel.org/r/20210513171529.7977-1-bvanassche@acm.org +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + block/blk-mq.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/block/blk-mq.c b/block/blk-mq.c +index c0b740be62ad..0e120547ccb7 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -3270,10 +3270,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue); + /* tags can _not_ be used after returning from blk_mq_exit_queue */ + void blk_mq_exit_queue(struct request_queue *q) + { +- struct blk_mq_tag_set *set = q->tag_set; ++ struct blk_mq_tag_set *set = q->tag_set; + +- blk_mq_del_queue_tag_set(q); ++ /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */ + blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); ++ /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */ ++ blk_mq_del_queue_tag_set(q); + } + + static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) +-- +2.30.2 + diff --git a/queue-5.12/dax-add-a-wakeup-mode-parameter-to-put_unlocked_entr.patch b/queue-5.12/dax-add-a-wakeup-mode-parameter-to-put_unlocked_entr.patch new file mode 100644 index 00000000000..1a2da211759 --- /dev/null +++ b/queue-5.12/dax-add-a-wakeup-mode-parameter-to-put_unlocked_entr.patch @@ -0,0 +1,84 @@ +From cb20b6d22dae61664c7cdb7082c8434ef623cf7d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 28 Apr 2021 15:03:13 -0400 +Subject: dax: Add a wakeup mode parameter to put_unlocked_entry() + +From: Vivek Goyal + +[ Upstream commit 4c3d043d271d4d629aa2328796cdfc96b37d3b3c ] + +As of now put_unlocked_entry() always wakes up next waiter. In next +patches we want to wake up all waiters at one callsite. Hence, add a +parameter to the function. + +This patch does not introduce any change of behavior. 
+ +Reviewed-by: Greg Kurz +Reviewed-by: Jan Kara +Suggested-by: Dan Williams +Signed-off-by: Vivek Goyal +Link: https://lore.kernel.org/r/20210428190314.1865312-3-vgoyal@redhat.com +Signed-off-by: Dan Williams +Signed-off-by: Sasha Levin +--- + fs/dax.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/fs/dax.c b/fs/dax.c +index 5ecee51c44ee..56eb1c759ca5 100644 +--- a/fs/dax.c ++++ b/fs/dax.c +@@ -275,11 +275,11 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry) + finish_wait(wq, &ewait.wait); + } + +-static void put_unlocked_entry(struct xa_state *xas, void *entry) ++static void put_unlocked_entry(struct xa_state *xas, void *entry, ++ enum dax_wake_mode mode) + { +- /* If we were the only waiter woken, wake the next one */ + if (entry && !dax_is_conflict(entry)) +- dax_wake_entry(xas, entry, WAKE_NEXT); ++ dax_wake_entry(xas, entry, mode); + } + + /* +@@ -633,7 +633,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping, + entry = get_unlocked_entry(&xas, 0); + if (entry) + page = dax_busy_page(entry); +- put_unlocked_entry(&xas, entry); ++ put_unlocked_entry(&xas, entry, WAKE_NEXT); + if (page) + break; + if (++scanned % XA_CHECK_SCHED) +@@ -675,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping, + mapping->nrexceptional--; + ret = 1; + out: +- put_unlocked_entry(&xas, entry); ++ put_unlocked_entry(&xas, entry, WAKE_NEXT); + xas_unlock_irq(&xas); + return ret; + } +@@ -954,7 +954,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, + return ret; + + put_unlocked: +- put_unlocked_entry(xas, entry); ++ put_unlocked_entry(xas, entry, WAKE_NEXT); + return ret; + } + +@@ -1695,7 +1695,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) + /* Did we race with someone splitting entry or so? */ + if (!entry || dax_is_conflict(entry) || + (order == 0 && !dax_is_pte_entry(entry))) { +- put_unlocked_entry(&xas, entry); ++ put_unlocked_entry(&xas, entry, WAKE_NEXT); + xas_unlock_irq(&xas); + trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, + VM_FAULT_NOPAGE); +-- +2.30.2 + diff --git a/queue-5.12/dax-add-an-enum-for-specifying-dax-wakup-mode.patch b/queue-5.12/dax-add-an-enum-for-specifying-dax-wakup-mode.patch new file mode 100644 index 00000000000..00dfe72bb4f --- /dev/null +++ b/queue-5.12/dax-add-an-enum-for-specifying-dax-wakup-mode.patch @@ -0,0 +1,106 @@ +From 5b3d893a50c21668f46acf9d6f4ccfab1592679a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 28 Apr 2021 15:03:12 -0400 +Subject: dax: Add an enum for specifying dax wakup mode + +From: Vivek Goyal + +[ Upstream commit 698ab77aebffe08b312fbcdddeb0e8bd08b78717 ] + +Dan mentioned that he is not very fond of passing around a boolean true/false +to specify if only next waiter should be woken up or all waiters should be +woken up. He instead prefers that we introduce an enum and make it very +explicity at the callsite itself. Easier to read code. + +This patch should not introduce any change of behavior. 
+ +Reviewed-by: Greg Kurz +Reviewed-by: Jan Kara +Suggested-by: Dan Williams +Signed-off-by: Vivek Goyal +Link: https://lore.kernel.org/r/20210428190314.1865312-2-vgoyal@redhat.com +Signed-off-by: Dan Williams +Signed-off-by: Sasha Levin +--- + fs/dax.c | 23 +++++++++++++++++------ + 1 file changed, 17 insertions(+), 6 deletions(-) + +diff --git a/fs/dax.c b/fs/dax.c +index b3d27fdc6775..5ecee51c44ee 100644 +--- a/fs/dax.c ++++ b/fs/dax.c +@@ -144,6 +144,16 @@ struct wait_exceptional_entry_queue { + struct exceptional_entry_key key; + }; + ++/** ++ * enum dax_wake_mode: waitqueue wakeup behaviour ++ * @WAKE_ALL: wake all waiters in the waitqueue ++ * @WAKE_NEXT: wake only the first waiter in the waitqueue ++ */ ++enum dax_wake_mode { ++ WAKE_ALL, ++ WAKE_NEXT, ++}; ++ + static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas, + void *entry, struct exceptional_entry_key *key) + { +@@ -182,7 +192,8 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait, + * The important information it's conveying is whether the entry at + * this index used to be a PMD entry. + */ +-static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all) ++static void dax_wake_entry(struct xa_state *xas, void *entry, ++ enum dax_wake_mode mode) + { + struct exceptional_entry_key key; + wait_queue_head_t *wq; +@@ -196,7 +207,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all) + * must be in the waitqueue and the following check will see them. + */ + if (waitqueue_active(wq)) +- __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key); ++ __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key); + } + + /* +@@ -268,7 +279,7 @@ static void put_unlocked_entry(struct xa_state *xas, void *entry) + { + /* If we were the only waiter woken, wake the next one */ + if (entry && !dax_is_conflict(entry)) +- dax_wake_entry(xas, entry, false); ++ dax_wake_entry(xas, entry, WAKE_NEXT); + } + + /* +@@ -286,7 +297,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry) + old = xas_store(xas, entry); + xas_unlock_irq(xas); + BUG_ON(!dax_is_locked(old)); +- dax_wake_entry(xas, entry, false); ++ dax_wake_entry(xas, entry, WAKE_NEXT); + } + + /* +@@ -524,7 +535,7 @@ static void *grab_mapping_entry(struct xa_state *xas, + + dax_disassociate_entry(entry, mapping, false); + xas_store(xas, NULL); /* undo the PMD join */ +- dax_wake_entry(xas, entry, true); ++ dax_wake_entry(xas, entry, WAKE_ALL); + mapping->nrexceptional--; + entry = NULL; + xas_set(xas, index); +@@ -937,7 +948,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, + xas_lock_irq(xas); + xas_store(xas, entry); + xas_clear_mark(xas, PAGECACHE_TAG_DIRTY); +- dax_wake_entry(xas, entry, false); ++ dax_wake_entry(xas, entry, WAKE_NEXT); + + trace_dax_writeback_one(mapping->host, index, count); + return ret; +-- +2.30.2 + diff --git a/queue-5.12/dax-wake-up-all-waiters-after-invalidating-dax-entry.patch b/queue-5.12/dax-wake-up-all-waiters-after-invalidating-dax-entry.patch new file mode 100644 index 00000000000..a919120b8af --- /dev/null +++ b/queue-5.12/dax-wake-up-all-waiters-after-invalidating-dax-entry.patch @@ -0,0 +1,81 @@ +From 2d8c41e975eaaf79671e135e50889873811f8c09 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 28 Apr 2021 15:03:14 -0400 +Subject: dax: Wake up all waiters after invalidating dax entry + +From: Vivek Goyal + +[ Upstream commit 237388320deffde7c2d65ed8fc9eef670dc979b3 ] + +I am seeing missed wakeups which ultimately lead to a deadlock when I am 
+using virtiofs with DAX enabled and running "make -j". I had to mount +virtiofs as rootfs and also reduce to dax window size to 256M to reproduce +the problem consistently. + +So here is the problem. put_unlocked_entry() wakes up waiters only +if entry is not null as well as !dax_is_conflict(entry). But if I +call multiple instances of invalidate_inode_pages2() in parallel, +then I can run into a situation where there are waiters on +this index but nobody will wake these waiters. + +invalidate_inode_pages2() + invalidate_inode_pages2_range() + invalidate_exceptional_entry2() + dax_invalidate_mapping_entry_sync() + __dax_invalidate_entry() { + xas_lock_irq(&xas); + entry = get_unlocked_entry(&xas, 0); + ... + ... + dax_disassociate_entry(entry, mapping, trunc); + xas_store(&xas, NULL); + ... + ... + put_unlocked_entry(&xas, entry); + xas_unlock_irq(&xas); + } + +Say a fault in in progress and it has locked entry at offset say "0x1c". +Now say three instances of invalidate_inode_pages2() are in progress +(A, B, C) and they all try to invalidate entry at offset "0x1c". Given +dax entry is locked, all tree instances A, B, C will wait in wait queue. + +When dax fault finishes, say A is woken up. It will store NULL entry +at index "0x1c" and wake up B. When B comes along it will find "entry=0" +at page offset 0x1c and it will call put_unlocked_entry(&xas, 0). And +this means put_unlocked_entry() will not wake up next waiter, given +the current code. And that means C continues to wait and is not woken +up. + +This patch fixes the issue by waking up all waiters when a dax entry +has been invalidated. This seems to fix the deadlock I am facing +and I can make forward progress. + +Reported-by: Sergio Lopez +Fixes: ac401cc78242 ("dax: New fault locking") +Reviewed-by: Jan Kara +Suggested-by: Dan Williams +Signed-off-by: Vivek Goyal +Link: https://lore.kernel.org/r/20210428190314.1865312-4-vgoyal@redhat.com +Signed-off-by: Dan Williams +Signed-off-by: Sasha Levin +--- + fs/dax.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/fs/dax.c b/fs/dax.c +index 56eb1c759ca5..df5485b4bddf 100644 +--- a/fs/dax.c ++++ b/fs/dax.c +@@ -675,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping, + mapping->nrexceptional--; + ret = 1; + out: +- put_unlocked_entry(&xas, entry, WAKE_NEXT); ++ put_unlocked_entry(&xas, entry, WAKE_ALL); + xas_unlock_irq(&xas); + return ret; + } +-- +2.30.2 + diff --git a/queue-5.12/drm-msm-dp-initialize-audio_comp-when-audio-starts.patch b/queue-5.12/drm-msm-dp-initialize-audio_comp-when-audio-starts.patch new file mode 100644 index 00000000000..6f18f665c80 --- /dev/null +++ b/queue-5.12/drm-msm-dp-initialize-audio_comp-when-audio-starts.patch @@ -0,0 +1,98 @@ +From 7001fbfa2fb665cb5e1c25a4236b0f9e848c27f6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 21 Apr 2021 16:37:36 -0700 +Subject: drm/msm/dp: initialize audio_comp when audio starts + +From: Kuogee Hsieh + +[ Upstream commit f2f46b878777e0d3f885c7ddad48f477b4dea247 ] + +Initialize audio_comp when audio starts and wait for audio_comp at +dp_display_disable(). This will take care of both dongle unplugged +and display off (suspend) cases. + +Changes in v2: +-- add dp_display_signal_audio_start() + +Changes in v3: +-- restore dp_display_handle_plugged_change() at dp_hpd_unplug_handle(). 
+ +Changes in v4: +-- none + +Signed-off-by: Kuogee Hsieh +Reviewed-by: Stephen Boyd +Tested-by: Stephen Boyd +Fixes: c703d5789590 ("drm/msm/dp: trigger unplug event in msm_dp_display_disable") +Link: https://lore.kernel.org/r/1619048258-8717-3-git-send-email-khsieh@codeaurora.org +Signed-off-by: Rob Clark +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/msm/dp/dp_audio.c | 1 + + drivers/gpu/drm/msm/dp/dp_display.c | 11 +++++++++-- + drivers/gpu/drm/msm/dp/dp_display.h | 1 + + 3 files changed, 11 insertions(+), 2 deletions(-) + +diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c +index 82a8673ab8da..d7e4a39a904e 100644 +--- a/drivers/gpu/drm/msm/dp/dp_audio.c ++++ b/drivers/gpu/drm/msm/dp/dp_audio.c +@@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev, + dp_audio_setup_acr(audio); + dp_audio_safe_to_exit_level(audio); + dp_audio_enable(audio, true); ++ dp_display_signal_audio_start(dp_display); + dp_display->audio_enabled = true; + + end: +diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c +index 5a39da6e1eaf..f3d74f8f35fe 100644 +--- a/drivers/gpu/drm/msm/dp/dp_display.c ++++ b/drivers/gpu/drm/msm/dp/dp_display.c +@@ -178,6 +178,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event) + return 0; + } + ++void dp_display_signal_audio_start(struct msm_dp *dp_display) ++{ ++ struct dp_display_private *dp; ++ ++ dp = container_of(dp_display, struct dp_display_private, dp_display); ++ ++ reinit_completion(&dp->audio_comp); ++} ++ + void dp_display_signal_audio_complete(struct msm_dp *dp_display) + { + struct dp_display_private *dp; +@@ -651,7 +660,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) + dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND); + + /* signal the disconnect event early to ensure proper teardown */ +- reinit_completion(&dp->audio_comp); + dp_display_handle_plugged_change(g_dp_display, false); + + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK | +@@ -898,7 +906,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data) + /* wait only if audio was enabled */ + if (dp_display->audio_enabled) { + /* signal the disconnect event */ +- reinit_completion(&dp->audio_comp); + dp_display_handle_plugged_change(dp_display, false); + if (!wait_for_completion_timeout(&dp->audio_comp, + HZ * 5)) +diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h +index 6092ba1ed85e..5173c89eedf7 100644 +--- a/drivers/gpu/drm/msm/dp/dp_display.h ++++ b/drivers/gpu/drm/msm/dp/dp_display.h +@@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display, + int dp_display_request_irq(struct msm_dp *dp_display); + bool dp_display_check_video_test(struct msm_dp *dp_display); + int dp_display_get_test_bpp(struct msm_dp *dp_display); ++void dp_display_signal_audio_start(struct msm_dp *dp_display); + void dp_display_signal_audio_complete(struct msm_dp *dp_display); + + #endif /* _DP_DISPLAY_H_ */ +-- +2.30.2 + diff --git a/queue-5.12/drm-msm-fix-llc-not-being-enabled-for-mmu500-targets.patch b/queue-5.12/drm-msm-fix-llc-not-being-enabled-for-mmu500-targets.patch new file mode 100644 index 00000000000..91e449ac47b --- /dev/null +++ b/queue-5.12/drm-msm-fix-llc-not-being-enabled-for-mmu500-targets.patch @@ -0,0 +1,51 @@ +From 78db52daf1ef9a834ca0c3d7adb886d465d8f66f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 23 Apr 2021 21:49:26 -0400 +Subject: drm/msm: fix LLC not being enabled for 
mmu500 targets + +From: Jonathan Marek + +[ Upstream commit 4b95d371fb001185af84d177e69a23d55bd0167a ] + +mmu500 targets don't have a "cx_mem" region, set llc_mmio to NULL in that +case to avoid the IS_ERR() condition in a6xx_llc_activate(). + +Fixes: 3d247123b5a1 ("drm/msm/a6xx: Add support for using system cache on MMU500 based targets") +Signed-off-by: Jonathan Marek +Link: https://lore.kernel.org/r/20210424014927.1661-1-jonathan@marek.ca +Signed-off-by: Rob Clark +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +index d553f62f4eeb..b4d8e1b01ee4 100644 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +@@ -1153,10 +1153,6 @@ static void a6xx_llc_slices_init(struct platform_device *pdev, + { + struct device_node *phandle; + +- a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx"); +- if (IS_ERR(a6xx_gpu->llc_mmio)) +- return; +- + /* + * There is a different programming path for targets with an mmu500 + * attached, so detect if that is the case +@@ -1166,6 +1162,11 @@ static void a6xx_llc_slices_init(struct platform_device *pdev, + of_device_is_compatible(phandle, "arm,mmu-500")); + of_node_put(phandle); + ++ if (a6xx_gpu->have_mmu500) ++ a6xx_gpu->llc_mmio = NULL; ++ else ++ a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx"); ++ + a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU); + a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW); + +-- +2.30.2 + diff --git a/queue-5.12/f2fs-compress-fix-race-condition-of-overwrite-vs-tru.patch b/queue-5.12/f2fs-compress-fix-race-condition-of-overwrite-vs-tru.patch new file mode 100644 index 00000000000..2e1ab34e931 --- /dev/null +++ b/queue-5.12/f2fs-compress-fix-race-condition-of-overwrite-vs-tru.patch @@ -0,0 +1,148 @@ +From cdb03f5419a8268044b48700b6b75a638f6ff168 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 10 May 2021 17:30:31 +0800 +Subject: f2fs: compress: fix race condition of overwrite vs truncate + +From: Chao Yu + +[ Upstream commit a949dc5f2c5cfe0c910b664650f45371254c0744 ] + +pos_fsstress testcase complains a panic as belew: + +------------[ cut here ]------------ +kernel BUG at fs/f2fs/compress.c:1082! +invalid opcode: 0000 [#1] SMP PTI +CPU: 4 PID: 2753477 Comm: kworker/u16:2 Tainted: G OE 5.12.0-rc1-custom #1 +Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014 +Workqueue: writeback wb_workfn (flush-252:16) +RIP: 0010:prepare_compress_overwrite+0x4c0/0x760 [f2fs] +Call Trace: + f2fs_prepare_compress_overwrite+0x5f/0x80 [f2fs] + f2fs_write_cache_pages+0x468/0x8a0 [f2fs] + f2fs_write_data_pages+0x2a4/0x2f0 [f2fs] + do_writepages+0x38/0xc0 + __writeback_single_inode+0x44/0x2a0 + writeback_sb_inodes+0x223/0x4d0 + __writeback_inodes_wb+0x56/0xf0 + wb_writeback+0x1dd/0x290 + wb_workfn+0x309/0x500 + process_one_work+0x220/0x3c0 + worker_thread+0x53/0x420 + kthread+0x12f/0x150 + ret_from_fork+0x22/0x30 + +The root cause is truncate() may race with overwrite as below, +so that one reference count left in page can not guarantee the +page attaching in mapping tree all the time, after truncation, +later find_lock_page() may return NULL pointer. + +- prepare_compress_overwrite + - f2fs_pagecache_get_page + - unlock_page + - f2fs_setattr + - truncate_setsize + - truncate_inode_page + - delete_from_page_cache + - find_lock_page + +Fix this by avoiding referencing updated page. 
+ +Fixes: 4c8ff7095bef ("f2fs: support data compression") +Signed-off-by: Chao Yu +Signed-off-by: Jaegeuk Kim +Signed-off-by: Sasha Levin +--- + fs/f2fs/compress.c | 35 ++++++++++++----------------------- + 1 file changed, 12 insertions(+), 23 deletions(-) + +diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c +index 146a8eb3891b..b7ab3bdf2259 100644 +--- a/fs/f2fs/compress.c ++++ b/fs/f2fs/compress.c +@@ -123,19 +123,6 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len) + f2fs_drop_rpages(cc, len, true); + } + +-static void f2fs_put_rpages_mapping(struct address_space *mapping, +- pgoff_t start, int len) +-{ +- int i; +- +- for (i = 0; i < len; i++) { +- struct page *page = find_get_page(mapping, start + i); +- +- put_page(page); +- put_page(page); +- } +-} +- + static void f2fs_put_rpages_wbc(struct compress_ctx *cc, + struct writeback_control *wbc, bool redirty, int unlock) + { +@@ -1048,7 +1035,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, + } + + if (PageUptodate(page)) +- unlock_page(page); ++ f2fs_put_page(page, 1); + else + f2fs_compress_ctx_add_page(cc, page); + } +@@ -1058,32 +1045,34 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, + + ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size, + &last_block_in_bio, false, true); ++ f2fs_put_rpages(cc); + f2fs_destroy_compress_ctx(cc); + if (ret) +- goto release_pages; ++ goto out; + if (bio) + f2fs_submit_bio(sbi, bio, DATA); + + ret = f2fs_init_compress_ctx(cc); + if (ret) +- goto release_pages; ++ goto out; + } + + for (i = 0; i < cc->cluster_size; i++) { + f2fs_bug_on(sbi, cc->rpages[i]); + + page = find_lock_page(mapping, start_idx + i); +- f2fs_bug_on(sbi, !page); ++ if (!page) { ++ /* page can be truncated */ ++ goto release_and_retry; ++ } + + f2fs_wait_on_page_writeback(page, DATA, true, true); +- + f2fs_compress_ctx_add_page(cc, page); +- f2fs_put_page(page, 0); + + if (!PageUptodate(page)) { ++release_and_retry: ++ f2fs_put_rpages(cc); + f2fs_unlock_rpages(cc, i + 1); +- f2fs_put_rpages_mapping(mapping, start_idx, +- cc->cluster_size); + f2fs_destroy_compress_ctx(cc); + goto retry; + } +@@ -1115,10 +1104,10 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, + } + + unlock_pages: ++ f2fs_put_rpages(cc); + f2fs_unlock_rpages(cc, i); +-release_pages: +- f2fs_put_rpages_mapping(mapping, start_idx, i); + f2fs_destroy_compress_ctx(cc); ++out: + return ret; + } + +-- +2.30.2 + diff --git a/queue-5.12/f2fs-compress-fix-to-assign-cc.cluster_idx-correctly.patch b/queue-5.12/f2fs-compress-fix-to-assign-cc.cluster_idx-correctly.patch new file mode 100644 index 00000000000..aa71d06bdbc --- /dev/null +++ b/queue-5.12/f2fs-compress-fix-to-assign-cc.cluster_idx-correctly.patch @@ -0,0 +1,145 @@ +From a92386df3be507d70e07b25fb0a7221541bb96ba Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 10 May 2021 17:30:32 +0800 +Subject: f2fs: compress: fix to assign cc.cluster_idx correctly + +From: Chao Yu + +[ Upstream commit 8bfbfb0ddd706b1ce2e89259ecc45f192c0ec2bf ] + +In f2fs_destroy_compress_ctx(), after f2fs_destroy_compress_ctx(), +cc.cluster_idx will be cleared w/ NULL_CLUSTER, f2fs_cluster_blocks() +may check wrong cluster metadata, fix it. 
+ +Fixes: 4c8ff7095bef ("f2fs: support data compression") +Signed-off-by: Chao Yu +Signed-off-by: Jaegeuk Kim +Signed-off-by: Sasha Levin +--- + fs/f2fs/compress.c | 17 +++++++++-------- + fs/f2fs/data.c | 6 +++--- + fs/f2fs/f2fs.h | 2 +- + 3 files changed, 13 insertions(+), 12 deletions(-) + +diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c +index b7ab3bdf2259..66dd525a8554 100644 +--- a/fs/f2fs/compress.c ++++ b/fs/f2fs/compress.c +@@ -151,13 +151,14 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc) + return cc->rpages ? 0 : -ENOMEM; + } + +-void f2fs_destroy_compress_ctx(struct compress_ctx *cc) ++void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse) + { + page_array_free(cc->inode, cc->rpages, cc->cluster_size); + cc->rpages = NULL; + cc->nr_rpages = 0; + cc->nr_cpages = 0; +- cc->cluster_idx = NULL_CLUSTER; ++ if (!reuse) ++ cc->cluster_idx = NULL_CLUSTER; + } + + void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page) +@@ -1046,7 +1047,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, + ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size, + &last_block_in_bio, false, true); + f2fs_put_rpages(cc); +- f2fs_destroy_compress_ctx(cc); ++ f2fs_destroy_compress_ctx(cc, true); + if (ret) + goto out; + if (bio) +@@ -1073,7 +1074,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, + release_and_retry: + f2fs_put_rpages(cc); + f2fs_unlock_rpages(cc, i + 1); +- f2fs_destroy_compress_ctx(cc); ++ f2fs_destroy_compress_ctx(cc, true); + goto retry; + } + } +@@ -1106,7 +1107,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, + unlock_pages: + f2fs_put_rpages(cc); + f2fs_unlock_rpages(cc, i); +- f2fs_destroy_compress_ctx(cc); ++ f2fs_destroy_compress_ctx(cc, true); + out: + return ret; + } +@@ -1142,7 +1143,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata, + set_cluster_dirty(&cc); + + f2fs_put_rpages_wbc(&cc, NULL, false, 1); +- f2fs_destroy_compress_ctx(&cc); ++ f2fs_destroy_compress_ctx(&cc, false); + + return first_index; + } +@@ -1361,7 +1362,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, + f2fs_put_rpages(cc); + page_array_free(cc->inode, cc->cpages, cc->nr_cpages); + cc->cpages = NULL; +- f2fs_destroy_compress_ctx(cc); ++ f2fs_destroy_compress_ctx(cc, false); + return 0; + + out_destroy_crypt: +@@ -1523,7 +1524,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc, + err = f2fs_write_raw_pages(cc, submitted, wbc, io_type); + f2fs_put_rpages_wbc(cc, wbc, false, 0); + destroy_out: +- f2fs_destroy_compress_ctx(cc); ++ f2fs_destroy_compress_ctx(cc, false); + return err; + } + +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c +index 4e5257c763d0..8804a5d51380 100644 +--- a/fs/f2fs/data.c ++++ b/fs/f2fs/data.c +@@ -2276,7 +2276,7 @@ static int f2fs_mpage_readpages(struct inode *inode, + max_nr_pages, + &last_block_in_bio, + rac != NULL, false); +- f2fs_destroy_compress_ctx(&cc); ++ f2fs_destroy_compress_ctx(&cc, false); + if (ret) + goto set_error_page; + } +@@ -2321,7 +2321,7 @@ static int f2fs_mpage_readpages(struct inode *inode, + max_nr_pages, + &last_block_in_bio, + rac != NULL, false); +- f2fs_destroy_compress_ctx(&cc); ++ f2fs_destroy_compress_ctx(&cc, false); + } + } + #endif +@@ -3022,7 +3022,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping, + } + } + if (f2fs_compressed_file(inode)) +- f2fs_destroy_compress_ctx(&cc); ++ f2fs_destroy_compress_ctx(&cc, false); + #endif + if (retry) { + index = 0; +diff --git a/fs/f2fs/f2fs.h 
b/fs/f2fs/f2fs.h +index eaf6f62206de..f3fabb1edfe9 100644 +--- a/fs/f2fs/f2fs.h ++++ b/fs/f2fs/f2fs.h +@@ -3950,7 +3950,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc); + void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed); + void f2fs_put_page_dic(struct page *page); + int f2fs_init_compress_ctx(struct compress_ctx *cc); +-void f2fs_destroy_compress_ctx(struct compress_ctx *cc); ++void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse); + void f2fs_init_compress_info(struct f2fs_sb_info *sbi); + int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi); + void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi); +-- +2.30.2 + diff --git a/queue-5.12/f2fs-compress-fix-to-free-compress-page-correctly.patch b/queue-5.12/f2fs-compress-fix-to-free-compress-page-correctly.patch new file mode 100644 index 00000000000..5b3bfe8cee0 --- /dev/null +++ b/queue-5.12/f2fs-compress-fix-to-free-compress-page-correctly.patch @@ -0,0 +1,37 @@ +From 029cab8350ad8582b8d9a52e35c6d98a8088f21e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 6 May 2021 17:00:43 +0800 +Subject: f2fs: compress: fix to free compress page correctly + +From: Chao Yu + +[ Upstream commit a12cc5b423d4f36dc1a1ea3911e49cf9dff43898 ] + +In error path of f2fs_write_compressed_pages(), it needs to call +f2fs_compress_free_page() to release temporary page. + +Fixes: 5e6bbde95982 ("f2fs: introduce mempool for {,de}compress intermediate page allocation") +Signed-off-by: Chao Yu +Signed-off-by: Jaegeuk Kim +Signed-off-by: Sasha Levin +--- + fs/f2fs/compress.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c +index 77fa342de38f..146a8eb3891b 100644 +--- a/fs/f2fs/compress.c ++++ b/fs/f2fs/compress.c +@@ -1383,7 +1383,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, + for (i = 0; i < cc->nr_cpages; i++) { + if (!cc->cpages[i]) + continue; +- f2fs_put_page(cc->cpages[i], 1); ++ f2fs_compress_free_page(cc->cpages[i]); ++ cc->cpages[i] = NULL; + } + out_put_cic: + kmem_cache_free(cic_entry_slab, cic); +-- +2.30.2 + diff --git a/queue-5.12/hwmon-ltc2992-put-fwnode-in-error-case-during-probe.patch b/queue-5.12/hwmon-ltc2992-put-fwnode-in-error-case-during-probe.patch new file mode 100644 index 00000000000..d9d7b71df6b --- /dev/null +++ b/queue-5.12/hwmon-ltc2992-put-fwnode-in-error-case-during-probe.patch @@ -0,0 +1,50 @@ +From df630d6968650f201cb0468f6809f057a87ad06f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 10 May 2021 13:01:36 +0300 +Subject: hwmon: (ltc2992) Put fwnode in error case during ->probe() + +From: Andy Shevchenko + +[ Upstream commit 8370e5b093080c03cf89f7ebf0bef6984545429e ] + +In each iteration fwnode_for_each_available_child_node() bumps a reference +counting of a loop variable followed by dropping in on a next iteration, + +Since in error case the loop is broken, we have to drop a reference count +by ourselves. Do it for port_fwnode in error case during ->probe(). 
+ +Fixes: b0bd407e94b0 ("hwmon: (ltc2992) Add support") +Cc: Alexandru Tachici +Signed-off-by: Andy Shevchenko +Link: https://lore.kernel.org/r/20210510100136.3303142-1-andy.shevchenko@gmail.com +Signed-off-by: Guenter Roeck +Signed-off-by: Sasha Levin +--- + drivers/hwmon/ltc2992.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c +index 4382105bf142..2a4bed0ab226 100644 +--- a/drivers/hwmon/ltc2992.c ++++ b/drivers/hwmon/ltc2992.c +@@ -900,11 +900,15 @@ static int ltc2992_parse_dt(struct ltc2992_state *st) + + fwnode_for_each_available_child_node(fwnode, child) { + ret = fwnode_property_read_u32(child, "reg", &addr); +- if (ret < 0) ++ if (ret < 0) { ++ fwnode_handle_put(child); + return ret; ++ } + +- if (addr > 1) ++ if (addr > 1) { ++ fwnode_handle_put(child); + return -EINVAL; ++ } + + ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val); + if (!ret) +-- +2.30.2 + diff --git a/queue-5.12/hwmon-occ-fix-poll-rate-limiting.patch b/queue-5.12/hwmon-occ-fix-poll-rate-limiting.patch new file mode 100644 index 00000000000..a0af6d88864 --- /dev/null +++ b/queue-5.12/hwmon-occ-fix-poll-rate-limiting.patch @@ -0,0 +1,64 @@ +From af3185647c67cc0ff3f44dbddca95f274b761b72 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 29 Apr 2021 10:13:36 -0500 +Subject: hwmon: (occ) Fix poll rate limiting + +From: Eddie James + +[ Upstream commit 5216dff22dc2bbbbe6f00335f9fd2879670e753b ] + +The poll rate limiter time was initialized at zero. This breaks the +comparison in time_after if jiffies is large. Switch to storing the +next update time rather than the previous time, and initialize the +time when the device is probed. + +Fixes: c10e753d43eb ("hwmon (occ): Add sensor types and versions") +Signed-off-by: Eddie James +Link: https://lore.kernel.org/r/20210429151336.18980-1-eajames@linux.ibm.com +Signed-off-by: Guenter Roeck +Signed-off-by: Sasha Levin +--- + drivers/hwmon/occ/common.c | 5 +++-- + drivers/hwmon/occ/common.h | 2 +- + 2 files changed, 4 insertions(+), 3 deletions(-) + +diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c +index 7a5e539b567b..580e63d7daa0 100644 +--- a/drivers/hwmon/occ/common.c ++++ b/drivers/hwmon/occ/common.c +@@ -217,9 +217,9 @@ int occ_update_response(struct occ *occ) + return rc; + + /* limit the maximum rate of polling the OCC */ +- if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) { ++ if (time_after(jiffies, occ->next_update)) { + rc = occ_poll(occ); +- occ->last_update = jiffies; ++ occ->next_update = jiffies + OCC_UPDATE_FREQUENCY; + } else { + rc = occ->last_error; + } +@@ -1164,6 +1164,7 @@ int occ_setup(struct occ *occ, const char *name) + return rc; + } + ++ occ->next_update = jiffies + OCC_UPDATE_FREQUENCY; + occ_parse_poll_response(occ); + + rc = occ_setup_sensor_attrs(occ); +diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h +index 67e6968b8978..e6df719770e8 100644 +--- a/drivers/hwmon/occ/common.h ++++ b/drivers/hwmon/occ/common.h +@@ -99,7 +99,7 @@ struct occ { + u8 poll_cmd_data; /* to perform OCC poll command */ + int (*send_cmd)(struct occ *occ, u8 *cmd); + +- unsigned long last_update; ++ unsigned long next_update; + struct mutex lock; /* lock OCC access */ + + struct device *hwmon; +-- +2.30.2 + diff --git a/queue-5.12/iio-core-return-enodev-if-ioctl-is-unknown.patch b/queue-5.12/iio-core-return-enodev-if-ioctl-is-unknown.patch new file mode 100644 index 00000000000..feee11c068a --- /dev/null +++ 
b/queue-5.12/iio-core-return-enodev-if-ioctl-is-unknown.patch @@ -0,0 +1,60 @@ +From 219a694b4559d9968b7f52c7b25621bb3965985e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 3 May 2021 17:43:50 +0300 +Subject: iio: core: return ENODEV if ioctl is unknown +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Alexandru Ardelean + +[ Upstream commit af0670b0bf1b116fd729b1b1011cf814bc34e12e ] + +When the ioctl() mechanism was introduced in IIO core to centralize the +registration of all ioctls in one place via commit 8dedcc3eee3ac ("iio: +core: centralize ioctl() calls to the main chardev"), the return code was +changed from ENODEV to EINVAL, when the ioctl code isn't known. + +This was done by accident. + +This change reverts back to the old behavior, where if the ioctl() code +isn't known, ENODEV is returned (vs EINVAL). + +This was brought into perspective by this patch: + https://lore.kernel.org/linux-iio/20210428150815.136150-1-paul@crapouillou.net/ + +Fixes: 8dedcc3eee3ac ("iio: core: centralize ioctl() calls to the main chardev") +Signed-off-by: Alexandru Ardelean +Reviewed-by: Nuno Sá +Tested-by: Paul Cercueil +Reviewed-by: Linus Walleij +Signed-off-by: Jonathan Cameron +Signed-off-by: Sasha Levin +--- + drivers/iio/industrialio-core.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c +index 7db761afa578..2050d341746b 100644 +--- a/drivers/iio/industrialio-core.c ++++ b/drivers/iio/industrialio-core.c +@@ -1734,7 +1734,6 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + if (!indio_dev->info) + goto out_unlock; + +- ret = -EINVAL; + list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) { + ret = h->ioctl(indio_dev, filp, cmd, arg); + if (ret != IIO_IOCTL_UNHANDLED) +@@ -1742,7 +1741,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) + } + + if (ret == IIO_IOCTL_UNHANDLED) +- ret = -EINVAL; ++ ret = -ENODEV; + + out_unlock: + mutex_unlock(&indio_dev->info_exist_lock); +-- +2.30.2 + diff --git a/queue-5.12/iio-hid-sensors-select-iio_triggered_buffer-under-hi.patch b/queue-5.12/iio-hid-sensors-select-iio_triggered_buffer-under-hi.patch new file mode 100644 index 00000000000..a945e6a3f64 --- /dev/null +++ b/queue-5.12/iio-hid-sensors-select-iio_triggered_buffer-under-hi.patch @@ -0,0 +1,174 @@ +From 1b77b2534532a8cc7679890579062c8fc8b63f3e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 14 Apr 2021 11:49:55 +0300 +Subject: iio: hid-sensors: select IIO_TRIGGERED_BUFFER under + HID_SENSOR_IIO_TRIGGER + +From: Alexandru Ardelean + +[ Upstream commit 7061803522ee7876df1ca18cdd1e1551f761352d ] + +During commit 067fda1c065ff ("iio: hid-sensors: move triggered buffer +setup into hid_sensor_setup_trigger"), the +iio_triggered_buffer_{setup,cleanup}() functions got moved under the +hid-sensor-trigger module. + +The above change works fine, if any of the sensors get built. However, when +only the common hid-sensor-trigger module gets built (and none of the +drivers), then the IIO_TRIGGERED_BUFFER symbol isn't selected/enforced. + +Previously, each driver would enforce/select the IIO_TRIGGERED_BUFFER +symbol. With this change the HID_SENSOR_IIO_TRIGGER (for the +hid-sensor-trigger module) will enforce that IIO_TRIGGERED_BUFFER gets +selected. + +All HID sensor drivers select the HID_SENSOR_IIO_TRIGGER symbol. 
So, this +change removes the IIO_TRIGGERED_BUFFER enforcement from each driver. + +Fixes: 067fda1c065ff ("iio: hid-sensors: move triggered buffer setup into hid_sensor_setup_trigger") +Reported-by: Thomas Deutschmann +Cc: Srinivas Pandruvada +Signed-off-by: Alexandru Ardelean +Acked-by: Srinivas Pandruvada +Link: https://lore.kernel.org/r/20210414084955.260117-1-aardelean@deviqon.com +Signed-off-by: Jonathan Cameron +Signed-off-by: Sasha Levin +--- + drivers/iio/accel/Kconfig | 1 - + drivers/iio/common/hid-sensors/Kconfig | 1 + + drivers/iio/gyro/Kconfig | 1 - + drivers/iio/humidity/Kconfig | 1 - + drivers/iio/light/Kconfig | 2 -- + drivers/iio/magnetometer/Kconfig | 1 - + drivers/iio/orientation/Kconfig | 2 -- + drivers/iio/pressure/Kconfig | 1 - + drivers/iio/temperature/Kconfig | 1 - + 9 files changed, 1 insertion(+), 10 deletions(-) + +diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig +index 2e0c62c39155..8acf277b8b25 100644 +--- a/drivers/iio/accel/Kconfig ++++ b/drivers/iio/accel/Kconfig +@@ -211,7 +211,6 @@ config DMARD10 + config HID_SENSOR_ACCEL_3D + depends on HID_SENSOR_HUB + select IIO_BUFFER +- select IIO_TRIGGERED_BUFFER + select HID_SENSOR_IIO_COMMON + select HID_SENSOR_IIO_TRIGGER + tristate "HID Accelerometers 3D" +diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig +index 24d492567336..2a3dd3b907be 100644 +--- a/drivers/iio/common/hid-sensors/Kconfig ++++ b/drivers/iio/common/hid-sensors/Kconfig +@@ -19,6 +19,7 @@ config HID_SENSOR_IIO_TRIGGER + tristate "Common module (trigger) for all HID Sensor IIO drivers" + depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER + select IIO_TRIGGER ++ select IIO_TRIGGERED_BUFFER + help + Say yes here to build trigger support for HID sensors. + Triggers will be send if all requested attributes were read. 
+diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig +index 5824f2edf975..20b5ac7ab66a 100644 +--- a/drivers/iio/gyro/Kconfig ++++ b/drivers/iio/gyro/Kconfig +@@ -111,7 +111,6 @@ config FXAS21002C_SPI + config HID_SENSOR_GYRO_3D + depends on HID_SENSOR_HUB + select IIO_BUFFER +- select IIO_TRIGGERED_BUFFER + select HID_SENSOR_IIO_COMMON + select HID_SENSOR_IIO_TRIGGER + tristate "HID Gyroscope 3D" +diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig +index 6549fcf6db69..2de5494e7c22 100644 +--- a/drivers/iio/humidity/Kconfig ++++ b/drivers/iio/humidity/Kconfig +@@ -52,7 +52,6 @@ config HID_SENSOR_HUMIDITY + tristate "HID Environmental humidity sensor" + depends on HID_SENSOR_HUB + select IIO_BUFFER +- select IIO_TRIGGERED_BUFFER + select HID_SENSOR_IIO_COMMON + select HID_SENSOR_IIO_TRIGGER + help +diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig +index 33ad4dd0b5c7..917f9becf9c7 100644 +--- a/drivers/iio/light/Kconfig ++++ b/drivers/iio/light/Kconfig +@@ -256,7 +256,6 @@ config ISL29125 + config HID_SENSOR_ALS + depends on HID_SENSOR_HUB + select IIO_BUFFER +- select IIO_TRIGGERED_BUFFER + select HID_SENSOR_IIO_COMMON + select HID_SENSOR_IIO_TRIGGER + tristate "HID ALS" +@@ -270,7 +269,6 @@ config HID_SENSOR_ALS + config HID_SENSOR_PROX + depends on HID_SENSOR_HUB + select IIO_BUFFER +- select IIO_TRIGGERED_BUFFER + select HID_SENSOR_IIO_COMMON + select HID_SENSOR_IIO_TRIGGER + tristate "HID PROX" +diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig +index 5d4ffd66032e..74ad5701c6c2 100644 +--- a/drivers/iio/magnetometer/Kconfig ++++ b/drivers/iio/magnetometer/Kconfig +@@ -95,7 +95,6 @@ config MAG3110 + config HID_SENSOR_MAGNETOMETER_3D + depends on HID_SENSOR_HUB + select IIO_BUFFER +- select IIO_TRIGGERED_BUFFER + select HID_SENSOR_IIO_COMMON + select HID_SENSOR_IIO_TRIGGER + tristate "HID Magenetometer 3D" +diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig +index a505583cc2fd..396cbbb867f4 100644 +--- a/drivers/iio/orientation/Kconfig ++++ b/drivers/iio/orientation/Kconfig +@@ -9,7 +9,6 @@ menu "Inclinometer sensors" + config HID_SENSOR_INCLINOMETER_3D + depends on HID_SENSOR_HUB + select IIO_BUFFER +- select IIO_TRIGGERED_BUFFER + select HID_SENSOR_IIO_COMMON + select HID_SENSOR_IIO_TRIGGER + tristate "HID Inclinometer 3D" +@@ -20,7 +19,6 @@ config HID_SENSOR_INCLINOMETER_3D + config HID_SENSOR_DEVICE_ROTATION + depends on HID_SENSOR_HUB + select IIO_BUFFER +- select IIO_TRIGGERED_BUFFER + select HID_SENSOR_IIO_COMMON + select HID_SENSOR_IIO_TRIGGER + tristate "HID Device Rotation" +diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig +index 689b978db4f9..fc0d3cfca418 100644 +--- a/drivers/iio/pressure/Kconfig ++++ b/drivers/iio/pressure/Kconfig +@@ -79,7 +79,6 @@ config DPS310 + config HID_SENSOR_PRESS + depends on HID_SENSOR_HUB + select IIO_BUFFER +- select IIO_TRIGGERED_BUFFER + select HID_SENSOR_IIO_COMMON + select HID_SENSOR_IIO_TRIGGER + tristate "HID PRESS" +diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig +index f1f2a1499c9e..4df60082c1fa 100644 +--- a/drivers/iio/temperature/Kconfig ++++ b/drivers/iio/temperature/Kconfig +@@ -45,7 +45,6 @@ config HID_SENSOR_TEMP + tristate "HID Environmental temperature sensor" + depends on HID_SENSOR_HUB + select IIO_BUFFER +- select IIO_TRIGGERED_BUFFER + select HID_SENSOR_IIO_COMMON + select HID_SENSOR_IIO_TRIGGER + help +-- +2.30.2 + diff --git 
a/queue-5.12/iio-light-gp2ap002-fix-rumtime-pm-imbalance-on-error.patch b/queue-5.12/iio-light-gp2ap002-fix-rumtime-pm-imbalance-on-error.patch new file mode 100644 index 00000000000..f1d4385e142 --- /dev/null +++ b/queue-5.12/iio-light-gp2ap002-fix-rumtime-pm-imbalance-on-error.patch @@ -0,0 +1,51 @@ +From b9db7f1e9618233ae0d572872f840bbe9948cf90 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 Apr 2021 11:49:27 +0800 +Subject: iio: light: gp2ap002: Fix rumtime PM imbalance on error + +From: Dinghao Liu + +[ Upstream commit 8edb79af88efc6e49e735f9baf61d9f0748b881f ] + +When devm_request_threaded_irq() fails, we should decrease the +runtime PM counter to keep the counter balanced. But when +iio_device_register() fails, we need not to decrease it because +we have already decreased it before. + +Signed-off-by: Dinghao Liu +Reviewed-by: Linus Walleij +Fixes: 97d642e23037 ("iio: light: Add a driver for Sharp GP2AP002x00F") +Link: https://lore.kernel.org/r/20210407034927.16882-1-dinghao.liu@zju.edu.cn +Signed-off-by: Jonathan Cameron +Signed-off-by: Sasha Levin +--- + drivers/iio/light/gp2ap002.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c +index 7ba7aa59437c..040d8429a6e0 100644 +--- a/drivers/iio/light/gp2ap002.c ++++ b/drivers/iio/light/gp2ap002.c +@@ -583,7 +583,7 @@ static int gp2ap002_probe(struct i2c_client *client, + "gp2ap002", indio_dev); + if (ret) { + dev_err(dev, "unable to request IRQ\n"); +- goto out_disable_vio; ++ goto out_put_pm; + } + gp2ap002->irq = client->irq; + +@@ -613,8 +613,9 @@ static int gp2ap002_probe(struct i2c_client *client, + + return 0; + +-out_disable_pm: ++out_put_pm: + pm_runtime_put_noidle(dev); ++out_disable_pm: + pm_runtime_disable(dev); + out_disable_vio: + regulator_disable(gp2ap002->vio); +-- +2.30.2 + diff --git a/queue-5.12/iio-proximity-pulsedlight-fix-rumtime-pm-imbalance-o.patch b/queue-5.12/iio-proximity-pulsedlight-fix-rumtime-pm-imbalance-o.patch new file mode 100644 index 00000000000..f9ec0aaca4d --- /dev/null +++ b/queue-5.12/iio-proximity-pulsedlight-fix-rumtime-pm-imbalance-o.patch @@ -0,0 +1,37 @@ +From 75527c5c36214277399941a7a4fd34ab31dbe6a6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 12 Apr 2021 13:32:02 +0800 +Subject: iio: proximity: pulsedlight: Fix rumtime PM imbalance on error + +From: Dinghao Liu + +[ Upstream commit a2fa9242e89f27696515699fe0f0296bf1ac1815 ] + +When lidar_write_control() fails, a pairing PM usage counter +decrement is needed to keep the counter balanced. 
+ +Fixes: 4ac4e086fd8c5 ("iio: pulsedlight-lidar-lite: add runtime PM") +Signed-off-by: Dinghao Liu +Reviewed-by: Andy Shevchenko +Link: https://lore.kernel.org/r/20210412053204.4889-1-dinghao.liu@zju.edu.cn +Signed-off-by: Jonathan Cameron +Signed-off-by: Sasha Levin +--- + drivers/iio/proximity/pulsedlight-lidar-lite-v2.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c +index c685f10b5ae4..cc206bfa09c7 100644 +--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c ++++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c +@@ -160,6 +160,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg) + ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE); + if (ret < 0) { + dev_err(&client->dev, "cannot send start measurement command"); ++ pm_runtime_put_noidle(&client->dev); + return ret; + } + +-- +2.30.2 + diff --git a/queue-5.12/kvm-lapic-accurately-guarantee-busy-wait-for-timer-t.patch b/queue-5.12/kvm-lapic-accurately-guarantee-busy-wait-for-timer-t.patch new file mode 100644 index 00000000000..499f4f01892 --- /dev/null +++ b/queue-5.12/kvm-lapic-accurately-guarantee-busy-wait-for-timer-t.patch @@ -0,0 +1,45 @@ +From 2e0fd767c3d2dbb872d3f7d2c377a0844b7ab478 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 28 Apr 2021 19:08:02 +0800 +Subject: KVM: LAPIC: Accurately guarantee busy wait for timer to expire when + using hv_timer + +From: Wanpeng Li + +[ Upstream commit d981dd15498b188636ec5a7d8ad485e650f63d8d ] + +Commit ee66e453db13d (KVM: lapic: Busy wait for timer to expire when +using hv_timer) tries to set ktime->expired_tscdeadline by checking +ktime->hv_timer_in_use since lapic timer oneshot/periodic modes which +are emulated by vmx preemption timer also get advanced, they leverage +the same vmx preemption timer logic with tsc-deadline mode. However, +ktime->hv_timer_in_use is cleared before apic_timer_expired() handling, +let's delay this clearing in preemption-disabled region. 
+ +Fixes: ee66e453db13d ("KVM: lapic: Busy wait for timer to expire when using hv_timer") +Reviewed-by: Sean Christopherson +Signed-off-by: Wanpeng Li +Message-Id: <1619608082-4187-1-git-send-email-wanpengli@tencent.com> +Signed-off-by: Paolo Bonzini +Signed-off-by: Sasha Levin +--- + arch/x86/kvm/lapic.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 49a839d0567a..fa023f3feb25 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -1913,8 +1913,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) + if (!apic->lapic_timer.hv_timer_in_use) + goto out; + WARN_ON(rcuwait_active(&vcpu->wait)); +- cancel_hv_timer(apic); + apic_timer_expired(apic, false); ++ cancel_hv_timer(apic); + + if (apic_lvtt_period(apic) && apic->lapic_timer.period) { + advance_periodic_target_expiration(apic); +-- +2.30.2 + diff --git a/queue-5.12/kvm-svm-move-ghcb-unmapping-to-fix-rcu-warning.patch b/queue-5.12/kvm-svm-move-ghcb-unmapping-to-fix-rcu-warning.patch new file mode 100644 index 00000000000..d6aa594d270 --- /dev/null +++ b/queue-5.12/kvm-svm-move-ghcb-unmapping-to-fix-rcu-warning.patch @@ -0,0 +1,82 @@ +From 26d0044848bf688635b32eb9625dd1ec717447cc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 6 May 2021 15:14:41 -0500 +Subject: KVM: SVM: Move GHCB unmapping to fix RCU warning + +From: Tom Lendacky + +[ Upstream commit ce7ea0cfdc2e9ff31d12da31c3226deddb9644f5 ] + +When an SEV-ES guest is running, the GHCB is unmapped as part of the +vCPU run support. However, kvm_vcpu_unmap() triggers an RCU dereference +warning with CONFIG_PROVE_LOCKING=y because the SRCU lock is released +before invoking the vCPU run support. + +Move the GHCB unmapping into the prepare_guest_switch callback, which is +invoked while still holding the SRCU lock, eliminating the RCU dereference +warning. 
+ +Fixes: 291bd20d5d88 ("KVM: SVM: Add initial support for a VMGEXIT VMEXIT") +Reported-by: Borislav Petkov +Signed-off-by: Tom Lendacky +Message-Id: +Signed-off-by: Paolo Bonzini +Signed-off-by: Sasha Levin +--- + arch/x86/kvm/svm/sev.c | 5 +---- + arch/x86/kvm/svm/svm.c | 3 +++ + arch/x86/kvm/svm/svm.h | 1 + + 3 files changed, 5 insertions(+), 4 deletions(-) + +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c +index ba56f677cc09..dbc6214d69de 100644 +--- a/arch/x86/kvm/svm/sev.c ++++ b/arch/x86/kvm/svm/sev.c +@@ -1668,7 +1668,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm) + return -EINVAL; + } + +-static void pre_sev_es_run(struct vcpu_svm *svm) ++void sev_es_unmap_ghcb(struct vcpu_svm *svm) + { + if (!svm->ghcb) + return; +@@ -1704,9 +1704,6 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu) + struct svm_cpu_data *sd = per_cpu(svm_data, cpu); + int asid = sev_get_asid(svm->vcpu.kvm); + +- /* Perform any SEV-ES pre-run actions */ +- pre_sev_es_run(svm); +- + /* Assign the asid allocated with this SEV guest */ + svm->asid = asid; + +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c +index 276d3c728628..5364458cf60b 100644 +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -1416,6 +1416,9 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) + struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); + unsigned int i; + ++ if (sev_es_guest(vcpu->kvm)) ++ sev_es_unmap_ghcb(svm); ++ + if (svm->guest_state_loaded) + return; + +diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h +index 39e071fdab0c..98da0b91f273 100644 +--- a/arch/x86/kvm/svm/svm.h ++++ b/arch/x86/kvm/svm/svm.h +@@ -571,6 +571,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm); + void sev_es_create_vcpu(struct vcpu_svm *svm); + void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector); + void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu); ++void sev_es_unmap_ghcb(struct vcpu_svm *svm); + + /* vmenter.S */ + +-- +2.30.2 + diff --git a/queue-5.12/kvm-x86-cancel-pvclock_gtod_work-on-module-removal.patch b/queue-5.12/kvm-x86-cancel-pvclock_gtod_work-on-module-removal.patch new file mode 100644 index 00000000000..aca9820f411 --- /dev/null +++ b/queue-5.12/kvm-x86-cancel-pvclock_gtod_work-on-module-removal.patch @@ -0,0 +1,49 @@ +From a2308c248421d7889519c150cc15cd6a031e58b1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 5 May 2021 23:48:17 +0200 +Subject: KVM: x86: Cancel pvclock_gtod_work on module removal + +From: Thomas Gleixner + +[ Upstream commit 594b27e677b35f9734b1969d175ebc6146741109 ] + +Nothing prevents the following: + + pvclock_gtod_notify() + queue_work(system_long_wq, &pvclock_gtod_work); + ... + remove_module(kvm); + ... + work_queue_run() + pvclock_gtod_work() <- UAF + +Ditto for any other operation on that workqueue list head which touches +pvclock_gtod_work after module removal. + +Cancel the work in kvm_arch_exit() to prevent that. 
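+
+The same rule applies to any work item whose callback lives in module
+text: cancel it and wait for it before the module goes away. A minimal
+generic sketch with hypothetical names (not the kvm code itself):
+
+  static void example_module_exit(void)
+  {
+          /* Wait for any queued or running instance so the callback
+           * cannot run after the module text has been freed. */
+          cancel_work_sync(&example_work);
+  }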
+ +Fixes: 16e8d74d2da9 ("KVM: x86: notifier for clocksource changes") +Signed-off-by: Thomas Gleixner +Message-Id: <87czu4onry.ffs@nanos.tec.linutronix.de> +Cc: stable@vger.kernel.org +Signed-off-by: Paolo Bonzini +Signed-off-by: Sasha Levin +--- + arch/x86/kvm/x86.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 3406ff421c1a..f1fd52eddabf 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -8096,6 +8096,7 @@ void kvm_arch_exit(void) + cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); + #ifdef CONFIG_X86_64 + pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); ++ cancel_work_sync(&pvclock_gtod_work); + #endif + kvm_x86_ops.hardware_enable = NULL; + kvm_mmu_module_exit(); +-- +2.30.2 + diff --git a/queue-5.12/kvm-x86-prevent-deadlock-against-tk_core.seq.patch b/queue-5.12/kvm-x86-prevent-deadlock-against-tk_core.seq.patch new file mode 100644 index 00000000000..ddaf07b8dff --- /dev/null +++ b/queue-5.12/kvm-x86-prevent-deadlock-against-tk_core.seq.patch @@ -0,0 +1,88 @@ +From 9ca2ad69a5060d82072cca17b078ff80400e0932 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 6 May 2021 15:21:37 +0200 +Subject: KVM: x86: Prevent deadlock against tk_core.seq + +From: Thomas Gleixner + +[ Upstream commit 3f804f6d201ca93adf4c3df04d1bfd152c1129d6 ] + +syzbot reported a possible deadlock in pvclock_gtod_notify(): + +CPU 0 CPU 1 +write_seqcount_begin(&tk_core.seq); + pvclock_gtod_notify() spin_lock(&pool->lock); + queue_work(..., &pvclock_gtod_work) ktime_get() + spin_lock(&pool->lock); do { + seq = read_seqcount_begin(tk_core.seq) + ... + } while (read_seqcount_retry(&tk_core.seq, seq); + +While this is unlikely to happen, it's possible. + +Delegate queue_work() to irq_work() which postpones it until the +tk_core.seq write held region is left and interrupts are reenabled. + +Fixes: 16e8d74d2da9 ("KVM: x86: notifier for clocksource changes") +Reported-by: syzbot+6beae4000559d41d80f8@syzkaller.appspotmail.com +Signed-off-by: Thomas Gleixner +Message-Id: <87h7jgm1zy.ffs@nanos.tec.linutronix.de> +Signed-off-by: Paolo Bonzini +Signed-off-by: Sasha Levin +--- + arch/x86/kvm/x86.c | 22 ++++++++++++++++++---- + 1 file changed, 18 insertions(+), 4 deletions(-) + +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index f1fd52eddabf..c37c23c6b178 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -7964,6 +7964,18 @@ static void pvclock_gtod_update_fn(struct work_struct *work) + + static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); + ++/* ++ * Indirection to move queue_work() out of the tk_core.seq write held ++ * region to prevent possible deadlocks against time accessors which ++ * are invoked with work related locks held. ++ */ ++static void pvclock_irq_work_fn(struct irq_work *w) ++{ ++ queue_work(system_long_wq, &pvclock_gtod_work); ++} ++ ++static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn); ++ + /* + * Notification about pvclock gtod data update. + */ +@@ -7975,13 +7987,14 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, + + update_pvclock_gtod(tk); + +- /* disable master clock if host does not trust, or does not +- * use, TSC based clocksource. ++ /* ++ * Disable master clock if host does not trust, or does not use, ++ * TSC based clocksource. Delegate queue_work() to irq_work as ++ * this is invoked with tk_core.seq write held. 
+ */ + if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && + atomic_read(&kvm_guest_has_master_clock) != 0) +- queue_work(system_long_wq, &pvclock_gtod_work); +- ++ irq_work_queue(&pvclock_irq_work); + return 0; + } + +@@ -8096,6 +8109,7 @@ void kvm_arch_exit(void) + cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); + #ifdef CONFIG_X86_64 + pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); ++ irq_work_sync(&pvclock_irq_work); + cancel_work_sync(&pvclock_gtod_work); + #endif + kvm_x86_ops.hardware_enable = NULL; +-- +2.30.2 + diff --git a/queue-5.12/kyber-fix-out-of-bounds-access-when-preempted.patch b/queue-5.12/kyber-fix-out-of-bounds-access-when-preempted.patch new file mode 100644 index 00000000000..ec3b2f2c533 --- /dev/null +++ b/queue-5.12/kyber-fix-out-of-bounds-access-when-preempted.patch @@ -0,0 +1,162 @@ +From fd0d4af29f568fa69e3c5261eab9eb23cd2fe5b3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 10 May 2021 17:05:35 -0700 +Subject: kyber: fix out of bounds access when preempted + +From: Omar Sandoval + +[ Upstream commit efed9a3337e341bd0989161b97453b52567bc59d ] + +__blk_mq_sched_bio_merge() gets the ctx and hctx for the current CPU and +passes the hctx to ->bio_merge(). kyber_bio_merge() then gets the ctx +for the current CPU again and uses that to get the corresponding Kyber +context in the passed hctx. However, the thread may be preempted between +the two calls to blk_mq_get_ctx(), and the ctx returned the second time +may no longer correspond to the passed hctx. This "works" accidentally +most of the time, but it can cause us to read garbage if the second ctx +came from an hctx with more ctx's than the first one (i.e., if +ctx->index_hw[hctx->type] > hctx->nr_ctx). + +This manifested as this UBSAN array index out of bounds error reported +by Jakub: + +UBSAN: array-index-out-of-bounds in ../kernel/locking/qspinlock.c:130:9 +index 13106 is out of range for type 'long unsigned int [128]' +Call Trace: + dump_stack+0xa4/0xe5 + ubsan_epilogue+0x5/0x40 + __ubsan_handle_out_of_bounds.cold.13+0x2a/0x34 + queued_spin_lock_slowpath+0x476/0x480 + do_raw_spin_lock+0x1c2/0x1d0 + kyber_bio_merge+0x112/0x180 + blk_mq_submit_bio+0x1f5/0x1100 + submit_bio_noacct+0x7b0/0x870 + submit_bio+0xc2/0x3a0 + btrfs_map_bio+0x4f0/0x9d0 + btrfs_submit_data_bio+0x24e/0x310 + submit_one_bio+0x7f/0xb0 + submit_extent_page+0xc4/0x440 + __extent_writepage_io+0x2b8/0x5e0 + __extent_writepage+0x28d/0x6e0 + extent_write_cache_pages+0x4d7/0x7a0 + extent_writepages+0xa2/0x110 + do_writepages+0x8f/0x180 + __writeback_single_inode+0x99/0x7f0 + writeback_sb_inodes+0x34e/0x790 + __writeback_inodes_wb+0x9e/0x120 + wb_writeback+0x4d2/0x660 + wb_workfn+0x64d/0xa10 + process_one_work+0x53a/0xa80 + worker_thread+0x69/0x5b0 + kthread+0x20b/0x240 + ret_from_fork+0x1f/0x30 + +Only Kyber uses the hctx, so fix it by passing the request_queue to +->bio_merge() instead. BFQ and mq-deadline just use that, and Kyber can +map the queues itself to avoid the mismatch. 
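+
+Roughly, the hazard looked like this (an illustrative sketch, not the
+exact code); the ctx/hctx pair is only consistent when both per-CPU
+lookups happen on the same CPU:
+
+  ctx  = blk_mq_get_ctx(q);                      /* per-CPU lookup on CPU A */
+  hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+  /* ...preemption, task migrates to CPU B... */
+  ctx  = blk_mq_get_ctx(q);                      /* now a CPU B ctx */
+  kcq  = &khd->kcqs[ctx->index_hw[hctx->type]];  /* may index past hctx->nr_ctx */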
+ +Fixes: a6088845c2bf ("block: kyber: make kyber more friendly with merging") +Reported-by: Jakub Kicinski +Signed-off-by: Omar Sandoval +Link: https://lore.kernel.org/r/c7598605401a48d5cfeadebb678abd10af22b83f.1620691329.git.osandov@fb.com +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + block/bfq-iosched.c | 3 +-- + block/blk-mq-sched.c | 8 +++++--- + block/kyber-iosched.c | 5 +++-- + block/mq-deadline.c | 3 +-- + include/linux/elevator.h | 2 +- + 5 files changed, 11 insertions(+), 10 deletions(-) + +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c +index 20ba5db0f61c..bc319931d2b3 100644 +--- a/block/bfq-iosched.c ++++ b/block/bfq-iosched.c +@@ -2263,10 +2263,9 @@ static void bfq_remove_request(struct request_queue *q, + + } + +-static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, ++static bool bfq_bio_merge(struct request_queue *q, struct bio *bio, + unsigned int nr_segs) + { +- struct request_queue *q = hctx->queue; + struct bfq_data *bfqd = q->elevator->elevator_data; + struct request *free = NULL; + /* +diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c +index e1e997af89a0..fdeb9773b55c 100644 +--- a/block/blk-mq-sched.c ++++ b/block/blk-mq-sched.c +@@ -348,14 +348,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, + unsigned int nr_segs) + { + struct elevator_queue *e = q->elevator; +- struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); +- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); ++ struct blk_mq_ctx *ctx; ++ struct blk_mq_hw_ctx *hctx; + bool ret = false; + enum hctx_type type; + + if (e && e->type->ops.bio_merge) +- return e->type->ops.bio_merge(hctx, bio, nr_segs); ++ return e->type->ops.bio_merge(q, bio, nr_segs); + ++ ctx = blk_mq_get_ctx(q); ++ hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); + type = hctx->type; + if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) || + list_empty_careful(&ctx->rq_lists[type])) +diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c +index 33d34d69cade..79b69d7046d6 100644 +--- a/block/kyber-iosched.c ++++ b/block/kyber-iosched.c +@@ -560,11 +560,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) + } + } + +-static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, ++static bool kyber_bio_merge(struct request_queue *q, struct bio *bio, + unsigned int nr_segs) + { ++ struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); ++ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); + struct kyber_hctx_data *khd = hctx->sched_data; +- struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue); + struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]]; + unsigned int sched_domain = kyber_sched_domain(bio->bi_opf); + struct list_head *rq_list = &kcq->rq_list[sched_domain]; +diff --git a/block/mq-deadline.c b/block/mq-deadline.c +index f3631a287466..3aabcd2a7893 100644 +--- a/block/mq-deadline.c ++++ b/block/mq-deadline.c +@@ -461,10 +461,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq, + return ELEVATOR_NO_MERGE; + } + +-static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, ++static bool dd_bio_merge(struct request_queue *q, struct bio *bio, + unsigned int nr_segs) + { +- struct request_queue *q = hctx->queue; + struct deadline_data *dd = q->elevator->elevator_data; + struct request *free = NULL; + bool ret; +diff --git a/include/linux/elevator.h b/include/linux/elevator.h +index 1fe8e105b83b..dcb2f9022c1d 100644 +--- a/include/linux/elevator.h ++++ 
b/include/linux/elevator.h +@@ -34,7 +34,7 @@ struct elevator_mq_ops { + void (*depth_updated)(struct blk_mq_hw_ctx *); + + bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); +- bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int); ++ bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int); + int (*request_merge)(struct request_queue *q, struct request **, struct bio *); + void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); + void (*requests_merged)(struct request_queue *, struct request *, struct request *); +-- +2.30.2 + diff --git a/queue-5.12/nbd-fix-null-pointer-in-flush_workqueue.patch b/queue-5.12/nbd-fix-null-pointer-in-flush_workqueue.patch new file mode 100644 index 00000000000..19b73af3a06 --- /dev/null +++ b/queue-5.12/nbd-fix-null-pointer-in-flush_workqueue.patch @@ -0,0 +1,86 @@ +From 7c71471dfce67f56acd01c619806db50175f7e3c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 12 May 2021 19:43:30 +0800 +Subject: nbd: Fix NULL pointer in flush_workqueue + +From: Sun Ke + +[ Upstream commit 79ebe9110fa458d58f1fceb078e2068d7ad37390 ] + +Open /dev/nbdX first, the config_refs will be 1 and +the pointers in nbd_device are still null. Disconnect +/dev/nbdX, then reference a null recv_workq. The +protection by config_refs in nbd_genl_disconnect is useless. + +[ 656.366194] BUG: kernel NULL pointer dereference, address: 0000000000000020 +[ 656.368943] #PF: supervisor write access in kernel mode +[ 656.369844] #PF: error_code(0x0002) - not-present page +[ 656.370717] PGD 10cc87067 P4D 10cc87067 PUD 1074b4067 PMD 0 +[ 656.371693] Oops: 0002 [#1] SMP +[ 656.372242] CPU: 5 PID: 7977 Comm: nbd-client Not tainted 5.11.0-rc5-00040-g76c057c84d28 #1 +[ 656.373661] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20190727_073836-buildvm-ppc64le-16.ppc.fedoraproject.org-3.fc31 04/01/2014 +[ 656.375904] RIP: 0010:mutex_lock+0x29/0x60 +[ 656.376627] Code: 00 0f 1f 44 00 00 55 48 89 fd 48 83 05 6f d7 fe 08 01 e8 7a c3 ff ff 48 83 05 6a d7 fe 08 01 31 c0 65 48 8b 14 25 00 6d 01 00 48 0f b1 55 d +[ 656.378934] RSP: 0018:ffffc900005eb9b0 EFLAGS: 00010246 +[ 656.379350] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000 +[ 656.379915] RDX: ffff888104cf2600 RSI: ffffffffaae8f452 RDI: 0000000000000020 +[ 656.380473] RBP: 0000000000000020 R08: 0000000000000000 R09: ffff88813bd6b318 +[ 656.381039] R10: 00000000000000c7 R11: fefefefefefefeff R12: ffff888102710b40 +[ 656.381599] R13: ffffc900005eb9e0 R14: ffffffffb2930680 R15: ffff88810770ef00 +[ 656.382166] FS: 00007fdf117ebb40(0000) GS:ffff88813bd40000(0000) knlGS:0000000000000000 +[ 656.382806] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ 656.383261] CR2: 0000000000000020 CR3: 0000000100c84000 CR4: 00000000000006e0 +[ 656.383819] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 +[ 656.384370] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 +[ 656.384927] Call Trace: +[ 656.385111] flush_workqueue+0x92/0x6c0 +[ 656.385395] nbd_disconnect_and_put+0x81/0xd0 +[ 656.385716] nbd_genl_disconnect+0x125/0x2a0 +[ 656.386034] genl_family_rcv_msg_doit.isra.0+0x102/0x1b0 +[ 656.386422] genl_rcv_msg+0xfc/0x2b0 +[ 656.386685] ? nbd_ioctl+0x490/0x490 +[ 656.386954] ? genl_family_rcv_msg_doit.isra.0+0x1b0/0x1b0 +[ 656.387354] netlink_rcv_skb+0x62/0x180 +[ 656.387638] genl_rcv+0x34/0x60 +[ 656.387874] netlink_unicast+0x26d/0x590 +[ 656.388162] netlink_sendmsg+0x398/0x6c0 +[ 656.388451] ? 
netlink_rcv_skb+0x180/0x180 +[ 656.388750] ____sys_sendmsg+0x1da/0x320 +[ 656.389038] ? ____sys_recvmsg+0x130/0x220 +[ 656.389334] ___sys_sendmsg+0x8e/0xf0 +[ 656.389605] ? ___sys_recvmsg+0xa2/0xf0 +[ 656.389889] ? handle_mm_fault+0x1671/0x21d0 +[ 656.390201] __sys_sendmsg+0x6d/0xe0 +[ 656.390464] __x64_sys_sendmsg+0x23/0x30 +[ 656.390751] do_syscall_64+0x45/0x70 +[ 656.391017] entry_SYSCALL_64_after_hwframe+0x44/0xa9 + +To fix it, just add if (nbd->recv_workq) to nbd_disconnect_and_put(). + +Fixes: e9e006f5fcf2 ("nbd: fix max number of supported devs") +Signed-off-by: Sun Ke +Reviewed-by: Josef Bacik +Link: https://lore.kernel.org/r/20210512114331.1233964-2-sunke32@huawei.com +Signed-off-by: Jens Axboe +Signed-off-by: Sasha Levin +--- + drivers/block/nbd.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c +index 4ff71b579cfc..974da561b8e5 100644 +--- a/drivers/block/nbd.c ++++ b/drivers/block/nbd.c +@@ -1980,7 +1980,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd) + * config ref and try to destroy the workqueue from inside the work + * queue. + */ +- flush_workqueue(nbd->recv_workq); ++ if (nbd->recv_workq) ++ flush_workqueue(nbd->recv_workq); + if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, + &nbd->config->runtime_flags)) + nbd_config_put(nbd); +-- +2.30.2 + diff --git a/queue-5.12/nvmet-fix-inline-bio-check-for-bdev-ns.patch b/queue-5.12/nvmet-fix-inline-bio-check-for-bdev-ns.patch new file mode 100644 index 00000000000..00bdf19a449 --- /dev/null +++ b/queue-5.12/nvmet-fix-inline-bio-check-for-bdev-ns.patch @@ -0,0 +1,82 @@ +From 9153b0bf3e5d8937525be28c81174ffba6d56967 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 6 May 2021 18:51:35 -0700 +Subject: nvmet: fix inline bio check for bdev-ns + +From: Chaitanya Kulkarni + +[ Upstream commit 608a969046e6e0567d05a166be66c77d2dd8220b ] + +When handling rw commands, for inline bio case we only consider +transfer size. This works well when req->sg_cnt fits into the +req->inline_bvec, but it will result in the warning in +__bio_add_page() when req->sg_cnt > NVMET_MAX_INLINE_BVEC. + +Consider an I/O size 32768 and first page is not aligned to the page +boundary, then I/O is split in following manner :- + +[ 2206.256140] nvmet: sg->length 3440 sg->offset 656 +[ 2206.256144] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256148] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256152] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256155] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256159] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256163] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256166] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256170] nvmet: sg->length 656 sg->offset 0 + +Now the req->transfer_size == NVMET_MAX_INLINE_DATA_LEN i.e. 32768, but +the req->sg_cnt is (9) > NVMET_MAX_INLINE_BIOVEC which is (8). +This will result in the following warning message :- + +nvmet_bdev_execute_rw() + bio_add_page() + __bio_add_page() + WARN_ON_ONCE(bio_full(bio, len)); + +This scenario is very hard to reproduce on the nvme-loop transport only +with rw commands issued with the passthru IOCTL interface from the host +application and the data buffer is allocated with the malloc() and not +the posix_memalign(). 
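+
+The vector count follows directly from the alignment; a quick
+back-of-the-envelope check with the numbers from the log above:
+
+  /* 32768 bytes starting 656 bytes into a 4096-byte page: */
+  DIV_ROUND_UP(656 + 32768, 4096) == 9  /* > NVMET_MAX_INLINE_BIOVEC (8) */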
+ +Fixes: 73383adfad24 ("nvmet: don't split large I/Os unconditionally") +Signed-off-by: Chaitanya Kulkarni +Reviewed-by: Sagi Grimberg +Signed-off-by: Christoph Hellwig +Signed-off-by: Sasha Levin +--- + drivers/nvme/target/io-cmd-bdev.c | 2 +- + drivers/nvme/target/nvmet.h | 6 ++++++ + 2 files changed, 7 insertions(+), 1 deletion(-) + +diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c +index 9a8b3726a37c..429263ca9b97 100644 +--- a/drivers/nvme/target/io-cmd-bdev.c ++++ b/drivers/nvme/target/io-cmd-bdev.c +@@ -258,7 +258,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) + + sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); + +- if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) { ++ if (nvmet_use_inline_bvec(req)) { + bio = &req->b.inline_bio; + bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); + } else { +diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h +index 4b84edb49f22..5aad34b106dc 100644 +--- a/drivers/nvme/target/nvmet.h ++++ b/drivers/nvme/target/nvmet.h +@@ -614,4 +614,10 @@ static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba) + return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT); + } + ++static inline bool nvmet_use_inline_bvec(struct nvmet_req *req) ++{ ++ return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN && ++ req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC; ++} ++ + #endif /* _NVMET_H */ +-- +2.30.2 + diff --git a/queue-5.12/nvmet-fix-inline-bio-check-for-passthru.patch b/queue-5.12/nvmet-fix-inline-bio-check-for-passthru.patch new file mode 100644 index 00000000000..5861283a53b --- /dev/null +++ b/queue-5.12/nvmet-fix-inline-bio-check-for-passthru.patch @@ -0,0 +1,66 @@ +From dfca58aa1a9a29d48eb700226aa363dffacd872a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 6 May 2021 18:51:36 -0700 +Subject: nvmet: fix inline bio check for passthru + +From: Chaitanya Kulkarni + +[ Upstream commit ab96de5def854d8fc51280b6a20597e64b14ac31 ] + +When handling passthru commands, for inline bio allocation we only +consider the transfer size. This works well when req->sg_cnt fits into +the req->inline_bvec, but it will result in the early return from +bio_add_hw_page() when req->sg_cnt > NVMET_MAX_INLINE_BVEC. + +Consider an I/O of size 32768 and first buffer is not aligned to the +page boundary, then I/O is split in following manner :- + +[ 2206.256140] nvmet: sg->length 3440 sg->offset 656 +[ 2206.256144] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256148] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256152] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256155] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256159] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256163] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256166] nvmet: sg->length 4096 sg->offset 0 +[ 2206.256170] nvmet: sg->length 656 sg->offset 0 + +Now the req->transfer_size == NVMET_MAX_INLINE_DATA_LEN i.e. 32768, but +the req->sg_cnt is (9) > NVMET_MAX_INLINE_BIOVEC which is (8). +This will result in early return in the following code path :- + +nvmet_bdev_execute_rw() + bio_add_pc_page() + bio_add_hw_page() + if (bio_full(bio, len)) + return 0; + +Use previously introduced helper nvmet_use_inline_bvec() to consider +req->sg_cnt when using inline bio. This only affects nvme-loop +transport. 
+ +Fixes: dab3902b19a0 ("nvmet: use inline bio for passthru fast path") +Signed-off-by: Chaitanya Kulkarni +Reviewed-by: Sagi Grimberg +Signed-off-by: Christoph Hellwig +Signed-off-by: Sasha Levin +--- + drivers/nvme/target/passthru.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c +index 2798944899b7..39b1473f7204 100644 +--- a/drivers/nvme/target/passthru.c ++++ b/drivers/nvme/target/passthru.c +@@ -194,7 +194,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) + if (req->sg_cnt > BIO_MAX_VECS) + return -EINVAL; + +- if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) { ++ if (nvmet_use_inline_bvec(req)) { + bio = &req->p.inline_bio; + bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); + } else { +-- +2.30.2 + diff --git a/queue-5.12/nvmet-rdma-fix-null-deref-when-send-is-completed-wit.patch b/queue-5.12/nvmet-rdma-fix-null-deref-when-send-is-completed-wit.patch new file mode 100644 index 00000000000..820fd9b634b --- /dev/null +++ b/queue-5.12/nvmet-rdma-fix-null-deref-when-send-is-completed-wit.patch @@ -0,0 +1,82 @@ +From a9010fc919e6d922cc04c6ede4e4a347ad87eb55 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 6 May 2021 10:08:19 +0300 +Subject: nvmet-rdma: Fix NULL deref when SEND is completed with error + +From: Michal Kalderon + +[ Upstream commit 8cc365f9559b86802afc0208389f5c8d46b4ad61 ] + +When running some traffic and taking down the link on peer, a +retry counter exceeded error is received. This leads to +nvmet_rdma_error_comp which tried accessing the cq_context to +obtain the queue. The cq_context is no longer valid after the +fix to use shared CQ mechanism and should be obtained similar +to how it is obtained in other functions from the wc->qp. + +[ 905.786331] nvmet_rdma: SEND for CQE 0x00000000e3337f90 failed with status transport retry counter exceeded (12). +[ 905.832048] BUG: unable to handle kernel NULL pointer dereference at 0000000000000048 +[ 905.839919] PGD 0 P4D 0 +[ 905.842464] Oops: 0000 1 SMP NOPTI +[ 905.846144] CPU: 13 PID: 1557 Comm: kworker/13:1H Kdump: loaded Tainted: G OE --------- - - 4.18.0-304.el8.x86_64 #1 +[ 905.872135] RIP: 0010:nvmet_rdma_error_comp+0x5/0x1b [nvmet_rdma] +[ 905.878259] Code: 19 4f c0 e8 89 b3 a5 f6 e9 5b e0 ff ff 0f b7 75 14 4c 89 ea 48 c7 c7 08 1a 4f c0 e8 71 b3 a5 f6 e9 4b e0 ff ff 0f 1f 44 00 00 <48> 8b 47 48 48 85 c0 74 08 48 89 c7 e9 98 bf 49 00 e9 c3 e3 ff ff +[ 905.897135] RSP: 0018:ffffab601c45fe28 EFLAGS: 00010246 +[ 905.902387] RAX: 0000000000000065 RBX: ffff9e729ea2f800 RCX: 0000000000000000 +[ 905.909558] RDX: 0000000000000000 RSI: ffff9e72df9567c8 RDI: 0000000000000000 +[ 905.916731] RBP: ffff9e729ea2b400 R08: 000000000000074d R09: 0000000000000074 +[ 905.923903] R10: 0000000000000000 R11: ffffab601c45fcc0 R12: 0000000000000010 +[ 905.931074] R13: 0000000000000000 R14: 0000000000000010 R15: ffff9e729ea2f400 +[ 905.938247] FS: 0000000000000000(0000) GS:ffff9e72df940000(0000) knlGS:0000000000000000 +[ 905.938249] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ 905.950067] nvmet_rdma: SEND for CQE 0x00000000c7356cca failed with status transport retry counter exceeded (12). 
+[ 905.961855] CR2: 0000000000000048 CR3: 000000678d010004 CR4: 00000000007706e0 +[ 905.961855] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 +[ 905.961856] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 +[ 905.961857] PKRU: 55555554 +[ 906.010315] Call Trace: +[ 906.012778] __ib_process_cq+0x89/0x170 [ib_core] +[ 906.017509] ib_cq_poll_work+0x26/0x80 [ib_core] +[ 906.022152] process_one_work+0x1a7/0x360 +[ 906.026182] ? create_worker+0x1a0/0x1a0 +[ 906.030123] worker_thread+0x30/0x390 +[ 906.033802] ? create_worker+0x1a0/0x1a0 +[ 906.037744] kthread+0x116/0x130 +[ 906.040988] ? kthread_flush_work_fn+0x10/0x10 +[ 906.045456] ret_from_fork+0x1f/0x40 + +Fixes: ca0f1a8055be2 ("nvmet-rdma: use new shared CQ mechanism") +Signed-off-by: Shai Malin +Signed-off-by: Michal Kalderon +Reviewed-by: Sagi Grimberg +Signed-off-by: Christoph Hellwig +Signed-off-by: Sasha Levin +--- + drivers/nvme/target/rdma.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c +index 6c1f3ab7649c..7d607f435e36 100644 +--- a/drivers/nvme/target/rdma.c ++++ b/drivers/nvme/target/rdma.c +@@ -700,7 +700,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) + { + struct nvmet_rdma_rsp *rsp = + container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe); +- struct nvmet_rdma_queue *queue = cq->cq_context; ++ struct nvmet_rdma_queue *queue = wc->qp->qp_context; + + nvmet_rdma_release_rsp(rsp); + +@@ -786,7 +786,7 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc) + { + struct nvmet_rdma_rsp *rsp = + container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe); +- struct nvmet_rdma_queue *queue = cq->cq_context; ++ struct nvmet_rdma_queue *queue = wc->qp->qp_context; + struct rdma_cm_id *cm_id = rsp->queue->cm_id; + u16 status; + +-- +2.30.2 + diff --git a/queue-5.12/perf-tools-fix-dynamic-libbpf-link.patch b/queue-5.12/perf-tools-fix-dynamic-libbpf-link.patch new file mode 100644 index 00000000000..fcc3b8759fa --- /dev/null +++ b/queue-5.12/perf-tools-fix-dynamic-libbpf-link.patch @@ -0,0 +1,74 @@ +From 3fb6f9fb878f94600b61b2ee095e22a6c3609f32 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 8 May 2021 22:50:20 +0200 +Subject: perf tools: Fix dynamic libbpf link + +From: Jiri Olsa + +[ Upstream commit ad1237c30d975535a669746496cbed136aa5a045 ] + +Justin reported broken build with LIBBPF_DYNAMIC=1. + +When linking libbpf dynamically we need to use perf's +hashmap object, because it's not exported in libbpf.so +(only in libbpf.a). + +Following build is now passing: + + $ make LIBBPF_DYNAMIC=1 + BUILD: Doing 'make -j8' parallel build + ... + $ ldd perf | grep libbpf + libbpf.so.0 => /lib64/libbpf.so.0 (0x00007fa7630db000) + +Fixes: eee19501926d ("perf tools: Grab a copy of libbpf's hashmap") +Reported-by: Justin M. 
Forbes +Signed-off-by: Jiri Olsa +Cc: Alexander Shishkin +Cc: Ian Rogers +Cc: Mark Rutland +Cc: Michael Petlan +Cc: Namhyung Kim +Cc: Peter Zijlstra +Link: http://lore.kernel.org/lkml/20210508205020.617984-1-jolsa@kernel.org +Signed-off-by: Arnaldo Carvalho de Melo +Signed-off-by: Sasha Levin +--- + tools/perf/Makefile.config | 1 + + tools/perf/util/Build | 7 +++++++ + 2 files changed, 8 insertions(+) + +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config +index d8e59d31399a..c955cd683e22 100644 +--- a/tools/perf/Makefile.config ++++ b/tools/perf/Makefile.config +@@ -530,6 +530,7 @@ ifndef NO_LIBELF + ifdef LIBBPF_DYNAMIC + ifeq ($(feature-libbpf), 1) + EXTLIBS += -lbpf ++ $(call detected,CONFIG_LIBBPF_DYNAMIC) + else + dummy := $(error Error: No libbpf devel library found, please install libbpf-devel); + endif +diff --git a/tools/perf/util/Build b/tools/perf/util/Build +index e3e12f9d4733..5a296ac69415 100644 +--- a/tools/perf/util/Build ++++ b/tools/perf/util/Build +@@ -141,7 +141,14 @@ perf-$(CONFIG_LIBELF) += symbol-elf.o + perf-$(CONFIG_LIBELF) += probe-file.o + perf-$(CONFIG_LIBELF) += probe-event.o + ++ifdef CONFIG_LIBBPF_DYNAMIC ++ hashmap := 1 ++endif + ifndef CONFIG_LIBBPF ++ hashmap := 1 ++endif ++ ++ifdef hashmap + perf-y += hashmap.o + endif + +-- +2.30.2 + diff --git a/queue-5.12/powerpc-64s-make-nmi-record-implicitly-soft-masked-c.patch b/queue-5.12/powerpc-64s-make-nmi-record-implicitly-soft-masked-c.patch new file mode 100644 index 00000000000..a5345dae5d2 --- /dev/null +++ b/queue-5.12/powerpc-64s-make-nmi-record-implicitly-soft-masked-c.patch @@ -0,0 +1,98 @@ +From 9e0a6621e357b4680d1b0efee386001a3041702b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 3 May 2021 21:17:08 +1000 +Subject: powerpc/64s: Make NMI record implicitly soft-masked code as irqs + disabled +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Nicholas Piggin + +[ Upstream commit 4ec5feec1ad029bdf7d49bc50ccc0c195eeabe93 ] + +scv support introduced the notion of code that implicitly soft-masks +irqs due to the instruction addresses. This is required because scv +enters the kernel with MSR[EE]=1. + +If a NMI (including soft-NMI) interrupt hits when we are implicitly +soft-masked then its regs->softe does not reflect this because it is +derived from the explicit soft mask state (paca->irq_soft_mask). This +makes arch_irq_disabled_regs(regs) return false. + +This can trigger a warning in the soft-NMI watchdog code (shown below). +Fix it by having NMI interrupts set regs->softe to disabled in case of +interrupting an implicit soft-masked region. 
+ + ------------[ cut here ]------------ + WARNING: CPU: 41 PID: 1103 at arch/powerpc/kernel/watchdog.c:259 soft_nmi_interrupt+0x3e4/0x5f0 + CPU: 41 PID: 1103 Comm: (spawn) Not tainted + NIP: c000000000039534 LR: c000000000039234 CTR: c000000000009a00 + REGS: c000007fffbcf940 TRAP: 0700 Not tainted + MSR: 9000000000021033 CR: 22042482 XER: 200400ad + CFAR: c000000000039260 IRQMASK: 3 + GPR00: c000000000039204 c000007fffbcfbe0 c000000001d6c300 0000000000000003 + GPR04: 00007ffffa45d078 0000000000000000 0000000000000008 0000000000000020 + GPR08: 0000007ffd4e0000 0000000000000000 c000007ffffceb00 7265677368657265 + GPR12: 9000000000009033 c000007ffffceb00 00000f7075bf4480 000000000000002a + GPR16: 00000f705745a528 00007ffffa45ddd8 00000f70574d0008 0000000000000000 + GPR20: 00000f7075c58d70 00000f7057459c38 0000000000000001 0000000000000040 + GPR24: 0000000000000000 0000000000000029 c000000001dae058 0000000000000029 + GPR28: 0000000000000000 0000000000000800 0000000000000009 c000007fffbcfd60 + NIP [c000000000039534] soft_nmi_interrupt+0x3e4/0x5f0 + LR [c000000000039234] soft_nmi_interrupt+0xe4/0x5f0 + Call Trace: + [c000007fffbcfbe0] [c000000000039204] soft_nmi_interrupt+0xb4/0x5f0 (unreliable) + [c000007fffbcfcf0] [c00000000000c0e8] soft_nmi_common+0x138/0x1c4 + --- interrupt: 900 at end_real_trampolines+0x0/0x1000 + NIP: c000000000003000 LR: 00007ca426adb03c CTR: 900000000280f033 + REGS: c000007fffbcfd60 TRAP: 0900 + MSR: 9000000000009033 CR: 44042482 XER: 200400ad + CFAR: 00007ca426946020 IRQMASK: 0 + GPR00: 00000000000000ad 00007ffffa45d050 00007ca426b07f00 0000000000000035 + GPR04: 00007ffffa45d078 0000000000000000 0000000000000008 0000000000000020 + GPR08: 0000000000000000 0000000000100000 0000000010000000 00007ffffa45d110 + GPR12: 0000000000000001 00007ca426d4e680 00000f7075bf4480 000000000000002a + GPR16: 00000f705745a528 00007ffffa45ddd8 00000f70574d0008 0000000000000000 + GPR20: 00000f7075c58d70 00000f7057459c38 0000000000000001 0000000000000040 + GPR24: 0000000000000000 00000f7057473f68 0000000000000003 000000000000041b + GPR28: 00007ffffa45d4c4 0000000000000035 0000000000000000 00000f7057473f68 + NIP [c000000000003000] end_real_trampolines+0x0/0x1000 + LR [00007ca426adb03c] 0x7ca426adb03c + --- interrupt: 900 + Instruction dump: + 60000000 60000000 60420000 38600001 482b3ae5 60000000 e93f0138 a36d0008 + 7daa6b78 71290001 7f7907b4 4082fd34 <0fe00000> 4bfffd2c 60420000 ea6100a8 + ---[ end trace dc75f67d819779da ]--- + +Fixes: 118178e62e2e ("powerpc: move NMI entry/exit code into wrapper") +Reported-by: Cédric Le Goater +Signed-off-by: Nicholas Piggin +Signed-off-by: Michael Ellerman +Link: https://lore.kernel.org/r/20210503111708.758261-1-npiggin@gmail.com +Signed-off-by: Sasha Levin +--- + arch/powerpc/include/asm/interrupt.h | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h +index e8d09a841373..31ed5356590a 100644 +--- a/arch/powerpc/include/asm/interrupt.h ++++ b/arch/powerpc/include/asm/interrupt.h +@@ -138,6 +138,13 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte + local_paca->irq_soft_mask = IRQS_ALL_DISABLED; + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; + ++ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) && ++ regs->nip < (unsigned long)__end_interrupts) { ++ // Kernel code running below __end_interrupts is ++ // implicitly soft-masked. 
++ regs->softe = IRQS_ALL_DISABLED; ++ } ++ + /* Don't do any per-CPU operations until interrupt state is fixed */ + #endif + /* Allow DEC and PMI to be traced when they are soft-NMI */ +-- +2.30.2 + diff --git a/queue-5.12/sched-fair-fix-clearing-of-has_idle_cores-flag-in-se.patch b/queue-5.12/sched-fair-fix-clearing-of-has_idle_cores-flag-in-se.patch new file mode 100644 index 00000000000..3325576ed28 --- /dev/null +++ b/queue-5.12/sched-fair-fix-clearing-of-has_idle_cores-flag-in-se.patch @@ -0,0 +1,50 @@ +From e2f4ffbf0f83461eb695addb16e3f85baab47a2a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 11 May 2021 20:46:09 +0530 +Subject: sched/fair: Fix clearing of has_idle_cores flag in select_idle_cpu() + +From: Gautham R. Shenoy + +[ Upstream commit 02dbb7246c5bbbbe1607ebdc546ba5c454a664b1 ] + +In commit: + + 9fe1f127b913 ("sched/fair: Merge select_idle_core/cpu()") + +in select_idle_cpu(), we check if an idle core is present in the LLC +of the target CPU via the flag "has_idle_cores". We look for the idle +core in select_idle_cores(). If select_idle_cores() isn't able to find +an idle core/CPU, we need to unset the has_idle_cores flag in the LLC +of the target to prevent other CPUs from going down this route. + +However, the current code is unsetting it in the LLC of the current +CPU instead of the target CPU. This patch fixes this issue. + +Fixes: 9fe1f127b913 ("sched/fair: Merge select_idle_core/cpu()") +Signed-off-by: Gautham R. Shenoy +Signed-off-by: Ingo Molnar +Reviewed-by: Vincent Guittot +Reviewed-by: Srikar Dronamraju +Acked-by: Mel Gorman +Link: https://lore.kernel.org/r/1620746169-13996-1-git-send-email-ego@linux.vnet.ibm.com +Signed-off-by: Sasha Levin +--- + kernel/sched/fair.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index edcabae5c658..a073a839cd06 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -6212,7 +6212,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool + } + + if (has_idle_core) +- set_idle_cores(this, false); ++ set_idle_cores(target, false); + + if (sched_feat(SIS_PROP) && !has_idle_core) { + time = cpu_clock(this) - time; +-- +2.30.2 + diff --git a/queue-5.12/series b/queue-5.12/series index d3142cd49ec..150aeedc6ee 100644 --- a/queue-5.12/series +++ b/queue-5.12/series @@ -271,3 +271,37 @@ drm-amd-display-initialize-attribute-for-hdcp_srm-sysfs-file.patch drm-i915-avoid-div-by-zero-on-gen2.patch drm-i915-dp-use-slow-and-wide-link-training-for-everything.patch kvm-exit-halt-polling-on-need_resched-as-well.patch +drm-msm-fix-llc-not-being-enabled-for-mmu500-targets.patch +kvm-lapic-accurately-guarantee-busy-wait-for-timer-t.patch +drm-msm-dp-initialize-audio_comp-when-audio-starts.patch +kvm-x86-cancel-pvclock_gtod_work-on-module-removal.patch +kvm-x86-prevent-deadlock-against-tk_core.seq.patch +kvm-svm-move-ghcb-unmapping-to-fix-rcu-warning.patch +dax-add-an-enum-for-specifying-dax-wakup-mode.patch +dax-add-a-wakeup-mode-parameter-to-put_unlocked_entr.patch +dax-wake-up-all-waiters-after-invalidating-dax-entry.patch +xen-unpopulated-alloc-fix-error-return-code-in-fill_.patch +perf-tools-fix-dynamic-libbpf-link.patch +usb-dwc3-gadget-free-gadget-structure-only-after-fre.patch +iio-light-gp2ap002-fix-rumtime-pm-imbalance-on-error.patch +iio-proximity-pulsedlight-fix-rumtime-pm-imbalance-o.patch +iio-hid-sensors-select-iio_triggered_buffer-under-hi.patch +iio-core-return-enodev-if-ioctl-is-unknown.patch 
+usb-fotg210-hcd-fix-an-error-message.patch +hwmon-occ-fix-poll-rate-limiting.patch +usb-typec-tcpm-fix-wrong-handling-for-not_supported-.patch +usb-musb-fix-an-error-message.patch +hwmon-ltc2992-put-fwnode-in-error-case-during-probe.patch +acpi-scan-fix-a-memory-leak-in-an-error-handling-pat.patch +kyber-fix-out-of-bounds-access-when-preempted.patch +nvmet-fix-inline-bio-check-for-bdev-ns.patch +nvmet-fix-inline-bio-check-for-passthru.patch +nvmet-rdma-fix-null-deref-when-send-is-completed-wit.patch +f2fs-compress-fix-to-free-compress-page-correctly.patch +f2fs-compress-fix-race-condition-of-overwrite-vs-tru.patch +f2fs-compress-fix-to-assign-cc.cluster_idx-correctly.patch +sched-fair-fix-clearing-of-has_idle_cores-flag-in-se.patch +nbd-fix-null-pointer-in-flush_workqueue.patch +powerpc-64s-make-nmi-record-implicitly-soft-masked-c.patch +blk-mq-plug-request-for-shared-sbitmap.patch +blk-mq-swap-two-calls-in-blk_mq_exit_queue.patch diff --git a/queue-5.12/usb-dwc3-gadget-free-gadget-structure-only-after-fre.patch b/queue-5.12/usb-dwc3-gadget-free-gadget-structure-only-after-fre.patch new file mode 100644 index 00000000000..b5d0621bd84 --- /dev/null +++ b/queue-5.12/usb-dwc3-gadget-free-gadget-structure-only-after-fre.patch @@ -0,0 +1,64 @@ +From 1e0a3389f4a543cb56e5ce687338c642de0e815a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 1 May 2021 02:35:58 -0700 +Subject: usb: dwc3: gadget: Free gadget structure only after freeing endpoints + +From: Jack Pham + +[ Upstream commit bb9c74a5bd1462499fe5ccb1e3c5ac40dcfa9139 ] + +As part of commit e81a7018d93a ("usb: dwc3: allocate gadget structure +dynamically") the dwc3_gadget_release() was added which will free +the dwc->gadget structure upon the device's removal when +usb_del_gadget_udc() is called in dwc3_gadget_exit(). + +However, simply freeing the gadget results a dangling pointer +situation: the endpoints created in dwc3_gadget_init_endpoints() +have their dep->endpoint.ep_list members chained off the list_head +anchored at dwc->gadget->ep_list. Thus when dwc->gadget is freed, +the first dwc3_ep in the list now has a dangling prev pointer and +likewise for the next pointer of the dwc3_ep at the tail of the list. +The dwc3_gadget_free_endpoints() that follows will result in a +use-after-free when it calls list_del(). + +This was caught by enabling KASAN and performing a driver unbind. +The recent commit 568262bf5492 ("usb: dwc3: core: Add shutdown +callback for dwc3") also exposes this as a panic during shutdown. + +There are a few possibilities to fix this. One could be to perform +a list_del() of the gadget->ep_list itself which removes it from +the rest of the dwc3_ep chain. + +Another approach is what this patch does, by splitting up the +usb_del_gadget_udc() call into its separate "del" and "put" +components. This allows dwc3_gadget_free_endpoints() to be +called before the gadget is finally freed with usb_put_gadget(). 
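+
+The resulting teardown order, roughly (a sketch of the sequence
+described above, not a drop-in replacement for dwc3_gadget_exit()):
+
+  usb_del_gadget(dwc->gadget);       /* unregister from the UDC core */
+  dwc3_gadget_free_endpoints(dwc);   /* list_del() while gadget->ep_list is still valid */
+  usb_put_gadget(dwc->gadget);       /* drop the last ref; gadget may now be freed */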
+ +Fixes: e81a7018d93a ("usb: dwc3: allocate gadget structure dynamically") +Reviewed-by: Peter Chen +Signed-off-by: Jack Pham +Link: https://lore.kernel.org/r/20210501093558.7375-1-jackp@codeaurora.org +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/usb/dwc3/gadget.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index f85eda6bc988..5fd27160c336 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -4024,8 +4024,9 @@ int dwc3_gadget_init(struct dwc3 *dwc) + + void dwc3_gadget_exit(struct dwc3 *dwc) + { +- usb_del_gadget_udc(dwc->gadget); ++ usb_del_gadget(dwc->gadget); + dwc3_gadget_free_endpoints(dwc); ++ usb_put_gadget(dwc->gadget); + dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, + dwc->bounce_addr); + kfree(dwc->setup_buf); +-- +2.30.2 + diff --git a/queue-5.12/usb-fotg210-hcd-fix-an-error-message.patch b/queue-5.12/usb-fotg210-hcd-fix-an-error-message.patch new file mode 100644 index 00000000000..6f787acb83e --- /dev/null +++ b/queue-5.12/usb-fotg210-hcd-fix-an-error-message.patch @@ -0,0 +1,53 @@ +From e495e39711718aefb50d7ee6a8d2058a3f316305 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 6 May 2021 22:39:10 +0200 +Subject: usb: fotg210-hcd: Fix an error message + +From: Christophe JAILLET + +[ Upstream commit a60a34366e0d09ca002c966dd7c43a68c28b1f82 ] + +'retval' is known to be -ENODEV here. +This is a hard-coded default error code which is not useful in the error +message. Moreover, another error message is printed at the end of the +error handling path. The corresponding error code (-ENOMEM) is more +informative. + +So remove simplify the first error message. + +While at it, also remove the useless initialization of 'retval'. + +Fixes: 7d50195f6c50 ("usb: host: Faraday fotg210-hcd driver") +Signed-off-by: Christophe JAILLET +Link: https://lore.kernel.org/r/94531bcff98e46d4f9c20183a90b7f47f699126c.1620333419.git.christophe.jaillet@wanadoo.fr +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/usb/host/fotg210-hcd.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c +index 5617ef30530a..f0e4a315cc81 100644 +--- a/drivers/usb/host/fotg210-hcd.c ++++ b/drivers/usb/host/fotg210-hcd.c +@@ -5568,7 +5568,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev) + struct usb_hcd *hcd; + struct resource *res; + int irq; +- int retval = -ENODEV; ++ int retval; + struct fotg210_hcd *fotg210; + + if (usb_disabled()) +@@ -5588,7 +5588,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev) + hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev, + dev_name(dev)); + if (!hcd) { +- dev_err(dev, "failed to create hcd with err %d\n", retval); ++ dev_err(dev, "failed to create hcd\n"); + retval = -ENOMEM; + goto fail_create_hcd; + } +-- +2.30.2 + diff --git a/queue-5.12/usb-musb-fix-an-error-message.patch b/queue-5.12/usb-musb-fix-an-error-message.patch new file mode 100644 index 00000000000..f5468fcd836 --- /dev/null +++ b/queue-5.12/usb-musb-fix-an-error-message.patch @@ -0,0 +1,38 @@ +From 036a6ab315da0efc0936b8ee1b0be4e088515f7e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 4 May 2021 22:26:29 +0200 +Subject: usb: musb: Fix an error message + +From: Christophe JAILLET + +[ Upstream commit d9ff1096a840dddea3d5cfa2149ff7da9f499fb2 ] + +'ret' is known to be 0 here. 
+Initialize 'ret' with the expected error code before using it. + +Fixes: 0990366bab3c ("usb: musb: Add support for MediaTek musb controller") +Signed-off-by: Christophe JAILLET +Link: https://lore.kernel.org/r/69f514dc7134e3c917cad208e73cc650cb9e2bd6.1620159879.git.christophe.jaillet@wanadoo.fr +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/usb/musb/mediatek.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c +index eebeadd26946..6b92d037d8fc 100644 +--- a/drivers/usb/musb/mediatek.c ++++ b/drivers/usb/musb/mediatek.c +@@ -518,8 +518,8 @@ static int mtk_musb_probe(struct platform_device *pdev) + + glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); + if (IS_ERR(glue->xceiv)) { +- dev_err(dev, "fail to getting usb-phy %d\n", ret); + ret = PTR_ERR(glue->xceiv); ++ dev_err(dev, "fail to getting usb-phy %d\n", ret); + goto err_unregister_usb_phy; + } + +-- +2.30.2 + diff --git a/queue-5.12/usb-typec-tcpm-fix-wrong-handling-for-not_supported-.patch b/queue-5.12/usb-typec-tcpm-fix-wrong-handling-for-not_supported-.patch new file mode 100644 index 00000000000..7d233c01064 --- /dev/null +++ b/queue-5.12/usb-typec-tcpm-fix-wrong-handling-for-not_supported-.patch @@ -0,0 +1,79 @@ +From c28bd1d85f8d593d403d028bc87d9e12b7c6862f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 7 May 2021 14:23:00 +0800 +Subject: usb: typec: tcpm: Fix wrong handling for Not_Supported in VDM AMS + +From: Kyle Tso + +[ Upstream commit f1fbd950b59b67bc5c202216c8e1c6ca8c99a3b4 ] + +Not_Supported Message is acceptable in VDM AMS. Redirect the VDM state +machine to VDM_STATE_DONE when receiving Not_Supported and finish the +VDM AMS. + +Also, after the loop in vdm_state_machine_work, add more conditions of +VDM states to clear the vdm_sm_running flag because those are all +stopping states when leaving the loop. + +In addition, finish the VDM AMS if the port partner responds BUSY. 
+ +Fixes: 8dea75e11380 ("usb: typec: tcpm: Protocol Error handling") +Fixes: 8d3a0578ad1a ("usb: typec: tcpm: Respond Wait if VDM state machine is running") +Reviewed-by: Guenter Roeck +Signed-off-by: Kyle Tso +Link: https://lore.kernel.org/r/20210507062300.1945009-3-kyletso@google.com +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/usb/typec/tcpm/tcpm.c | 14 ++++++++++++-- + 1 file changed, 12 insertions(+), 2 deletions(-) + +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c +index 1a086ba254d2..7386f3e2cb69 100644 +--- a/drivers/usb/typec/tcpm/tcpm.c ++++ b/drivers/usb/typec/tcpm/tcpm.c +@@ -1877,7 +1877,6 @@ static void vdm_run_state_machine(struct tcpm_port *port) + } + + if (res < 0) { +- port->vdm_sm_running = false; + return; + } + } +@@ -1893,6 +1892,7 @@ static void vdm_run_state_machine(struct tcpm_port *port) + port->vdo_data[0] = port->vdo_retry; + port->vdo_count = 1; + port->vdm_state = VDM_STATE_READY; ++ tcpm_ams_finish(port); + break; + case VDM_STATE_BUSY: + port->vdm_state = VDM_STATE_ERR_TMOUT; +@@ -1958,7 +1958,7 @@ static void vdm_state_machine_work(struct kthread_work *work) + port->vdm_state != VDM_STATE_BUSY && + port->vdm_state != VDM_STATE_SEND_MESSAGE); + +- if (port->vdm_state == VDM_STATE_ERR_TMOUT) ++ if (port->vdm_state < VDM_STATE_READY) + port->vdm_sm_running = false; + + mutex_unlock(&port->lock); +@@ -2549,6 +2549,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, + port->sink_cap_done = true; + tcpm_set_state(port, ready_state(port), 0); + break; ++ case SRC_READY: ++ case SNK_READY: ++ if (port->vdm_state > VDM_STATE_READY) { ++ port->vdm_state = VDM_STATE_DONE; ++ if (tcpm_vdm_ams(port)) ++ tcpm_ams_finish(port); ++ mod_vdm_delayed_work(port, 0); ++ break; ++ } ++ fallthrough; + default: + tcpm_pd_handle_state(port, + port->pwr_role == TYPEC_SOURCE ? +-- +2.30.2 + diff --git a/queue-5.12/xen-unpopulated-alloc-fix-error-return-code-in-fill_.patch b/queue-5.12/xen-unpopulated-alloc-fix-error-return-code-in-fill_.patch new file mode 100644 index 00000000000..cf3807b7580 --- /dev/null +++ b/queue-5.12/xen-unpopulated-alloc-fix-error-return-code-in-fill_.patch @@ -0,0 +1,42 @@ +From 2ad34745ca54948224b1ac211696378b0238d76b Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sat, 8 May 2021 10:19:13 +0800 +Subject: xen/unpopulated-alloc: fix error return code in fill_list() + +From: Zhen Lei + +[ Upstream commit dbc03e81586fc33e4945263fd6e09e22eb4b980f ] + +Fix to return a negative error code from the error handling case instead +of 0, as done elsewhere in this function. + +Fixes: a4574f63edc6 ("mm/memremap_pages: convert to 'struct range'") +Reported-by: Hulk Robot +Signed-off-by: Zhen Lei +Reviewed-by: Juergen Gross +Link: https://lore.kernel.org/r/20210508021913.1727-1-thunder.leizhen@huawei.com +Signed-off-by: Juergen Gross +Signed-off-by: Sasha Levin +--- + drivers/xen/unpopulated-alloc.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c +index e64e6befc63b..87e6b7db892f 100644 +--- a/drivers/xen/unpopulated-alloc.c ++++ b/drivers/xen/unpopulated-alloc.c +@@ -39,8 +39,10 @@ static int fill_list(unsigned int nr_pages) + } + + pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL); +- if (!pgmap) ++ if (!pgmap) { ++ ret = -ENOMEM; + goto err_pgmap; ++ } + + pgmap->type = MEMORY_DEVICE_GENERIC; + pgmap->range = (struct range) { +-- +2.30.2 + -- 2.47.3