From 6f3af5e55fc6cc7ed621df067783926c20efd3b8 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Sun, 23 Aug 2020 21:33:58 -0400 Subject: [PATCH] Drop kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch Signed-off-by: Sasha Levin --- ...eschedule-if-mmu_notifier_range_bloc.patch | 110 ------------------ queue-4.14/series | 1 - ...eschedule-if-mmu_notifier_range_bloc.patch | 110 ------------------ queue-4.19/series | 1 - ...eschedule-if-mmu_notifier_range_bloc.patch | 110 ------------------ queue-5.4/series | 1 - ...eschedule-if-mmu_notifier_range_bloc.patch | 110 ------------------ queue-5.7/series | 1 - ...eschedule-if-mmu_notifier_range_bloc.patch | 110 ------------------ queue-5.8/series | 1 - 10 files changed, 555 deletions(-) delete mode 100644 queue-4.14/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch delete mode 100644 queue-4.19/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch delete mode 100644 queue-5.4/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch delete mode 100644 queue-5.7/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch delete mode 100644 queue-5.8/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch diff --git a/queue-4.14/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch b/queue-4.14/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch deleted file mode 100644 index 11507c403e1..00000000000 --- a/queue-4.14/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch +++ /dev/null @@ -1,110 +0,0 @@ -From dbaba08b25c9fc277d85ace353008edf6a6208e6 Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Tue, 11 Aug 2020 11:27:25 +0100 -Subject: KVM: arm64: Only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is not - set - -From: Will Deacon - -[ Upstream commit b5331379bc62611d1026173a09c73573384201d9 ] - -When an MMU notifier call results in unmapping a range that spans multiple -PGDs, we end up calling into cond_resched_lock() when crossing a PGD boundary, -since this avoids running into RCU stalls during VM teardown. Unfortunately, -if the VM is destroyed as a result of OOM, then blocking is not permitted -and the call to the scheduler triggers the following BUG(): - - | BUG: sleeping function called from invalid context at arch/arm64/kvm/mmu.c:394 - | in_atomic(): 1, irqs_disabled(): 0, non_block: 1, pid: 36, name: oom_reaper - | INFO: lockdep is turned off. - | CPU: 3 PID: 36 Comm: oom_reaper Not tainted 5.8.0 #1 - | Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015 - | Call trace: - | dump_backtrace+0x0/0x284 - | show_stack+0x1c/0x28 - | dump_stack+0xf0/0x1a4 - | ___might_sleep+0x2bc/0x2cc - | unmap_stage2_range+0x160/0x1ac - | kvm_unmap_hva_range+0x1a0/0x1c8 - | kvm_mmu_notifier_invalidate_range_start+0x8c/0xf8 - | __mmu_notifier_invalidate_range_start+0x218/0x31c - | mmu_notifier_invalidate_range_start_nonblock+0x78/0xb0 - | __oom_reap_task_mm+0x128/0x268 - | oom_reap_task+0xac/0x298 - | oom_reaper+0x178/0x17c - | kthread+0x1e4/0x1fc - | ret_from_fork+0x10/0x30 - -Use the new 'flags' argument to kvm_unmap_hva_range() to ensure that we -only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is set in the notifier -flags. 
- -Cc: -Fixes: 8b3405e345b5 ("kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd") -Cc: Marc Zyngier -Cc: Suzuki K Poulose -Cc: James Morse -Signed-off-by: Will Deacon -Message-Id: <20200811102725.7121-3-will@kernel.org> -Signed-off-by: Paolo Bonzini -Signed-off-by: Sasha Levin ---- - virt/kvm/arm/mmu.c | 17 +++++++++++++---- - 1 file changed, 13 insertions(+), 4 deletions(-) - -diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c -index 3814cdad643a5..f7de27efdd1f2 100644 ---- a/virt/kvm/arm/mmu.c -+++ b/virt/kvm/arm/mmu.c -@@ -288,7 +288,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd, - * destroying the VM), otherwise another faulting VCPU may come in and mess - * with things behind our backs. - */ --static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) -+static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size, -+ bool may_block) - { - pgd_t *pgd; - phys_addr_t addr = start, end = start + size; -@@ -311,11 +312,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) - * If the range is too large, release the kvm->mmu_lock - * to prevent starvation and lockup detector warnings. - */ -- if (next != end) -+ if (may_block && next != end) - cond_resched_lock(&kvm->mmu_lock); - } while (pgd++, addr = next, addr != end); - } - -+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size) -+{ -+ __unmap_stage2_range(mmu, start, size, true); -+} -+ - static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, - phys_addr_t addr, phys_addr_t end) - { -@@ -1626,7 +1632,10 @@ static int handle_hva_to_gpa(struct kvm *kvm, - - static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) - { -- unmap_stage2_range(kvm, gpa, size); -+ unsigned flags = *(unsigned *)data; -+ bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE; -+ -+ __unmap_stage2_range(kvm, gpa, size, may_block); - return 0; - } - -@@ -1649,7 +1658,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, - return 0; - - trace_kvm_unmap_hva_range(start, end); -- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); -+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags); - return 0; - } - --- -2.25.1 - diff --git a/queue-4.14/series b/queue-4.14/series index 5552461fd45..5d56e629090 100644 --- a/queue-4.14/series +++ b/queue-4.14/series @@ -16,7 +16,6 @@ mm-page_alloc-fix-core-hung-in-free_pcppages_bulk.patch ext4-fix-checking-of-directory-entry-validity-for-inline-directories.patch jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.patch mm-memory.c-skip-spurious-tlb-flush-for-retried-page-fault.patch -kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch spi-prevent-adding-devices-below-an-unregistering-co.patch scsi-ufs-add-delay_before_lpm-quirk-for-micron-devic.patch media-budget-core-improve-exception-handling-in-budg.patch diff --git a/queue-4.19/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch b/queue-4.19/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch deleted file mode 100644 index 6ab3ced4372..00000000000 --- a/queue-4.19/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch +++ /dev/null @@ -1,110 +0,0 @@ -From 265d91ebe51c31f68e576ded3e4dfe23171e6382 Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Tue, 11 Aug 2020 11:27:25 +0100 -Subject: KVM: arm64: Only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is not - set - -From: Will Deacon - -[ Upstream commit b5331379bc62611d1026173a09c73573384201d9 ] - -When an MMU notifier 
call results in unmapping a range that spans multiple -PGDs, we end up calling into cond_resched_lock() when crossing a PGD boundary, -since this avoids running into RCU stalls during VM teardown. Unfortunately, -if the VM is destroyed as a result of OOM, then blocking is not permitted -and the call to the scheduler triggers the following BUG(): - - | BUG: sleeping function called from invalid context at arch/arm64/kvm/mmu.c:394 - | in_atomic(): 1, irqs_disabled(): 0, non_block: 1, pid: 36, name: oom_reaper - | INFO: lockdep is turned off. - | CPU: 3 PID: 36 Comm: oom_reaper Not tainted 5.8.0 #1 - | Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015 - | Call trace: - | dump_backtrace+0x0/0x284 - | show_stack+0x1c/0x28 - | dump_stack+0xf0/0x1a4 - | ___might_sleep+0x2bc/0x2cc - | unmap_stage2_range+0x160/0x1ac - | kvm_unmap_hva_range+0x1a0/0x1c8 - | kvm_mmu_notifier_invalidate_range_start+0x8c/0xf8 - | __mmu_notifier_invalidate_range_start+0x218/0x31c - | mmu_notifier_invalidate_range_start_nonblock+0x78/0xb0 - | __oom_reap_task_mm+0x128/0x268 - | oom_reap_task+0xac/0x298 - | oom_reaper+0x178/0x17c - | kthread+0x1e4/0x1fc - | ret_from_fork+0x10/0x30 - -Use the new 'flags' argument to kvm_unmap_hva_range() to ensure that we -only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is set in the notifier -flags. - -Cc: -Fixes: 8b3405e345b5 ("kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd") -Cc: Marc Zyngier -Cc: Suzuki K Poulose -Cc: James Morse -Signed-off-by: Will Deacon -Message-Id: <20200811102725.7121-3-will@kernel.org> -Signed-off-by: Paolo Bonzini -Signed-off-by: Sasha Levin ---- - virt/kvm/arm/mmu.c | 17 +++++++++++++---- - 1 file changed, 13 insertions(+), 4 deletions(-) - -diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c -index a5bc10d30618f..84c0b5005f0f4 100644 ---- a/virt/kvm/arm/mmu.c -+++ b/virt/kvm/arm/mmu.c -@@ -323,7 +323,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd, - * destroying the VM), otherwise another faulting VCPU may come in and mess - * with things behind our backs. - */ --static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) -+static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size, -+ bool may_block) - { - pgd_t *pgd; - phys_addr_t addr = start, end = start + size; -@@ -348,11 +349,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) - * If the range is too large, release the kvm->mmu_lock - * to prevent starvation and lockup detector warnings. 
- */ -- if (next != end) -+ if (may_block && next != end) - cond_resched_lock(&kvm->mmu_lock); - } while (pgd++, addr = next, addr != end); - } - -+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size) -+{ -+ __unmap_stage2_range(mmu, start, size, true); -+} -+ - static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, - phys_addr_t addr, phys_addr_t end) - { -@@ -1820,7 +1826,10 @@ static int handle_hva_to_gpa(struct kvm *kvm, - - static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) - { -- unmap_stage2_range(kvm, gpa, size); -+ unsigned flags = *(unsigned *)data; -+ bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE; -+ -+ __unmap_stage2_range(kvm, gpa, size, may_block); - return 0; - } - -@@ -1831,7 +1840,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, - return 0; - - trace_kvm_unmap_hva_range(start, end); -- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); -+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags); - return 0; - } - --- -2.25.1 - diff --git a/queue-4.19/series b/queue-4.19/series index 52fdc08dc70..5d82a9fe163 100644 --- a/queue-4.19/series +++ b/queue-4.19/series @@ -16,7 +16,6 @@ jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.pa scsi-zfcp-fix-use-after-free-in-request-timeout-handlers.patch mm-memory.c-skip-spurious-tlb-flush-for-retried-page-fault.patch drm-amd-display-fix-pow-crashing-when-given-base-0.patch -kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch spi-prevent-adding-devices-below-an-unregistering-co.patch scsi-ufs-add-delay_before_lpm-quirk-for-micron-devic.patch scsi-target-tcmu-fix-crash-in-tcmu_flush_dcache_rang.patch diff --git a/queue-5.4/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch b/queue-5.4/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch deleted file mode 100644 index 6e8e0b5dd39..00000000000 --- a/queue-5.4/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch +++ /dev/null @@ -1,110 +0,0 @@ -From e255a93ed36d41a8e412c6921137353b09964fb5 Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Tue, 11 Aug 2020 11:27:25 +0100 -Subject: KVM: arm64: Only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is not - set - -From: Will Deacon - -[ Upstream commit b5331379bc62611d1026173a09c73573384201d9 ] - -When an MMU notifier call results in unmapping a range that spans multiple -PGDs, we end up calling into cond_resched_lock() when crossing a PGD boundary, -since this avoids running into RCU stalls during VM teardown. Unfortunately, -if the VM is destroyed as a result of OOM, then blocking is not permitted -and the call to the scheduler triggers the following BUG(): - - | BUG: sleeping function called from invalid context at arch/arm64/kvm/mmu.c:394 - | in_atomic(): 1, irqs_disabled(): 0, non_block: 1, pid: 36, name: oom_reaper - | INFO: lockdep is turned off. 
- | CPU: 3 PID: 36 Comm: oom_reaper Not tainted 5.8.0 #1 - | Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015 - | Call trace: - | dump_backtrace+0x0/0x284 - | show_stack+0x1c/0x28 - | dump_stack+0xf0/0x1a4 - | ___might_sleep+0x2bc/0x2cc - | unmap_stage2_range+0x160/0x1ac - | kvm_unmap_hva_range+0x1a0/0x1c8 - | kvm_mmu_notifier_invalidate_range_start+0x8c/0xf8 - | __mmu_notifier_invalidate_range_start+0x218/0x31c - | mmu_notifier_invalidate_range_start_nonblock+0x78/0xb0 - | __oom_reap_task_mm+0x128/0x268 - | oom_reap_task+0xac/0x298 - | oom_reaper+0x178/0x17c - | kthread+0x1e4/0x1fc - | ret_from_fork+0x10/0x30 - -Use the new 'flags' argument to kvm_unmap_hva_range() to ensure that we -only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is set in the notifier -flags. - -Cc: -Fixes: 8b3405e345b5 ("kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd") -Cc: Marc Zyngier -Cc: Suzuki K Poulose -Cc: James Morse -Signed-off-by: Will Deacon -Message-Id: <20200811102725.7121-3-will@kernel.org> -Signed-off-by: Paolo Bonzini -Signed-off-by: Sasha Levin ---- - virt/kvm/arm/mmu.c | 17 +++++++++++++---- - 1 file changed, 13 insertions(+), 4 deletions(-) - -diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c -index 767ac4eab4fe9..776a4b1e04cfc 100644 ---- a/virt/kvm/arm/mmu.c -+++ b/virt/kvm/arm/mmu.c -@@ -332,7 +332,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd, - * destroying the VM), otherwise another faulting VCPU may come in and mess - * with things behind our backs. - */ --static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) -+static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size, -+ bool may_block) - { - pgd_t *pgd; - phys_addr_t addr = start, end = start + size; -@@ -357,11 +358,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) - * If the range is too large, release the kvm->mmu_lock - * to prevent starvation and lockup detector warnings. 
- */ -- if (next != end) -+ if (may_block && next != end) - cond_resched_lock(&kvm->mmu_lock); - } while (pgd++, addr = next, addr != end); - } - -+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size) -+{ -+ __unmap_stage2_range(mmu, start, size, true); -+} -+ - static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, - phys_addr_t addr, phys_addr_t end) - { -@@ -2045,7 +2051,10 @@ static int handle_hva_to_gpa(struct kvm *kvm, - - static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) - { -- unmap_stage2_range(kvm, gpa, size); -+ unsigned flags = *(unsigned *)data; -+ bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE; -+ -+ __unmap_stage2_range(kvm, gpa, size, may_block); - return 0; - } - -@@ -2056,7 +2065,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, - return 0; - - trace_kvm_unmap_hva_range(start, end); -- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); -+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags); - return 0; - } - --- -2.25.1 - diff --git a/queue-5.4/series b/queue-5.4/series index 99b12a44e19..d23e5c3f18d 100644 --- a/queue-5.4/series +++ b/queue-5.4/series @@ -37,7 +37,6 @@ drm-amdgpu-display-use-gfp_atomic-in-dcn20_validate_bandwidth_internal.patch drm-amd-display-fix-edid-parsing-after-resume-from-suspend.patch drm-amd-display-fix-pow-crashing-when-given-base-0.patch opp-enable-resources-again-if-they-were-disabled-ear.patch -kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch scsi-ufs-add-delay_before_lpm-quirk-for-micron-devic.patch scsi-target-tcmu-fix-crash-in-tcmu_flush_dcache_rang.patch media-budget-core-improve-exception-handling-in-budg.patch diff --git a/queue-5.7/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch b/queue-5.7/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch deleted file mode 100644 index f32716ebc0f..00000000000 --- a/queue-5.7/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch +++ /dev/null @@ -1,110 +0,0 @@ -From 655785290d4ee911a633b3376dc61d5ce1419e43 Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Tue, 11 Aug 2020 11:27:25 +0100 -Subject: KVM: arm64: Only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is not - set - -From: Will Deacon - -[ Upstream commit b5331379bc62611d1026173a09c73573384201d9 ] - -When an MMU notifier call results in unmapping a range that spans multiple -PGDs, we end up calling into cond_resched_lock() when crossing a PGD boundary, -since this avoids running into RCU stalls during VM teardown. Unfortunately, -if the VM is destroyed as a result of OOM, then blocking is not permitted -and the call to the scheduler triggers the following BUG(): - - | BUG: sleeping function called from invalid context at arch/arm64/kvm/mmu.c:394 - | in_atomic(): 1, irqs_disabled(): 0, non_block: 1, pid: 36, name: oom_reaper - | INFO: lockdep is turned off. 
- | CPU: 3 PID: 36 Comm: oom_reaper Not tainted 5.8.0 #1 - | Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015 - | Call trace: - | dump_backtrace+0x0/0x284 - | show_stack+0x1c/0x28 - | dump_stack+0xf0/0x1a4 - | ___might_sleep+0x2bc/0x2cc - | unmap_stage2_range+0x160/0x1ac - | kvm_unmap_hva_range+0x1a0/0x1c8 - | kvm_mmu_notifier_invalidate_range_start+0x8c/0xf8 - | __mmu_notifier_invalidate_range_start+0x218/0x31c - | mmu_notifier_invalidate_range_start_nonblock+0x78/0xb0 - | __oom_reap_task_mm+0x128/0x268 - | oom_reap_task+0xac/0x298 - | oom_reaper+0x178/0x17c - | kthread+0x1e4/0x1fc - | ret_from_fork+0x10/0x30 - -Use the new 'flags' argument to kvm_unmap_hva_range() to ensure that we -only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is set in the notifier -flags. - -Cc: -Fixes: 8b3405e345b5 ("kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd") -Cc: Marc Zyngier -Cc: Suzuki K Poulose -Cc: James Morse -Signed-off-by: Will Deacon -Message-Id: <20200811102725.7121-3-will@kernel.org> -Signed-off-by: Paolo Bonzini -Signed-off-by: Sasha Levin ---- - virt/kvm/arm/mmu.c | 17 +++++++++++++---- - 1 file changed, 13 insertions(+), 4 deletions(-) - -diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c -index 8a9d13e8e904f..6ee6770694953 100644 ---- a/virt/kvm/arm/mmu.c -+++ b/virt/kvm/arm/mmu.c -@@ -331,7 +331,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd, - * destroying the VM), otherwise another faulting VCPU may come in and mess - * with things behind our backs. - */ --static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) -+static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size, -+ bool may_block) - { - pgd_t *pgd; - phys_addr_t addr = start, end = start + size; -@@ -356,11 +357,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) - * If the range is too large, release the kvm->mmu_lock - * to prevent starvation and lockup detector warnings. 
- */ -- if (next != end) -+ if (may_block && next != end) - cond_resched_lock(&kvm->mmu_lock); - } while (pgd++, addr = next, addr != end); - } - -+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size) -+{ -+ __unmap_stage2_range(mmu, start, size, true); -+} -+ - static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, - phys_addr_t addr, phys_addr_t end) - { -@@ -2041,7 +2047,10 @@ static int handle_hva_to_gpa(struct kvm *kvm, - - static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) - { -- unmap_stage2_range(kvm, gpa, size); -+ unsigned flags = *(unsigned *)data; -+ bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE; -+ -+ __unmap_stage2_range(kvm, gpa, size, may_block); - return 0; - } - -@@ -2052,7 +2061,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, - return 0; - - trace_kvm_unmap_hva_range(start, end); -- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); -+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags); - return 0; - } - --- -2.25.1 - diff --git a/queue-5.7/series b/queue-5.7/series index 1068fd67835..91cd03f27ad 100644 --- a/queue-5.7/series +++ b/queue-5.7/series @@ -30,7 +30,6 @@ io-wq-add-an-option-to-cancel-all-matched-reqs.patch io_uring-cancel-all-task-s-requests-on-exit.patch io_uring-find-and-cancel-head-link-async-work-on-fil.patch opp-enable-resources-again-if-they-were-disabled-ear.patch -kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch opp-reorder-the-code-for-target_freq-case.patch opp-put-opp-table-in-dev_pm_opp_set_rate-for-empty-t.patch scsi-ufs-add-delay_before_lpm-quirk-for-micron-devic.patch diff --git a/queue-5.8/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch b/queue-5.8/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch deleted file mode 100644 index 8b3ee43c00c..00000000000 --- a/queue-5.8/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch +++ /dev/null @@ -1,110 +0,0 @@ -From 965b8c25870ed09250e88797b334afa6180b66b8 Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Tue, 11 Aug 2020 11:27:25 +0100 -Subject: KVM: arm64: Only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is not - set - -From: Will Deacon - -[ Upstream commit b5331379bc62611d1026173a09c73573384201d9 ] - -When an MMU notifier call results in unmapping a range that spans multiple -PGDs, we end up calling into cond_resched_lock() when crossing a PGD boundary, -since this avoids running into RCU stalls during VM teardown. Unfortunately, -if the VM is destroyed as a result of OOM, then blocking is not permitted -and the call to the scheduler triggers the following BUG(): - - | BUG: sleeping function called from invalid context at arch/arm64/kvm/mmu.c:394 - | in_atomic(): 1, irqs_disabled(): 0, non_block: 1, pid: 36, name: oom_reaper - | INFO: lockdep is turned off. 
- | CPU: 3 PID: 36 Comm: oom_reaper Not tainted 5.8.0 #1 - | Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015 - | Call trace: - | dump_backtrace+0x0/0x284 - | show_stack+0x1c/0x28 - | dump_stack+0xf0/0x1a4 - | ___might_sleep+0x2bc/0x2cc - | unmap_stage2_range+0x160/0x1ac - | kvm_unmap_hva_range+0x1a0/0x1c8 - | kvm_mmu_notifier_invalidate_range_start+0x8c/0xf8 - | __mmu_notifier_invalidate_range_start+0x218/0x31c - | mmu_notifier_invalidate_range_start_nonblock+0x78/0xb0 - | __oom_reap_task_mm+0x128/0x268 - | oom_reap_task+0xac/0x298 - | oom_reaper+0x178/0x17c - | kthread+0x1e4/0x1fc - | ret_from_fork+0x10/0x30 - -Use the new 'flags' argument to kvm_unmap_hva_range() to ensure that we -only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is set in the notifier -flags. - -Cc: -Fixes: 8b3405e345b5 ("kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd") -Cc: Marc Zyngier -Cc: Suzuki K Poulose -Cc: James Morse -Signed-off-by: Will Deacon -Message-Id: <20200811102725.7121-3-will@kernel.org> -Signed-off-by: Paolo Bonzini -Signed-off-by: Sasha Levin ---- - arch/arm64/kvm/mmu.c | 17 +++++++++++++---- - 1 file changed, 13 insertions(+), 4 deletions(-) - -diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c -index 5f6b35c336188..364e9fc5c9f8f 100644 ---- a/arch/arm64/kvm/mmu.c -+++ b/arch/arm64/kvm/mmu.c -@@ -365,7 +365,8 @@ static void unmap_stage2_p4ds(struct kvm *kvm, pgd_t *pgd, - * destroying the VM), otherwise another faulting VCPU may come in and mess - * with things behind our backs. - */ --static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) -+static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size, -+ bool may_block) - { - pgd_t *pgd; - phys_addr_t addr = start, end = start + size; -@@ -390,11 +391,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) - * If the range is too large, release the kvm->mmu_lock - * to prevent starvation and lockup detector warnings. 
- */ -- if (next != end) -+ if (may_block && next != end) - cond_resched_lock(&kvm->mmu_lock); - } while (pgd++, addr = next, addr != end); - } - -+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size) -+{ -+ __unmap_stage2_range(mmu, start, size, true); -+} -+ - static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, - phys_addr_t addr, phys_addr_t end) - { -@@ -2198,7 +2204,10 @@ static int handle_hva_to_gpa(struct kvm *kvm, - - static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) - { -- unmap_stage2_range(kvm, gpa, size); -+ unsigned flags = *(unsigned *)data; -+ bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE; -+ -+ __unmap_stage2_range(kvm, gpa, size, may_block); - return 0; - } - -@@ -2209,7 +2218,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, - return 0; - - trace_kvm_unmap_hva_range(start, end); -- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); -+ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags); - return 0; - } - --- -2.25.1 - diff --git a/queue-5.8/series b/queue-5.8/series index 2fe39b9728f..2b08ef6b797 100644 --- a/queue-5.8/series +++ b/queue-5.8/series @@ -42,7 +42,6 @@ drm-amd-display-fix-dfpstate-hang-due-to-view-port-changed.patch drm-amd-display-fix-pow-crashing-when-given-base-0.patch drm-i915-pmu-prefer-drm_warn_on-over-warn_on.patch drm-i915-provide-the-perf-pmu.module.patch -kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch scsi-ufs-add-delay_before_lpm-quirk-for-micron-devic.patch scsi-target-tcmu-fix-crash-in-tcmu_flush_dcache_rang.patch media-budget-core-improve-exception-handling-in-budg.patch -- 2.47.3
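
For context on what the dropped backport implements, the shape of the upstream change (commit b5331379bc62) is sketched below in plain C: the MMU-notifier flags are threaded down to the stage-2 unmap path, and cond_resched_lock() is only taken at PGD boundaries when MMU_NOTIFIER_RANGE_BLOCKABLE is set, so the OOM reaper's non-blockable invalidation never sleeps under kvm->mmu_lock. This is an illustrative sketch only, not the kernel source: the walker body is elided, and stage2_pgd_addr_end() is assumed to have the signature used in this era of virt/kvm/arm/mmu.c.

/*
 * Illustrative sketch of the upstream fix (b5331379bc62) -- not the
 * actual kernel source.  Only reschedule under kvm->mmu_lock when the
 * MMU notifier marked the range as blockable.
 */
static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size,
				 bool may_block)
{
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;

	assert_spin_locked(&kvm->mmu_lock);

	do {
		/* Walker helper assumed from this era of virt/kvm/arm/mmu.c. */
		next = stage2_pgd_addr_end(kvm, addr, end);

		/* ... unmap the stage-2 entries covering [addr, next) ... */

		/*
		 * Dropping mmu_lock avoids RCU stalls on huge ranges, but is
		 * only legal if the notifier caller may block (the OOM
		 * reaper's nonblocking invalidation may not).
		 */
		if (may_block && next != end)
			cond_resched_lock(&kvm->mmu_lock);
	} while (addr = next, addr != end);
}

/* Internal callers (e.g. VM teardown) may always block. */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	__unmap_stage2_range(kvm, start, size, true);
}

/* MMU notifier path: honour MMU_NOTIFIER_RANGE_BLOCKABLE from 'flags'. */
static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
	unsigned int flags = *(unsigned int *)data;

	__unmap_stage2_range(kvm, gpa, size,
			     flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
	return 0;
}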