git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop powerpc-code-patching-pre-map-patch-area.patch
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 12 Apr 2022 17:37:27 +0000 (19:37 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 12 Apr 2022 17:37:27 +0000 (19:37 +0200)
from everywhere except 5.17

queue-4.14/powerpc-code-patching-pre-map-patch-area.patch [deleted file]
queue-4.14/series
queue-4.19/powerpc-code-patching-pre-map-patch-area.patch [deleted file]
queue-4.19/series
queue-5.10/powerpc-code-patching-pre-map-patch-area.patch [deleted file]
queue-5.10/series
queue-5.15/powerpc-code-patching-pre-map-patch-area.patch [deleted file]
queue-5.15/series
queue-5.4/powerpc-code-patching-pre-map-patch-area.patch [deleted file]
queue-5.4/series

diff --git a/queue-4.14/powerpc-code-patching-pre-map-patch-area.patch b/queue-4.14/powerpc-code-patching-pre-map-patch-area.patch
deleted file mode 100644 (file)
index 407e48a..0000000
--- a/queue-4.14/powerpc-code-patching-pre-map-patch-area.patch
+++ /dev/null
@@ -1,99 +0,0 @@
-From 9f4ba417df4c9ff048c90698c54c6a856b2212c0 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 23 Feb 2022 12:58:21 +1100
-Subject: powerpc/code-patching: Pre-map patch area
-
-From: Michael Ellerman <mpe@ellerman.id.au>
-
-[ Upstream commit 591b4b268435f00d2f0b81f786c2c7bd5ef66416 ]
-
-Paul reported a warning with DEBUG_ATOMIC_SLEEP=y:
-
-  BUG: sleeping function called from invalid context at include/linux/sched/mm.h:256
-  in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 1, name: swapper/0
-  preempt_count: 0, expected: 0
-  ...
-  Call Trace:
-    dump_stack_lvl+0xa0/0xec (unreliable)
-    __might_resched+0x2f4/0x310
-    kmem_cache_alloc+0x220/0x4b0
-    __pud_alloc+0x74/0x1d0
-    hash__map_kernel_page+0x2cc/0x390
-    do_patch_instruction+0x134/0x4a0
-    arch_jump_label_transform+0x64/0x78
-    __jump_label_update+0x148/0x180
-    static_key_enable_cpuslocked+0xd0/0x120
-    static_key_enable+0x30/0x50
-    check_kvm_guest+0x60/0x88
-    pSeries_smp_probe+0x54/0xb0
-    smp_prepare_cpus+0x3e0/0x430
-    kernel_init_freeable+0x20c/0x43c
-    kernel_init+0x30/0x1a0
-    ret_from_kernel_thread+0x5c/0x64
-
-Peter pointed out that this is because do_patch_instruction() has
-disabled interrupts, but then map_patch_area() calls map_kernel_page()
-then hash__map_kernel_page() which does a sleeping memory allocation.
-
-We only see the warning in KVM guests with SMT enabled, which is not
-particularly common, or on other platforms if CONFIG_KPROBES is
-disabled, also not common. The reason we don't see it in most
-configurations is that another path that happens to have interrupts
-enabled has allocated the required page tables for us, eg. there's a
-path in kprobes init that does that. That's just pure luck though.
-
-As Christophe suggested, the simplest solution is to do a dummy
-map/unmap when we initialise the patching, so that any required page
-table levels are pre-allocated before the first call to
-do_patch_instruction(). This works because the unmap doesn't free any
-page tables that were allocated by the map, it just clears the PTE,
-leaving the page table levels there for the next map.
-
-Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
-Debugged-by: Peter Zijlstra <peterz@infradead.org>
-Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
-Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-Link: https://lore.kernel.org/r/20220223015821.473097-1-mpe@ellerman.id.au
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/lib/code-patching.c | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
-index 85f84b45d3a0..c58a619a68b3 100644
---- a/arch/powerpc/lib/code-patching.c
-+++ b/arch/powerpc/lib/code-patching.c
-@@ -47,9 +47,14 @@ int raw_patch_instruction(unsigned int *addr, unsigned int instr)
- #ifdef CONFIG_STRICT_KERNEL_RWX
- static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
-+static int map_patch_area(void *addr, unsigned long text_poke_addr);
-+static void unmap_patch_area(unsigned long addr);
-+
- static int text_area_cpu_up(unsigned int cpu)
- {
-       struct vm_struct *area;
-+      unsigned long addr;
-+      int err;
-       area = get_vm_area(PAGE_SIZE, VM_ALLOC);
-       if (!area) {
-@@ -57,6 +62,15 @@ static int text_area_cpu_up(unsigned int cpu)
-                       cpu);
-               return -1;
-       }
-+
-+      // Map/unmap the area to ensure all page tables are pre-allocated
-+      addr = (unsigned long)area->addr;
-+      err = map_patch_area(empty_zero_page, addr);
-+      if (err)
-+              return err;
-+
-+      unmap_patch_area(addr);
-+
-       this_cpu_write(text_poke_area, area);
-       return 0;
--- 
-2.35.1
-
diff --git a/queue-4.14/series b/queue-4.14/series
index e29f436aba3ad5f097bdb58c5e6aeac428566a95..ef779305531fce9d5b5e52bf71dfa2edf02779f5 100644 (file)
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -223,7 +223,6 @@ net-smc-correct-settings-of-rmb-window-update-limit.patch
 macvtap-advertise-link-netns-via-netlink.patch
 bnxt_en-eliminate-unintended-link-toggle-during-fw-r.patch
 mips-fix-fortify-panic-when-copying-asm-exception-ha.patch
-powerpc-code-patching-pre-map-patch-area.patch
 scsi-libfc-fix-use-after-free-in-fc_exch_abts_resp.patch
 usb-dwc3-omap-fix-unbalanced-disables-for-smps10_out.patch
 xtensa-fix-dtc-warning-unit_address_format.patch
diff --git a/queue-4.19/powerpc-code-patching-pre-map-patch-area.patch b/queue-4.19/powerpc-code-patching-pre-map-patch-area.patch
deleted file mode 100644 (file)
index eb24fec..0000000
--- a/queue-4.19/powerpc-code-patching-pre-map-patch-area.patch
+++ /dev/null
@@ -1,99 +0,0 @@
-From 6f97e4943659dbc7d20c176aaa564adc43d6d262 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 23 Feb 2022 12:58:21 +1100
-Subject: powerpc/code-patching: Pre-map patch area
-
-From: Michael Ellerman <mpe@ellerman.id.au>
-
-[ Upstream commit 591b4b268435f00d2f0b81f786c2c7bd5ef66416 ]
-
-Paul reported a warning with DEBUG_ATOMIC_SLEEP=y:
-
-  BUG: sleeping function called from invalid context at include/linux/sched/mm.h:256
-  in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 1, name: swapper/0
-  preempt_count: 0, expected: 0
-  ...
-  Call Trace:
-    dump_stack_lvl+0xa0/0xec (unreliable)
-    __might_resched+0x2f4/0x310
-    kmem_cache_alloc+0x220/0x4b0
-    __pud_alloc+0x74/0x1d0
-    hash__map_kernel_page+0x2cc/0x390
-    do_patch_instruction+0x134/0x4a0
-    arch_jump_label_transform+0x64/0x78
-    __jump_label_update+0x148/0x180
-    static_key_enable_cpuslocked+0xd0/0x120
-    static_key_enable+0x30/0x50
-    check_kvm_guest+0x60/0x88
-    pSeries_smp_probe+0x54/0xb0
-    smp_prepare_cpus+0x3e0/0x430
-    kernel_init_freeable+0x20c/0x43c
-    kernel_init+0x30/0x1a0
-    ret_from_kernel_thread+0x5c/0x64
-
-Peter pointed out that this is because do_patch_instruction() has
-disabled interrupts, but then map_patch_area() calls map_kernel_page()
-then hash__map_kernel_page() which does a sleeping memory allocation.
-
-We only see the warning in KVM guests with SMT enabled, which is not
-particularly common, or on other platforms if CONFIG_KPROBES is
-disabled, also not common. The reason we don't see it in most
-configurations is that another path that happens to have interrupts
-enabled has allocated the required page tables for us, eg. there's a
-path in kprobes init that does that. That's just pure luck though.
-
-As Christophe suggested, the simplest solution is to do a dummy
-map/unmap when we initialise the patching, so that any required page
-table levels are pre-allocated before the first call to
-do_patch_instruction(). This works because the unmap doesn't free any
-page tables that were allocated by the map, it just clears the PTE,
-leaving the page table levels there for the next map.
-
-Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
-Debugged-by: Peter Zijlstra <peterz@infradead.org>
-Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
-Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-Link: https://lore.kernel.org/r/20220223015821.473097-1-mpe@ellerman.id.au
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/lib/code-patching.c | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
-index bb245dbf6c57..2b9a92ea2d89 100644
---- a/arch/powerpc/lib/code-patching.c
-+++ b/arch/powerpc/lib/code-patching.c
-@@ -46,9 +46,14 @@ int raw_patch_instruction(unsigned int *addr, unsigned int instr)
- #ifdef CONFIG_STRICT_KERNEL_RWX
- static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
-+static int map_patch_area(void *addr, unsigned long text_poke_addr);
-+static void unmap_patch_area(unsigned long addr);
-+
- static int text_area_cpu_up(unsigned int cpu)
- {
-       struct vm_struct *area;
-+      unsigned long addr;
-+      int err;
-       area = get_vm_area(PAGE_SIZE, VM_ALLOC);
-       if (!area) {
-@@ -56,6 +61,15 @@ static int text_area_cpu_up(unsigned int cpu)
-                       cpu);
-               return -1;
-       }
-+
-+      // Map/unmap the area to ensure all page tables are pre-allocated
-+      addr = (unsigned long)area->addr;
-+      err = map_patch_area(empty_zero_page, addr);
-+      if (err)
-+              return err;
-+
-+      unmap_patch_area(addr);
-+
-       this_cpu_write(text_poke_area, area);
-       return 0;
--- 
-2.35.1
-
diff --git a/queue-4.19/series b/queue-4.19/series
index 8147db720092ac67aebecc5c2f20f50d6fbafe08..9469dcf2ce49cf9fbd1298fc33be5f4860a1e73b 100644 (file)
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -280,7 +280,6 @@ net-smc-correct-settings-of-rmb-window-update-limit.patch
 macvtap-advertise-link-netns-via-netlink.patch
 bnxt_en-eliminate-unintended-link-toggle-during-fw-r.patch
 mips-fix-fortify-panic-when-copying-asm-exception-ha.patch
-powerpc-code-patching-pre-map-patch-area.patch
 scsi-libfc-fix-use-after-free-in-fc_exch_abts_resp.patch
 usb-dwc3-omap-fix-unbalanced-disables-for-smps10_out.patch
 xtensa-fix-dtc-warning-unit_address_format.patch
diff --git a/queue-5.10/powerpc-code-patching-pre-map-patch-area.patch b/queue-5.10/powerpc-code-patching-pre-map-patch-area.patch
deleted file mode 100644 (file)
index 8ebc223..0000000
--- a/queue-5.10/powerpc-code-patching-pre-map-patch-area.patch
+++ /dev/null
@@ -1,99 +0,0 @@
-From 5d11ea532ca7b6c6dd692badd674aa22a879ce9e Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 23 Feb 2022 12:58:21 +1100
-Subject: powerpc/code-patching: Pre-map patch area
-
-From: Michael Ellerman <mpe@ellerman.id.au>
-
-[ Upstream commit 591b4b268435f00d2f0b81f786c2c7bd5ef66416 ]
-
-Paul reported a warning with DEBUG_ATOMIC_SLEEP=y:
-
-  BUG: sleeping function called from invalid context at include/linux/sched/mm.h:256
-  in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 1, name: swapper/0
-  preempt_count: 0, expected: 0
-  ...
-  Call Trace:
-    dump_stack_lvl+0xa0/0xec (unreliable)
-    __might_resched+0x2f4/0x310
-    kmem_cache_alloc+0x220/0x4b0
-    __pud_alloc+0x74/0x1d0
-    hash__map_kernel_page+0x2cc/0x390
-    do_patch_instruction+0x134/0x4a0
-    arch_jump_label_transform+0x64/0x78
-    __jump_label_update+0x148/0x180
-    static_key_enable_cpuslocked+0xd0/0x120
-    static_key_enable+0x30/0x50
-    check_kvm_guest+0x60/0x88
-    pSeries_smp_probe+0x54/0xb0
-    smp_prepare_cpus+0x3e0/0x430
-    kernel_init_freeable+0x20c/0x43c
-    kernel_init+0x30/0x1a0
-    ret_from_kernel_thread+0x5c/0x64
-
-Peter pointed out that this is because do_patch_instruction() has
-disabled interrupts, but then map_patch_area() calls map_kernel_page()
-then hash__map_kernel_page() which does a sleeping memory allocation.
-
-We only see the warning in KVM guests with SMT enabled, which is not
-particularly common, or on other platforms if CONFIG_KPROBES is
-disabled, also not common. The reason we don't see it in most
-configurations is that another path that happens to have interrupts
-enabled has allocated the required page tables for us, eg. there's a
-path in kprobes init that does that. That's just pure luck though.
-
-As Christophe suggested, the simplest solution is to do a dummy
-map/unmap when we initialise the patching, so that any required page
-table levels are pre-allocated before the first call to
-do_patch_instruction(). This works because the unmap doesn't free any
-page tables that were allocated by the map, it just clears the PTE,
-leaving the page table levels there for the next map.
-
-Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
-Debugged-by: Peter Zijlstra <peterz@infradead.org>
-Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
-Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-Link: https://lore.kernel.org/r/20220223015821.473097-1-mpe@ellerman.id.au
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/lib/code-patching.c | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
-index a2e4f864b63d..4318aee65a39 100644
---- a/arch/powerpc/lib/code-patching.c
-+++ b/arch/powerpc/lib/code-patching.c
-@@ -43,9 +43,14 @@ int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
- #ifdef CONFIG_STRICT_KERNEL_RWX
- static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
-+static int map_patch_area(void *addr, unsigned long text_poke_addr);
-+static void unmap_patch_area(unsigned long addr);
-+
- static int text_area_cpu_up(unsigned int cpu)
- {
-       struct vm_struct *area;
-+      unsigned long addr;
-+      int err;
-       area = get_vm_area(PAGE_SIZE, VM_ALLOC);
-       if (!area) {
-@@ -53,6 +58,15 @@ static int text_area_cpu_up(unsigned int cpu)
-                       cpu);
-               return -1;
-       }
-+
-+      // Map/unmap the area to ensure all page tables are pre-allocated
-+      addr = (unsigned long)area->addr;
-+      err = map_patch_area(empty_zero_page, addr);
-+      if (err)
-+              return err;
-+
-+      unmap_patch_area(addr);
-+
-       this_cpu_write(text_poke_area, area);
-       return 0;
--- 
-2.35.1
-
diff --git a/queue-5.10/series b/queue-5.10/series
index 919f05bf21a3d2f101a39116525a8d48f8d39659..468e724aa638a5536c8aaa7b513f6704ed6784c3 100644 (file)
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -54,7 +54,6 @@ bluetooth-use-memset-avoid-memory-leaks.patch
 bnxt_en-eliminate-unintended-link-toggle-during-fw-r.patch
 pci-endpoint-fix-misused-goto-label.patch
 mips-fix-fortify-panic-when-copying-asm-exception-ha.patch
-powerpc-code-patching-pre-map-patch-area.patch
 powerpc-secvar-fix-refcount-leak-in-format_show.patch
 scsi-libfc-fix-use-after-free-in-fc_exch_abts_resp.patch
 can-isotp-set-default-value-for-n_as-to-50-micro-sec.patch
diff --git a/queue-5.15/powerpc-code-patching-pre-map-patch-area.patch b/queue-5.15/powerpc-code-patching-pre-map-patch-area.patch
deleted file mode 100644 (file)
index 458473b..0000000
--- a/queue-5.15/powerpc-code-patching-pre-map-patch-area.patch
+++ /dev/null
@@ -1,99 +0,0 @@
-From faaa9ec3a40b620ce8e34013f95cfca26587c8c8 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 23 Feb 2022 12:58:21 +1100
-Subject: powerpc/code-patching: Pre-map patch area
-
-From: Michael Ellerman <mpe@ellerman.id.au>
-
-[ Upstream commit 591b4b268435f00d2f0b81f786c2c7bd5ef66416 ]
-
-Paul reported a warning with DEBUG_ATOMIC_SLEEP=y:
-
-  BUG: sleeping function called from invalid context at include/linux/sched/mm.h:256
-  in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 1, name: swapper/0
-  preempt_count: 0, expected: 0
-  ...
-  Call Trace:
-    dump_stack_lvl+0xa0/0xec (unreliable)
-    __might_resched+0x2f4/0x310
-    kmem_cache_alloc+0x220/0x4b0
-    __pud_alloc+0x74/0x1d0
-    hash__map_kernel_page+0x2cc/0x390
-    do_patch_instruction+0x134/0x4a0
-    arch_jump_label_transform+0x64/0x78
-    __jump_label_update+0x148/0x180
-    static_key_enable_cpuslocked+0xd0/0x120
-    static_key_enable+0x30/0x50
-    check_kvm_guest+0x60/0x88
-    pSeries_smp_probe+0x54/0xb0
-    smp_prepare_cpus+0x3e0/0x430
-    kernel_init_freeable+0x20c/0x43c
-    kernel_init+0x30/0x1a0
-    ret_from_kernel_thread+0x5c/0x64
-
-Peter pointed out that this is because do_patch_instruction() has
-disabled interrupts, but then map_patch_area() calls map_kernel_page()
-then hash__map_kernel_page() which does a sleeping memory allocation.
-
-We only see the warning in KVM guests with SMT enabled, which is not
-particularly common, or on other platforms if CONFIG_KPROBES is
-disabled, also not common. The reason we don't see it in most
-configurations is that another path that happens to have interrupts
-enabled has allocated the required page tables for us, eg. there's a
-path in kprobes init that does that. That's just pure luck though.
-
-As Christophe suggested, the simplest solution is to do a dummy
-map/unmap when we initialise the patching, so that any required page
-table levels are pre-allocated before the first call to
-do_patch_instruction(). This works because the unmap doesn't free any
-page tables that were allocated by the map, it just clears the PTE,
-leaving the page table levels there for the next map.
-
-Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
-Debugged-by: Peter Zijlstra <peterz@infradead.org>
-Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
-Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-Link: https://lore.kernel.org/r/20220223015821.473097-1-mpe@ellerman.id.au
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/lib/code-patching.c | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
-index c5ed98823835..b76b31196be1 100644
---- a/arch/powerpc/lib/code-patching.c
-+++ b/arch/powerpc/lib/code-patching.c
-@@ -47,9 +47,14 @@ int raw_patch_instruction(u32 *addr, struct ppc_inst instr)
- #ifdef CONFIG_STRICT_KERNEL_RWX
- static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
-+static int map_patch_area(void *addr, unsigned long text_poke_addr);
-+static void unmap_patch_area(unsigned long addr);
-+
- static int text_area_cpu_up(unsigned int cpu)
- {
-       struct vm_struct *area;
-+      unsigned long addr;
-+      int err;
-       area = get_vm_area(PAGE_SIZE, VM_ALLOC);
-       if (!area) {
-@@ -57,6 +62,15 @@ static int text_area_cpu_up(unsigned int cpu)
-                       cpu);
-               return -1;
-       }
-+
-+      // Map/unmap the area to ensure all page tables are pre-allocated
-+      addr = (unsigned long)area->addr;
-+      err = map_patch_area(empty_zero_page, addr);
-+      if (err)
-+              return err;
-+
-+      unmap_patch_area(addr);
-+
-       this_cpu_write(text_poke_area, area);
-       return 0;
--- 
-2.35.1
-
diff --git a/queue-5.15/series b/queue-5.15/series
index dc38a07ef775c4f763d85826bcab1713efd0df77..e461e0e21c6e0ff521e868822a514a1db7fee86a 100644 (file)
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -84,7 +84,6 @@ bluetooth-use-memset-avoid-memory-leaks.patch
 bnxt_en-eliminate-unintended-link-toggle-during-fw-r.patch
 pci-endpoint-fix-misused-goto-label.patch
 mips-fix-fortify-panic-when-copying-asm-exception-ha.patch
-powerpc-code-patching-pre-map-patch-area.patch
 powerpc-64e-tie-ppc_book3e_64-to-ppc_fsl_book3e.patch
 powerpc-secvar-fix-refcount-leak-in-format_show.patch
 scsi-libfc-fix-use-after-free-in-fc_exch_abts_resp.patch
diff --git a/queue-5.4/powerpc-code-patching-pre-map-patch-area.patch b/queue-5.4/powerpc-code-patching-pre-map-patch-area.patch
deleted file mode 100644 (file)
index 1d632ce..0000000
--- a/queue-5.4/powerpc-code-patching-pre-map-patch-area.patch
+++ /dev/null
@@ -1,99 +0,0 @@
-From 7babdadeb603448263a6bdfbc44f81281a58a4e3 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 23 Feb 2022 12:58:21 +1100
-Subject: powerpc/code-patching: Pre-map patch area
-
-From: Michael Ellerman <mpe@ellerman.id.au>
-
-[ Upstream commit 591b4b268435f00d2f0b81f786c2c7bd5ef66416 ]
-
-Paul reported a warning with DEBUG_ATOMIC_SLEEP=y:
-
-  BUG: sleeping function called from invalid context at include/linux/sched/mm.h:256
-  in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 1, name: swapper/0
-  preempt_count: 0, expected: 0
-  ...
-  Call Trace:
-    dump_stack_lvl+0xa0/0xec (unreliable)
-    __might_resched+0x2f4/0x310
-    kmem_cache_alloc+0x220/0x4b0
-    __pud_alloc+0x74/0x1d0
-    hash__map_kernel_page+0x2cc/0x390
-    do_patch_instruction+0x134/0x4a0
-    arch_jump_label_transform+0x64/0x78
-    __jump_label_update+0x148/0x180
-    static_key_enable_cpuslocked+0xd0/0x120
-    static_key_enable+0x30/0x50
-    check_kvm_guest+0x60/0x88
-    pSeries_smp_probe+0x54/0xb0
-    smp_prepare_cpus+0x3e0/0x430
-    kernel_init_freeable+0x20c/0x43c
-    kernel_init+0x30/0x1a0
-    ret_from_kernel_thread+0x5c/0x64
-
-Peter pointed out that this is because do_patch_instruction() has
-disabled interrupts, but then map_patch_area() calls map_kernel_page()
-then hash__map_kernel_page() which does a sleeping memory allocation.
-
-We only see the warning in KVM guests with SMT enabled, which is not
-particularly common, or on other platforms if CONFIG_KPROBES is
-disabled, also not common. The reason we don't see it in most
-configurations is that another path that happens to have interrupts
-enabled has allocated the required page tables for us, eg. there's a
-path in kprobes init that does that. That's just pure luck though.
-
-As Christophe suggested, the simplest solution is to do a dummy
-map/unmap when we initialise the patching, so that any required page
-table levels are pre-allocated before the first call to
-do_patch_instruction(). This works because the unmap doesn't free any
-page tables that were allocated by the map, it just clears the PTE,
-leaving the page table levels there for the next map.
-
-Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
-Debugged-by: Peter Zijlstra <peterz@infradead.org>
-Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
-Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-Link: https://lore.kernel.org/r/20220223015821.473097-1-mpe@ellerman.id.au
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/lib/code-patching.c | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
-index a05f289e613e..e417d4470397 100644
---- a/arch/powerpc/lib/code-patching.c
-+++ b/arch/powerpc/lib/code-patching.c
-@@ -41,9 +41,14 @@ int raw_patch_instruction(unsigned int *addr, unsigned int instr)
- #ifdef CONFIG_STRICT_KERNEL_RWX
- static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
-+static int map_patch_area(void *addr, unsigned long text_poke_addr);
-+static void unmap_patch_area(unsigned long addr);
-+
- static int text_area_cpu_up(unsigned int cpu)
- {
-       struct vm_struct *area;
-+      unsigned long addr;
-+      int err;
-       area = get_vm_area(PAGE_SIZE, VM_ALLOC);
-       if (!area) {
-@@ -51,6 +56,15 @@ static int text_area_cpu_up(unsigned int cpu)
-                       cpu);
-               return -1;
-       }
-+
-+      // Map/unmap the area to ensure all page tables are pre-allocated
-+      addr = (unsigned long)area->addr;
-+      err = map_patch_area(empty_zero_page, addr);
-+      if (err)
-+              return err;
-+
-+      unmap_patch_area(addr);
-+
-       this_cpu_write(text_poke_area, area);
-       return 0;
--- 
-2.35.1
-
diff --git a/queue-5.4/series b/queue-5.4/series
index 1e7622c7d0228274592bcd65d2b7d7e54e5c2170..855bc233c27b835de279249dbb2b72f1e9dd32d9 100644 (file)
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -398,7 +398,6 @@ macvtap-advertise-link-netns-via-netlink.patch
 tuntap-add-sanity-checks-about-msg_controllen-in-sen.patch
 bnxt_en-eliminate-unintended-link-toggle-during-fw-r.patch
 mips-fix-fortify-panic-when-copying-asm-exception-ha.patch
-powerpc-code-patching-pre-map-patch-area.patch
 scsi-libfc-fix-use-after-free-in-fc_exch_abts_resp.patch
 usb-dwc3-omap-fix-unbalanced-disables-for-smps10_out.patch
 xtensa-fix-dtc-warning-unit_address_format.patch