+++ /dev/null
-From 9f4ba417df4c9ff048c90698c54c6a856b2212c0 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 23 Feb 2022 12:58:21 +1100
-Subject: powerpc/code-patching: Pre-map patch area
-
-From: Michael Ellerman <mpe@ellerman.id.au>
-
-[ Upstream commit 591b4b268435f00d2f0b81f786c2c7bd5ef66416 ]
-
-Paul reported a warning with DEBUG_ATOMIC_SLEEP=y:
-
- BUG: sleeping function called from invalid context at include/linux/sched/mm.h:256
- in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 1, name: swapper/0
- preempt_count: 0, expected: 0
- ...
- Call Trace:
- dump_stack_lvl+0xa0/0xec (unreliable)
- __might_resched+0x2f4/0x310
- kmem_cache_alloc+0x220/0x4b0
- __pud_alloc+0x74/0x1d0
- hash__map_kernel_page+0x2cc/0x390
- do_patch_instruction+0x134/0x4a0
- arch_jump_label_transform+0x64/0x78
- __jump_label_update+0x148/0x180
- static_key_enable_cpuslocked+0xd0/0x120
- static_key_enable+0x30/0x50
- check_kvm_guest+0x60/0x88
- pSeries_smp_probe+0x54/0xb0
- smp_prepare_cpus+0x3e0/0x430
- kernel_init_freeable+0x20c/0x43c
- kernel_init+0x30/0x1a0
- ret_from_kernel_thread+0x5c/0x64
-
-Peter pointed out that this is because do_patch_instruction() has
-disabled interrupts, but then map_patch_area() calls map_kernel_page(),
-then hash__map_kernel_page(), which does a sleeping memory allocation.
-
-We only see the warning in KVM guests with SMT enabled, which is not
-particularly common, or on other platforms if CONFIG_KPROBES is
-disabled, which is also not common. The reason we don't see it in most
-configurations is that another path that happens to have interrupts
-enabled has allocated the required page tables for us, eg. there's a
-path in kprobes init that does that. That's just pure luck though.
-
-As Christophe suggested, the simplest solution is to do a dummy
-map/unmap when we initialise the patching, so that any required page
-table levels are pre-allocated before the first call to
-do_patch_instruction(). This works because the unmap doesn't free any
-page tables that were allocated by the map, it just clears the PTE,
-leaving the page table levels there for the next map.
-
-Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
-Debugged-by: Peter Zijlstra <peterz@infradead.org>
-Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
-Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-Link: https://lore.kernel.org/r/20220223015821.473097-1-mpe@ellerman.id.au
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/lib/code-patching.c | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
-index 85f84b45d3a0..c58a619a68b3 100644
---- a/arch/powerpc/lib/code-patching.c
-+++ b/arch/powerpc/lib/code-patching.c
-@@ -47,9 +47,14 @@ int raw_patch_instruction(unsigned int *addr, unsigned int instr)
- #ifdef CONFIG_STRICT_KERNEL_RWX
- static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
-
-+static int map_patch_area(void *addr, unsigned long text_poke_addr);
-+static void unmap_patch_area(unsigned long addr);
-+
- static int text_area_cpu_up(unsigned int cpu)
- {
- struct vm_struct *area;
-+ unsigned long addr;
-+ int err;
-
- area = get_vm_area(PAGE_SIZE, VM_ALLOC);
- if (!area) {
-@@ -57,6 +62,15 @@ static int text_area_cpu_up(unsigned int cpu)
- cpu);
- return -1;
- }
-+
-+ // Map/unmap the area to ensure all page tables are pre-allocated
-+ addr = (unsigned long)area->addr;
-+ err = map_patch_area(empty_zero_page, addr);
-+ if (err)
-+ return err;
-+
-+ unmap_patch_area(addr);
-+
- this_cpu_write(text_poke_area, area);
-
- return 0;
---
-2.35.1
-
macvtap-advertise-link-netns-via-netlink.patch
bnxt_en-eliminate-unintended-link-toggle-during-fw-r.patch
mips-fix-fortify-panic-when-copying-asm-exception-ha.patch
-powerpc-code-patching-pre-map-patch-area.patch
scsi-libfc-fix-use-after-free-in-fc_exch_abts_resp.patch
usb-dwc3-omap-fix-unbalanced-disables-for-smps10_out.patch
xtensa-fix-dtc-warning-unit_address_format.patch
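
For reference, the patched function reads as follows once the hunks
above are applied. This is a reconstruction from the diff context, so
details outside the hunks (such as the exact WARN_ONCE wording) may
vary slightly between the stable branches:

  static int text_area_cpu_up(unsigned int cpu)
  {
  	struct vm_struct *area;
  	unsigned long addr;
  	int err;

  	/* Reserve one page of kernel virtual address space for this CPU. */
  	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
  	if (!area) {
  		WARN_ONCE(1, "Failed to create text area for cpu %d\n",
  			cpu);
  		return -1;
  	}

  	/*
  	 * Map/unmap the area once now, while sleeping is still allowed.
  	 * map_patch_area() may sleep to allocate missing page table
  	 * levels; unmap_patch_area() only clears the PTE, so the levels
  	 * allocated here remain in place for do_patch_instruction(),
  	 * which later maps the same address with interrupts disabled.
  	 */
  	addr = (unsigned long)area->addr;
  	err = map_patch_area(empty_zero_page, addr);
  	if (err)
  		return err;

  	unmap_patch_area(addr);

  	this_cpu_write(text_poke_area, area);

  	return 0;
  }

The same change, and the same reconstruction, applies to each of the
other stable branches below; only the context lines around the hunks
differ.
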
+++ /dev/null
-From 6f97e4943659dbc7d20c176aaa564adc43d6d262 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 23 Feb 2022 12:58:21 +1100
-Subject: powerpc/code-patching: Pre-map patch area
-
-From: Michael Ellerman <mpe@ellerman.id.au>
-
-[ Upstream commit 591b4b268435f00d2f0b81f786c2c7bd5ef66416 ]
-
-Paul reported a warning with DEBUG_ATOMIC_SLEEP=y:
-
- BUG: sleeping function called from invalid context at include/linux/sched/mm.h:256
- in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 1, name: swapper/0
- preempt_count: 0, expected: 0
- ...
- Call Trace:
- dump_stack_lvl+0xa0/0xec (unreliable)
- __might_resched+0x2f4/0x310
- kmem_cache_alloc+0x220/0x4b0
- __pud_alloc+0x74/0x1d0
- hash__map_kernel_page+0x2cc/0x390
- do_patch_instruction+0x134/0x4a0
- arch_jump_label_transform+0x64/0x78
- __jump_label_update+0x148/0x180
- static_key_enable_cpuslocked+0xd0/0x120
- static_key_enable+0x30/0x50
- check_kvm_guest+0x60/0x88
- pSeries_smp_probe+0x54/0xb0
- smp_prepare_cpus+0x3e0/0x430
- kernel_init_freeable+0x20c/0x43c
- kernel_init+0x30/0x1a0
- ret_from_kernel_thread+0x5c/0x64
-
-Peter pointed out that this is because do_patch_instruction() has
-disabled interrupts, but then map_patch_area() calls map_kernel_page(),
-then hash__map_kernel_page(), which does a sleeping memory allocation.
-
-We only see the warning in KVM guests with SMT enabled, which is not
-particularly common, or on other platforms if CONFIG_KPROBES is
-disabled, which is also not common. The reason we don't see it in most
-configurations is that another path that happens to have interrupts
-enabled has allocated the required page tables for us, eg. there's a
-path in kprobes init that does that. That's just pure luck though.
-
-As Christophe suggested, the simplest solution is to do a dummy
-map/unmap when we initialise the patching, so that any required page
-table levels are pre-allocated before the first call to
-do_patch_instruction(). This works because the unmap doesn't free any
-page tables that were allocated by the map, it just clears the PTE,
-leaving the page table levels there for the next map.
-
-Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
-Debugged-by: Peter Zijlstra <peterz@infradead.org>
-Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
-Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-Link: https://lore.kernel.org/r/20220223015821.473097-1-mpe@ellerman.id.au
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/lib/code-patching.c | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
-index bb245dbf6c57..2b9a92ea2d89 100644
---- a/arch/powerpc/lib/code-patching.c
-+++ b/arch/powerpc/lib/code-patching.c
-@@ -46,9 +46,14 @@ int raw_patch_instruction(unsigned int *addr, unsigned int instr)
- #ifdef CONFIG_STRICT_KERNEL_RWX
- static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
-
-+static int map_patch_area(void *addr, unsigned long text_poke_addr);
-+static void unmap_patch_area(unsigned long addr);
-+
- static int text_area_cpu_up(unsigned int cpu)
- {
- struct vm_struct *area;
-+ unsigned long addr;
-+ int err;
-
- area = get_vm_area(PAGE_SIZE, VM_ALLOC);
- if (!area) {
-@@ -56,6 +61,15 @@ static int text_area_cpu_up(unsigned int cpu)
- cpu);
- return -1;
- }
-+
-+ // Map/unmap the area to ensure all page tables are pre-allocated
-+ addr = (unsigned long)area->addr;
-+ err = map_patch_area(empty_zero_page, addr);
-+ if (err)
-+ return err;
-+
-+ unmap_patch_area(addr);
-+
- this_cpu_write(text_poke_area, area);
-
- return 0;
---
-2.35.1
-
macvtap-advertise-link-netns-via-netlink.patch
bnxt_en-eliminate-unintended-link-toggle-during-fw-r.patch
mips-fix-fortify-panic-when-copying-asm-exception-ha.patch
-powerpc-code-patching-pre-map-patch-area.patch
scsi-libfc-fix-use-after-free-in-fc_exch_abts_resp.patch
usb-dwc3-omap-fix-unbalanced-disables-for-smps10_out.patch
xtensa-fix-dtc-warning-unit_address_format.patch
+++ /dev/null
-From 5d11ea532ca7b6c6dd692badd674aa22a879ce9e Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 23 Feb 2022 12:58:21 +1100
-Subject: powerpc/code-patching: Pre-map patch area
-
-From: Michael Ellerman <mpe@ellerman.id.au>
-
-[ Upstream commit 591b4b268435f00d2f0b81f786c2c7bd5ef66416 ]
-
-Paul reported a warning with DEBUG_ATOMIC_SLEEP=y:
-
- BUG: sleeping function called from invalid context at include/linux/sched/mm.h:256
- in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 1, name: swapper/0
- preempt_count: 0, expected: 0
- ...
- Call Trace:
- dump_stack_lvl+0xa0/0xec (unreliable)
- __might_resched+0x2f4/0x310
- kmem_cache_alloc+0x220/0x4b0
- __pud_alloc+0x74/0x1d0
- hash__map_kernel_page+0x2cc/0x390
- do_patch_instruction+0x134/0x4a0
- arch_jump_label_transform+0x64/0x78
- __jump_label_update+0x148/0x180
- static_key_enable_cpuslocked+0xd0/0x120
- static_key_enable+0x30/0x50
- check_kvm_guest+0x60/0x88
- pSeries_smp_probe+0x54/0xb0
- smp_prepare_cpus+0x3e0/0x430
- kernel_init_freeable+0x20c/0x43c
- kernel_init+0x30/0x1a0
- ret_from_kernel_thread+0x5c/0x64
-
-Peter pointed out that this is because do_patch_instruction() has
-disabled interrupts, but then map_patch_area() calls map_kernel_page(),
-then hash__map_kernel_page(), which does a sleeping memory allocation.
-
-We only see the warning in KVM guests with SMT enabled, which is not
-particularly common, or on other platforms if CONFIG_KPROBES is
-disabled, which is also not common. The reason we don't see it in most
-configurations is that another path that happens to have interrupts
-enabled has allocated the required page tables for us, eg. there's a
-path in kprobes init that does that. That's just pure luck though.
-
-As Christophe suggested, the simplest solution is to do a dummy
-map/unmap when we initialise the patching, so that any required page
-table levels are pre-allocated before the first call to
-do_patch_instruction(). This works because the unmap doesn't free any
-page tables that were allocated by the map, it just clears the PTE,
-leaving the page table levels there for the next map.
-
-Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
-Debugged-by: Peter Zijlstra <peterz@infradead.org>
-Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
-Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-Link: https://lore.kernel.org/r/20220223015821.473097-1-mpe@ellerman.id.au
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/lib/code-patching.c | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
-index a2e4f864b63d..4318aee65a39 100644
---- a/arch/powerpc/lib/code-patching.c
-+++ b/arch/powerpc/lib/code-patching.c
-@@ -43,9 +43,14 @@ int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
- #ifdef CONFIG_STRICT_KERNEL_RWX
- static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
-
-+static int map_patch_area(void *addr, unsigned long text_poke_addr);
-+static void unmap_patch_area(unsigned long addr);
-+
- static int text_area_cpu_up(unsigned int cpu)
- {
- struct vm_struct *area;
-+ unsigned long addr;
-+ int err;
-
- area = get_vm_area(PAGE_SIZE, VM_ALLOC);
- if (!area) {
-@@ -53,6 +58,15 @@ static int text_area_cpu_up(unsigned int cpu)
- cpu);
- return -1;
- }
-+
-+ // Map/unmap the area to ensure all page tables are pre-allocated
-+ addr = (unsigned long)area->addr;
-+ err = map_patch_area(empty_zero_page, addr);
-+ if (err)
-+ return err;
-+
-+ unmap_patch_area(addr);
-+
- this_cpu_write(text_poke_area, area);
-
- return 0;
---
-2.35.1
-
bnxt_en-eliminate-unintended-link-toggle-during-fw-r.patch
pci-endpoint-fix-misused-goto-label.patch
mips-fix-fortify-panic-when-copying-asm-exception-ha.patch
-powerpc-code-patching-pre-map-patch-area.patch
powerpc-secvar-fix-refcount-leak-in-format_show.patch
scsi-libfc-fix-use-after-free-in-fc_exch_abts_resp.patch
can-isotp-set-default-value-for-n_as-to-50-micro-sec.patch
+++ /dev/null
-From faaa9ec3a40b620ce8e34013f95cfca26587c8c8 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 23 Feb 2022 12:58:21 +1100
-Subject: powerpc/code-patching: Pre-map patch area
-
-From: Michael Ellerman <mpe@ellerman.id.au>
-
-[ Upstream commit 591b4b268435f00d2f0b81f786c2c7bd5ef66416 ]
-
-Paul reported a warning with DEBUG_ATOMIC_SLEEP=y:
-
- BUG: sleeping function called from invalid context at include/linux/sched/mm.h:256
- in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 1, name: swapper/0
- preempt_count: 0, expected: 0
- ...
- Call Trace:
- dump_stack_lvl+0xa0/0xec (unreliable)
- __might_resched+0x2f4/0x310
- kmem_cache_alloc+0x220/0x4b0
- __pud_alloc+0x74/0x1d0
- hash__map_kernel_page+0x2cc/0x390
- do_patch_instruction+0x134/0x4a0
- arch_jump_label_transform+0x64/0x78
- __jump_label_update+0x148/0x180
- static_key_enable_cpuslocked+0xd0/0x120
- static_key_enable+0x30/0x50
- check_kvm_guest+0x60/0x88
- pSeries_smp_probe+0x54/0xb0
- smp_prepare_cpus+0x3e0/0x430
- kernel_init_freeable+0x20c/0x43c
- kernel_init+0x30/0x1a0
- ret_from_kernel_thread+0x5c/0x64
-
-Peter pointed out that this is because do_patch_instruction() has
-disabled interrupts, but then map_patch_area() calls map_kernel_page(),
-then hash__map_kernel_page(), which does a sleeping memory allocation.
-
-We only see the warning in KVM guests with SMT enabled, which is not
-particularly common, or on other platforms if CONFIG_KPROBES is
-disabled, which is also not common. The reason we don't see it in most
-configurations is that another path that happens to have interrupts
-enabled has allocated the required page tables for us, eg. there's a
-path in kprobes init that does that. That's just pure luck though.
-
-As Christophe suggested, the simplest solution is to do a dummy
-map/unmap when we initialise the patching, so that any required page
-table levels are pre-allocated before the first call to
-do_patch_instruction(). This works because the unmap doesn't free any
-page tables that were allocated by the map, it just clears the PTE,
-leaving the page table levels there for the next map.
-
-Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
-Debugged-by: Peter Zijlstra <peterz@infradead.org>
-Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
-Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-Link: https://lore.kernel.org/r/20220223015821.473097-1-mpe@ellerman.id.au
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/lib/code-patching.c | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
-index c5ed98823835..b76b31196be1 100644
---- a/arch/powerpc/lib/code-patching.c
-+++ b/arch/powerpc/lib/code-patching.c
-@@ -47,9 +47,14 @@ int raw_patch_instruction(u32 *addr, struct ppc_inst instr)
- #ifdef CONFIG_STRICT_KERNEL_RWX
- static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
-
-+static int map_patch_area(void *addr, unsigned long text_poke_addr);
-+static void unmap_patch_area(unsigned long addr);
-+
- static int text_area_cpu_up(unsigned int cpu)
- {
- struct vm_struct *area;
-+ unsigned long addr;
-+ int err;
-
- area = get_vm_area(PAGE_SIZE, VM_ALLOC);
- if (!area) {
-@@ -57,6 +62,15 @@ static int text_area_cpu_up(unsigned int cpu)
- cpu);
- return -1;
- }
-+
-+ // Map/unmap the area to ensure all page tables are pre-allocated
-+ addr = (unsigned long)area->addr;
-+ err = map_patch_area(empty_zero_page, addr);
-+ if (err)
-+ return err;
-+
-+ unmap_patch_area(addr);
-+
- this_cpu_write(text_poke_area, area);
-
- return 0;
---
-2.35.1
-
bnxt_en-eliminate-unintended-link-toggle-during-fw-r.patch
pci-endpoint-fix-misused-goto-label.patch
mips-fix-fortify-panic-when-copying-asm-exception-ha.patch
-powerpc-code-patching-pre-map-patch-area.patch
powerpc-64e-tie-ppc_book3e_64-to-ppc_fsl_book3e.patch
powerpc-secvar-fix-refcount-leak-in-format_show.patch
scsi-libfc-fix-use-after-free-in-fc_exch_abts_resp.patch
+++ /dev/null
-From 7babdadeb603448263a6bdfbc44f81281a58a4e3 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 23 Feb 2022 12:58:21 +1100
-Subject: powerpc/code-patching: Pre-map patch area
-
-From: Michael Ellerman <mpe@ellerman.id.au>
-
-[ Upstream commit 591b4b268435f00d2f0b81f786c2c7bd5ef66416 ]
-
-Paul reported a warning with DEBUG_ATOMIC_SLEEP=y:
-
- BUG: sleeping function called from invalid context at include/linux/sched/mm.h:256
- in_atomic(): 0, irqs_disabled(): 1, non_block: 0, pid: 1, name: swapper/0
- preempt_count: 0, expected: 0
- ...
- Call Trace:
- dump_stack_lvl+0xa0/0xec (unreliable)
- __might_resched+0x2f4/0x310
- kmem_cache_alloc+0x220/0x4b0
- __pud_alloc+0x74/0x1d0
- hash__map_kernel_page+0x2cc/0x390
- do_patch_instruction+0x134/0x4a0
- arch_jump_label_transform+0x64/0x78
- __jump_label_update+0x148/0x180
- static_key_enable_cpuslocked+0xd0/0x120
- static_key_enable+0x30/0x50
- check_kvm_guest+0x60/0x88
- pSeries_smp_probe+0x54/0xb0
- smp_prepare_cpus+0x3e0/0x430
- kernel_init_freeable+0x20c/0x43c
- kernel_init+0x30/0x1a0
- ret_from_kernel_thread+0x5c/0x64
-
-Peter pointed out that this is because do_patch_instruction() has
-disabled interrupts, but then map_patch_area() calls map_kernel_page(),
-then hash__map_kernel_page(), which does a sleeping memory allocation.
-
-We only see the warning in KVM guests with SMT enabled, which is not
-particularly common, or on other platforms if CONFIG_KPROBES is
-disabled, which is also not common. The reason we don't see it in most
-configurations is that another path that happens to have interrupts
-enabled has allocated the required page tables for us, eg. there's a
-path in kprobes init that does that. That's just pure luck though.
-
-As Christophe suggested, the simplest solution is to do a dummy
-map/unmap when we initialise the patching, so that any required page
-table levels are pre-allocated before the first call to
-do_patch_instruction(). This works because the unmap doesn't free any
-page tables that were allocated by the map, it just clears the PTE,
-leaving the page table levels there for the next map.
-
-Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
-Debugged-by: Peter Zijlstra <peterz@infradead.org>
-Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
-Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-Link: https://lore.kernel.org/r/20220223015821.473097-1-mpe@ellerman.id.au
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/lib/code-patching.c | 14 ++++++++++++++
- 1 file changed, 14 insertions(+)
-
-diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
-index a05f289e613e..e417d4470397 100644
---- a/arch/powerpc/lib/code-patching.c
-+++ b/arch/powerpc/lib/code-patching.c
-@@ -41,9 +41,14 @@ int raw_patch_instruction(unsigned int *addr, unsigned int instr)
- #ifdef CONFIG_STRICT_KERNEL_RWX
- static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
-
-+static int map_patch_area(void *addr, unsigned long text_poke_addr);
-+static void unmap_patch_area(unsigned long addr);
-+
- static int text_area_cpu_up(unsigned int cpu)
- {
- struct vm_struct *area;
-+ unsigned long addr;
-+ int err;
-
- area = get_vm_area(PAGE_SIZE, VM_ALLOC);
- if (!area) {
-@@ -51,6 +56,15 @@ static int text_area_cpu_up(unsigned int cpu)
- cpu);
- return -1;
- }
-+
-+ // Map/unmap the area to ensure all page tables are pre-allocated
-+ addr = (unsigned long)area->addr;
-+ err = map_patch_area(empty_zero_page, addr);
-+ if (err)
-+ return err;
-+
-+ unmap_patch_area(addr);
-+
- this_cpu_write(text_poke_area, area);
-
- return 0;
---
-2.35.1
-
tuntap-add-sanity-checks-about-msg_controllen-in-sen.patch
bnxt_en-eliminate-unintended-link-toggle-during-fw-r.patch
mips-fix-fortify-panic-when-copying-asm-exception-ha.patch
-powerpc-code-patching-pre-map-patch-area.patch
scsi-libfc-fix-use-after-free-in-fc_exch_abts_resp.patch
usb-dwc3-omap-fix-unbalanced-disables-for-smps10_out.patch
xtensa-fix-dtc-warning-unit_address_format.patch