From: Greg Kroah-Hartman
Date: Wed, 27 Oct 2021 16:20:43 +0000 (+0200)
Subject: 4.19-stable patches
X-Git-Tag: v4.4.291~45
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=b0502b5eaae9ad1850b5b406503e347dca5fd91c;p=thirdparty%2Fkernel%2Fstable-queue.git

4.19-stable patches

added patches:
      arm64-avoid-premature-usercopy-failure.patch
      powerpc-bpf-fix-bpf_mod-when-imm-1.patch
---

diff --git a/queue-4.19/arm64-avoid-premature-usercopy-failure.patch b/queue-4.19/arm64-avoid-premature-usercopy-failure.patch
new file mode 100644
index 00000000000..108afadfb9a
--- /dev/null
+++ b/queue-4.19/arm64-avoid-premature-usercopy-failure.patch
@@ -0,0 +1,203 @@
+From 295cf156231ca3f9e3a66bde7fab5e09c41835e0 Mon Sep 17 00:00:00 2001
+From: Robin Murphy
+Date: Mon, 12 Jul 2021 15:27:46 +0100
+Subject: arm64: Avoid premature usercopy failure
+
+From: Robin Murphy
+
+commit 295cf156231ca3f9e3a66bde7fab5e09c41835e0 upstream.
+
+Al reminds us that the usercopy API must only return complete failure
+if absolutely nothing could be copied. Currently, if userspace does
+something silly like giving us an unaligned pointer to Device memory,
+or a size which overruns MTE tag bounds, we may fail to honour that
+requirement when faulting on a multi-byte access even though a smaller
+access could have succeeded.
+
+Add a mitigation to the fixup routines to fall back to a single-byte
+copy if we faulted on a larger access before anything has been written
+to the destination, to guarantee making *some* forward progress. We
+needn't be too concerned about the overall performance since this should
+only occur when callers are doing something a bit dodgy in the first
+place. Particularly broken userspace might still be able to trick
+generic_perform_write() into an infinite loop by targeting write() at
+an mmap() of some read-only device register where the fault-in load
+succeeds but any store synchronously aborts such that copy_to_user() is
+genuinely unable to make progress, but, well, don't do that...
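
[Editor's illustration: a minimal C sketch of the fallback semantics described
above, not the kernel's actual code. The names fault_limit, copy_chunk() and
copy_one_byte() are hypothetical stand-ins for the faulting address boundary,
the optimized copy_template.S loop, and the single-byte ldtrb/sttrb retry in
the fixup.]

  #include <stddef.h>
  #include <string.h>

  /* Toy fault model: bytes at or beyond fault_limit are unreadable
   * (a hypothetical stand-in for a real MMU fault). */
  static const unsigned char *fault_limit;

  /* Models the optimized copy loop: a 16-byte access faults as a whole
   * if any byte of it lies beyond fault_limit; returns bytes copied. */
  static size_t copy_chunk(unsigned char *dst, const unsigned char *src, size_t n)
  {
          size_t done = 0;

          while (done + 16 <= n && src + done + 16 <= fault_limit) {
                  memcpy(dst + done, src + done, 16);
                  done += 16;
          }
          return done;
  }

  /* Models the single-byte ldtrb/sttrb retry in the fixup code. */
  static int copy_one_byte(unsigned char *dst, const unsigned char *src)
  {
          if (src >= fault_limit)
                  return -1;      /* even a single byte faults */
          *dst = *src;
          return 0;
  }

  /* Returns bytes NOT copied, mirroring the fixup's "sub x0, end, dst". */
  size_t copy_with_fallback(unsigned char *dst, const unsigned char *src, size_t n)
  {
          size_t done = copy_chunk(dst, src, n);

          /* Faulted with no forward progress? Try one byte before failing. */
          if (done == 0 && n > 0 && copy_one_byte(dst, src) == 0)
                  done = 1;

          return n - done;
  }

[With, say, ten readable bytes followed by a fault, the unfixed behaviour
reports everything uncopied as soon as the 16-byte access faults; the fallback
still copies one byte, so generic_perform_write() can keep making progress.]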
+
+CC: stable@vger.kernel.org
+Reported-by: Chen Huang
+Suggested-by: Al Viro
+Reviewed-by: Catalin Marinas
+Signed-off-by: Robin Murphy
+Link: https://lore.kernel.org/r/dc03d5c675731a1f24a62417dba5429ad744234e.1626098433.git.robin.murphy@arm.com
+Signed-off-by: Will Deacon
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Chen Huang
+---
+ arch/arm64/lib/copy_from_user.S | 13 ++++++++++---
+ arch/arm64/lib/copy_in_user.S   | 20 ++++++++++++++------
+ arch/arm64/lib/copy_to_user.S   | 14 +++++++++++---
+ 3 files changed, 35 insertions(+), 12 deletions(-)
+
+--- a/arch/arm64/lib/copy_from_user.S
++++ b/arch/arm64/lib/copy_from_user.S
+@@ -39,7 +39,7 @@
+ 	.endm
+
+ 	.macro ldrh1 ptr, regB, val
+-	uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
++	uao_user_alternative 9997f, ldrh, ldtrh, \ptr, \regB, \val
+ 	.endm
+
+ 	.macro strh1 ptr, regB, val
+@@ -47,7 +47,7 @@
+ 	.endm
+
+ 	.macro ldr1 ptr, regB, val
+-	uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
++	uao_user_alternative 9997f, ldr, ldtr, \ptr, \regB, \val
+ 	.endm
+
+ 	.macro str1 ptr, regB, val
+@@ -55,7 +55,7 @@
+ 	.endm
+
+ 	.macro ldp1 ptr, regB, regC, val
+-	uao_ldp 9998f, \ptr, \regB, \regC, \val
++	uao_ldp 9997f, \ptr, \regB, \regC, \val
+ 	.endm
+
+ 	.macro stp1 ptr, regB, regC, val
+@@ -63,9 +63,11 @@
+ 	.endm
+
+ end	.req	x5
++srcin	.req	x15
+ ENTRY(__arch_copy_from_user)
+ 	uaccess_enable_not_uao x3, x4, x5
+ 	add	end, x0, x2
++	mov	srcin, x1
+ #include "copy_template.S"
+ 	uaccess_disable_not_uao x3, x4
+ 	mov	x0, #0				// Nothing to copy
+@@ -74,6 +76,11 @@ ENDPROC(__arch_copy_from_user)
+
+ 	.section .fixup,"ax"
+ 	.align	2
++9997:	cmp	dst, dstin
++	b.ne	9998f
++	// Before being absolutely sure we couldn't copy anything, try harder
++USER(9998f, ldtrb tmp1w, [srcin])
++	strb	tmp1w, [dst], #1
+ 9998:	sub	x0, end, dst			// bytes not copied
+ 	uaccess_disable_not_uao x3, x4
+ 	ret
+--- a/arch/arm64/lib/copy_in_user.S
++++ b/arch/arm64/lib/copy_in_user.S
+@@ -40,34 +40,36 @@
+ 	.endm
+
+ 	.macro ldrh1 ptr, regB, val
+-	uao_user_alternative 9998f, ldrh, ldtrh, \ptr, \regB, \val
++	uao_user_alternative 9997f, ldrh, ldtrh, \ptr, \regB, \val
+ 	.endm
+
+ 	.macro strh1 ptr, regB, val
+-	uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
++	uao_user_alternative 9997f, strh, sttrh, \ptr, \regB, \val
+ 	.endm
+
+ 	.macro ldr1 ptr, regB, val
+-	uao_user_alternative 9998f, ldr, ldtr, \ptr, \regB, \val
++	uao_user_alternative 9997f, ldr, ldtr, \ptr, \regB, \val
+ 	.endm
+
+ 	.macro str1 ptr, regB, val
+-	uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
++	uao_user_alternative 9997f, str, sttr, \ptr, \regB, \val
+ 	.endm
+
+ 	.macro ldp1 ptr, regB, regC, val
+-	uao_ldp 9998f, \ptr, \regB, \regC, \val
++	uao_ldp 9997f, \ptr, \regB, \regC, \val
+ 	.endm
+
+ 	.macro stp1 ptr, regB, regC, val
+-	uao_stp 9998f, \ptr, \regB, \regC, \val
++	uao_stp 9997f, \ptr, \regB, \regC, \val
+ 	.endm
+
+ end	.req	x5
++srcin	.req	x15
+
+ ENTRY(__arch_copy_in_user)
+ 	uaccess_enable_not_uao x3, x4, x5
+ 	add	end, x0, x2
++	mov	srcin, x1
+ #include "copy_template.S"
+ 	uaccess_disable_not_uao x3, x4
+ 	mov	x0, #0
+@@ -76,6 +78,12 @@ ENDPROC(__arch_copy_in_user)
+
+ 	.section .fixup,"ax"
+ 	.align	2
++9997:	cmp	dst, dstin
++	b.ne	9998f
++	// Before being absolutely sure we couldn't copy anything, try harder
++USER(9998f, ldtrb tmp1w, [srcin])
++USER(9998f, sttrb tmp1w, [dst])
++	add	dst, dst, #1
+ 9998:	sub	x0, end, dst			// bytes not copied
+ 	uaccess_disable_not_uao x3, x4
+ 	ret
+--- a/arch/arm64/lib/copy_to_user.S
++++ b/arch/arm64/lib/copy_to_user.S
+@@ -42,7 +42,7 @@
+ 	.endm
+
+ 	.macro strh1 ptr, regB, val
+-	uao_user_alternative 9998f, strh, sttrh, \ptr, \regB, \val
++	uao_user_alternative 9997f, strh, sttrh, \ptr, \regB, \val
+ 	.endm
+
+ 	.macro ldr1 ptr, regB, val
+@@ -50,7 +50,7 @@
+ 	.endm
+
+ 	.macro str1 ptr, regB, val
+-	uao_user_alternative 9998f, str, sttr, \ptr, \regB, \val
++	uao_user_alternative 9997f, str, sttr, \ptr, \regB, \val
+ 	.endm
+
+ 	.macro ldp1 ptr, regB, regC, val
+@@ -58,13 +58,15 @@
+ 	.endm
+
+ 	.macro stp1 ptr, regB, regC, val
+-	uao_stp 9998f, \ptr, \regB, \regC, \val
++	uao_stp 9997f, \ptr, \regB, \regC, \val
+ 	.endm
+
+ end	.req	x5
++srcin	.req	x15
+ ENTRY(__arch_copy_to_user)
+ 	uaccess_enable_not_uao x3, x4, x5
+ 	add	end, x0, x2
++	mov	srcin, x1
+ #include "copy_template.S"
+ 	uaccess_disable_not_uao x3, x4
+ 	mov	x0, #0
+@@ -73,6 +75,12 @@ ENDPROC(__arch_copy_to_user)
+
+ 	.section .fixup,"ax"
+ 	.align	2
++9997:	cmp	dst, dstin
++	b.ne	9998f
++	// Before being absolutely sure we couldn't copy anything, try harder
++	ldrb	tmp1w, [srcin]
++USER(9998f, sttrb tmp1w, [dst])
++	add	dst, dst, #1
+ 9998:	sub	x0, end, dst			// bytes not copied
+ 	uaccess_disable_not_uao x3, x4
+ 	ret
diff --git a/queue-4.19/powerpc-bpf-fix-bpf_mod-when-imm-1.patch b/queue-4.19/powerpc-bpf-fix-bpf_mod-when-imm-1.patch
new file mode 100644
index 00000000000..907077b4e70
--- /dev/null
+++ b/queue-4.19/powerpc-bpf-fix-bpf_mod-when-imm-1.patch
@@ -0,0 +1,45 @@
+From 8bbc9d822421d9ac8ff9ed26a3713c9afc69d6c8 Mon Sep 17 00:00:00 2001
+From: "Naveen N. Rao"
+Date: Wed, 6 Oct 2021 01:55:22 +0530
+Subject: powerpc/bpf: Fix BPF_MOD when imm == 1
+
+From: Naveen N. Rao
+
+commit 8bbc9d822421d9ac8ff9ed26a3713c9afc69d6c8 upstream.
+
+Only ignore the operation if dividing by 1.
+
+Fixes: 156d0e290e969c ("powerpc/ebpf/jit: Implement JIT compiler for extended BPF")
+Signed-off-by: Naveen N. Rao
+Tested-by: Johan Almbladh
+Reviewed-by: Christophe Leroy
+Acked-by: Song Liu
+Acked-by: Johan Almbladh
+Signed-off-by: Michael Ellerman
+Link: https://lore.kernel.org/r/c674ca18c3046885602caebb326213731c675d06.1633464148.git.naveen.n.rao@linux.vnet.ibm.com
+[cascardo: use PPC_LI instead of EMIT(PPC_RAW_LI)]
+Signed-off-by: Thadeu Lima de Souza Cascardo
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/powerpc/net/bpf_jit_comp64.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/net/bpf_jit_comp64.c
++++ b/arch/powerpc/net/bpf_jit_comp64.c
+@@ -385,8 +385,14 @@ static int bpf_jit_build_body(struct bpf
+ 		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
+ 			if (imm == 0)
+ 				return -EINVAL;
+-			else if (imm == 1)
+-				goto bpf_alu32_trunc;
++			if (imm == 1) {
++				if (BPF_OP(code) == BPF_DIV) {
++					goto bpf_alu32_trunc;
++				} else {
++					PPC_LI(dst_reg, 0);
++					break;
++				}
++			}
+
+ 			PPC_LI32(b2p[TMP_REG_1], imm);
+ 			switch (BPF_CLASS(code)) {
diff --git a/queue-4.19/series b/queue-4.19/series
index 8929742951c..dc1ee657245 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -2,3 +2,5 @@ arm-9133-1-mm-proc-macros-ensure-_tlb_fns-are-4b-aligned.patch
 arm-9134-1-remove-duplicate-memcpy-definition.patch
 arm-9139-1-kprobes-fix-arch_init_kprobes-prototype.patch
 arm-9141-1-only-warn-about-xip-address-when-not-compile-testing.patch
+powerpc-bpf-fix-bpf_mod-when-imm-1.patch
+arm64-avoid-premature-usercopy-failure.patch
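
[Editor's illustration of the powerpc/bpf fix above: for any value, division
by 1 is an identity while the remainder modulo 1 is always 0, so only BPF_DIV
may be skipped when imm == 1; BPF_MOD must load 0 instead. A small standalone
C check of that arithmetic, not kernel code:]

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          int64_t dst = 42;

          /* BPF_DIV with imm == 1: dividing by 1 leaves dst unchanged,
           * so the JIT may skip the operation (goto bpf_alu32_trunc). */
          int64_t div_result = dst / 1;

          /* BPF_MOD with imm == 1: the remainder is always 0, so the JIT
           * must emit a load of 0 (PPC_LI(dst_reg, 0)) rather than treat
           * it as a no-op, which is the bug the patch fixes. */
          int64_t mod_result = dst % 1;

          printf("div=%lld mod=%lld\n",
                 (long long)div_result, (long long)mod_result);
          /* prints: div=42 mod=0 */
          return 0;
  }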