From 333a2fbdb1a8aea146494cea912d26dc149b46a9 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Tue, 23 Jul 2024 15:20:25 +0200
Subject: [PATCH] 6.6-stable patches

added patches:
      arm-9324-1-fix-get_user-broken-with-veneer.patch

---
 ...24-1-fix-get_user-broken-with-veneer.patch | 72 +++++++++++++++++++
 queue-6.6/series                              |  1 +
 2 files changed, 73 insertions(+)
 create mode 100644 queue-6.6/arm-9324-1-fix-get_user-broken-with-veneer.patch

diff --git a/queue-6.6/arm-9324-1-fix-get_user-broken-with-veneer.patch b/queue-6.6/arm-9324-1-fix-get_user-broken-with-veneer.patch
new file mode 100644
index 00000000000..76f11df2053
--- /dev/null
+++ b/queue-6.6/arm-9324-1-fix-get_user-broken-with-veneer.patch
@@ -0,0 +1,72 @@
+From 24d3ba0a7b44c1617c27f5045eecc4f34752ab03 Mon Sep 17 00:00:00 2001
+From: Masahiro Yamada
+Date: Tue, 26 Sep 2023 17:09:03 +0100
+Subject: ARM: 9324/1: fix get_user() broken with veneer
+
+From: Masahiro Yamada
+
+commit 24d3ba0a7b44c1617c27f5045eecc4f34752ab03 upstream.
+
+The 32-bit ARM kernel stops working if the kernel grows to the point
+where veneers for __get_user_* are created.
+
+AAPCS32 [1] states, "Register r12 (IP) may be used by a linker as a
+scratch register between a routine and any subroutine it calls. It
+can also be used within a routine to hold intermediate values between
+subroutine calls."
+
+However, bl instructions buried within the inline asm are unpredictable
+for compilers; hence, "ip" must be added to the clobber list.
+
+This becomes critical when veneers for __get_user_* are created because
+veneers use the ip register since commit 02e541db0540 ("ARM: 8323/1:
+force linker to use PIC veneers").
+
+[1]: https://github.com/ARM-software/abi-aa/blob/2023Q1/aapcs32/aapcs32.rst
+
+Signed-off-by: Masahiro Yamada
+Reviewed-by: Ard Biesheuvel
+Signed-off-by: Russell King (Oracle)
+Cc: John Stultz
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/arm/include/asm/uaccess.h |   14 ++------------
+ 1 file changed, 2 insertions(+), 12 deletions(-)
+
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -109,16 +109,6 @@ extern int __get_user_64t_1(void *);
+ extern int __get_user_64t_2(void *);
+ extern int __get_user_64t_4(void *);
+ 
+-#define __GUP_CLOBBER_1	"lr", "cc"
+-#ifdef CONFIG_CPU_USE_DOMAINS
+-#define __GUP_CLOBBER_2	"ip", "lr", "cc"
+-#else
+-#define __GUP_CLOBBER_2 "lr", "cc"
+-#endif
+-#define __GUP_CLOBBER_4	"lr", "cc"
+-#define __GUP_CLOBBER_32t_8 "lr", "cc"
+-#define __GUP_CLOBBER_8	"lr", "cc"
+-
+ #define __get_user_x(__r2, __p, __e, __l, __s)				\
+ 	__asm__ __volatile__ (						\
+ 		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
+@@ -126,7 +116,7 @@ extern int __get_user_64t_4(void *);
+ 		"bl	__get_user_" #__s				\
+ 		: "=&r" (__e), "=r" (__r2)				\
+ 		: "0" (__p), "r" (__l)					\
+-		: __GUP_CLOBBER_##__s)
++		: "ip", "lr", "cc")
+ 
+ /* narrowing a double-word get into a single 32bit word register: */
+ #ifdef __ARMEB__
+@@ -148,7 +138,7 @@ extern int __get_user_64t_4(void *);
+ 		"bl	__get_user_64t_" #__s				\
+ 		: "=&r" (__e), "=r" (__r2)				\
+ 		: "0" (__p), "r" (__l)					\
+-		: __GUP_CLOBBER_##__s)
++		: "ip", "lr", "cc")
+ #else
+ #define __get_user_x_64t __get_user_x
+ #endif
diff --git a/queue-6.6/series b/queue-6.6/series
index 892c754d840..39c87b77701 100644
--- a/queue-6.6/series
+++ b/queue-6.6/series
@@ -118,3 +118,4 @@ selftests-bpf-extend-tcx-tests-to-cover-late-tcx_ent.patch
 spi-mux-set-ctlr-bits_per_word_mask.patch
 alsa-hda-use-imply-for-suggesting-config_serial_mult.patch
 cifs-fix-noisy-message-on-copy_file_range.patch
+arm-9324-1-fix-get_user-broken-with-veneer.patch
-- 
2.47.3
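
As a rough illustration of the rule the fix enforces (not taken from the patch; the helper name __helper_get4 and the wrapper checked_get4 below are made up stand-ins for __get_user_4 and the get_user() plumbing), an inline-asm "bl" to an out-of-line routine needs "ip" in its clobber list, because the linker may route that branch through a veneer that is free to use r12 (ip). The sketch only builds for 32-bit ARM targets:

/*
 * Minimal sketch of a guarded 4-byte fetch via an out-of-line helper.
 * __helper_get4 is hypothetical; it stands in for the real __get_user_4,
 * which returns an error code in r0 and the fetched value in r2.
 */
extern int __helper_get4(void *);	/* hypothetical out-of-line helper */

static inline int checked_get4(void *uptr, unsigned int *out)
{
	register void *p asm("r0") = uptr;	/* argument in r0, per AAPCS32 */
	register unsigned int val asm("r2");	/* value comes back in r2 */
	register int err asm("r0");		/* error code comes back in r0 */

	asm volatile(
		"bl	__helper_get4"
		: "=&r" (err), "=r" (val)
		: "0" (p)
		/* "ip" is the point of the fix: a linker-generated veneer
		 * between this call site and the helper may clobber r12. */
		: "ip", "lr", "cc");

	*out = val;
	return err;
}

Dropping "ip" from that clobber list is only safe while the branch target stays within direct-branch range, which is exactly the assumption a kernel image large enough to need veneers breaks.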