From: Greg Kroah-Hartman
Date: Mon, 7 Nov 2022 09:17:34 +0000 (+0100)
Subject: 5.10-stable patches
X-Git-Tag: v4.9.333~55
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=79b125b7fc4f207b77cab1e2bee09863e549d61a;p=thirdparty%2Fkernel%2Fstable-queue.git

5.10-stable patches

added patches:
	binder-fix-uaf-of-alloc-vma-in-race-with-munmap.patch
	memcg-enable-accounting-of-ipc-resources.patch
	mtd-rawnand-gpmi-set-wait_for_ready-timeout-based-on-program-erase-times.patch
---

diff --git a/queue-5.10/binder-fix-uaf-of-alloc-vma-in-race-with-munmap.patch b/queue-5.10/binder-fix-uaf-of-alloc-vma-in-race-with-munmap.patch
new file mode 100644
index 00000000000..afc4e5950ce
--- /dev/null
+++ b/queue-5.10/binder-fix-uaf-of-alloc-vma-in-race-with-munmap.patch
@@ -0,0 +1,140 @@
+From cmllamas@google.com Mon Nov 7 10:16:10 2022
+From: Carlos Llamas
+Date: Fri, 4 Nov 2022 17:54:49 +0000
+Subject: binder: fix UAF of alloc->vma in race with munmap()
+To: "Greg Kroah-Hartman" , "Arve Hjønnevåg" , "Todd Kjos" , "Martijn Coenen" , "Joel Fernandes" , "Christian Brauner" , "Carlos Llamas" , "Suren Baghdasaryan"
+Cc: kernel-team@android.com, Jann Horn , stable@vger.kernel.org, Minchan Kim , Yang Shi , Liam Howlett
+Message-ID: <20221104175450.306810-1-cmllamas@google.com>
+
+From: Carlos Llamas
+
+In commit 720c24192404 ("ANDROID: binder: change down_write to
+down_read") binder assumed the mmap read lock is sufficient to protect
+alloc->vma inside binder_update_page_range(). This was accurate until
+commit dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in
+munmap"), which downgrades the mmap_lock after detaching the vma from
+the rbtree in munmap() and then tears down and frees the vma with only
+the read lock held.
+
+This means that accesses to alloc->vma in binder_update_page_range()
+can now race with vm_area_free() in munmap() and cause a UAF, as shown
+in the following KASAN trace:
+
+ ==================================================================
+ BUG: KASAN: use-after-free in vm_insert_page+0x7c/0x1f0
+ Read of size 8 at addr ffff16204ad00600 by task server/558
+
+ CPU: 3 PID: 558 Comm: server Not tainted 5.10.150-00001-gdc8dcf942daa #1
+ Hardware name: linux,dummy-virt (DT)
+ Call trace:
+  dump_backtrace+0x0/0x2a0
+  show_stack+0x18/0x2c
+  dump_stack+0xf8/0x164
+  print_address_description.constprop.0+0x9c/0x538
+  kasan_report+0x120/0x200
+  __asan_load8+0xa0/0xc4
+  vm_insert_page+0x7c/0x1f0
+  binder_update_page_range+0x278/0x50c
+  binder_alloc_new_buf+0x3f0/0xba0
+  binder_transaction+0x64c/0x3040
+  binder_thread_write+0x924/0x2020
+  binder_ioctl+0x1610/0x2e5c
+  __arm64_sys_ioctl+0xd4/0x120
+  el0_svc_common.constprop.0+0xac/0x270
+  do_el0_svc+0x38/0xa0
+  el0_svc+0x1c/0x2c
+  el0_sync_handler+0xe8/0x114
+  el0_sync+0x180/0x1c0
+
+ Allocated by task 559:
+  kasan_save_stack+0x38/0x6c
+  __kasan_kmalloc.constprop.0+0xe4/0xf0
+  kasan_slab_alloc+0x18/0x2c
+  kmem_cache_alloc+0x1b0/0x2d0
+  vm_area_alloc+0x28/0x94
+  mmap_region+0x378/0x920
+  do_mmap+0x3f0/0x600
+  vm_mmap_pgoff+0x150/0x17c
+  ksys_mmap_pgoff+0x284/0x2dc
+  __arm64_sys_mmap+0x84/0xa4
+  el0_svc_common.constprop.0+0xac/0x270
+  do_el0_svc+0x38/0xa0
+  el0_svc+0x1c/0x2c
+  el0_sync_handler+0xe8/0x114
+  el0_sync+0x180/0x1c0
+
+ Freed by task 560:
+  kasan_save_stack+0x38/0x6c
+  kasan_set_track+0x28/0x40
+  kasan_set_free_info+0x24/0x4c
+  __kasan_slab_free+0x100/0x164
+  kasan_slab_free+0x14/0x20
+  kmem_cache_free+0xc4/0x34c
+  vm_area_free+0x1c/0x2c
+  remove_vma+0x7c/0x94
+  __do_munmap+0x358/0x710
+  __vm_munmap+0xbc/0x130
+  __arm64_sys_munmap+0x4c/0x64
+  el0_svc_common.constprop.0+0xac/0x270
+  do_el0_svc+0x38/0xa0
+  el0_svc+0x1c/0x2c
+  el0_sync_handler+0xe8/0x114
+  el0_sync+0x180/0x1c0
+
+ [...]
+ ==================================================================
+
+To prevent the race above, revert to taking the mmap write lock inside
+binder_update_page_range(). One might expect an increase in mmap lock
+contention; however, binder already serializes these calls via the
+top-level alloc->mutex, and no performance impact was seen when running
+the binder benchmark tests.
+
+Note this patch is specific to the 5.4 and 5.10 stable branches, since
+in newer kernel releases binder no longer caches a pointer to the vma;
+instead, it has been refactored to use vma_lookup(), which avoids the
+issue described here. This switch was introduced in commit a43cfc87caaf
+("android: binder: stop saving a pointer to the VMA").
+
+Fixes: dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap")
+Reported-by: Jann Horn
+Cc: <stable@vger.kernel.org> # 5.10.x
+Cc: Minchan Kim
+Cc: Yang Shi
+Cc: Liam Howlett
+Signed-off-by: Carlos Llamas
+Acked-by: Todd Kjos
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/android/binder_alloc.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -212,7 +212,7 @@ static int binder_update_page_range(stru
+ 	mm = alloc->vma_vm_mm;
+ 
+ 	if (mm) {
+-		mmap_read_lock(mm);
++		mmap_write_lock(mm);
+ 		vma = alloc->vma;
+ 	}
+ 
+@@ -270,7 +270,7 @@ static int binder_update_page_range(stru
+ 		trace_binder_alloc_page_end(alloc, index);
+ 	}
+ 	if (mm) {
+-		mmap_read_unlock(mm);
++		mmap_write_unlock(mm);
+ 		mmput(mm);
+ 	}
+ 	return 0;
+@@ -303,7 +303,7 @@ err_page_ptr_cleared:
+ 	}
+ err_no_vma:
+ 	if (mm) {
+-		mmap_read_unlock(mm);
++		mmap_write_unlock(mm);
+ 		mmput(mm);
+ 	}
+ 	return vma ? -ENOMEM : -ESRCH;
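For context, the three hunks above condense to the locking pattern
sketched below. This is a hand-written sketch, not the literal 5.10
source: the page allocation loop and the error labels are elided, and
only the lock choice reflects the patch.

	/* Sketch of binder_update_page_range() after this patch. */
	static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
					    void __user *start, void __user *end)
	{
		struct vm_area_struct *vma = NULL;
		struct mm_struct *mm = alloc->vma_vm_mm;

		if (mm) {
			/*
			 * The write lock excludes munmap()'s teardown path,
			 * which since dd2283f2605e frees the vma while
			 * holding only the read lock.
			 */
			mmap_write_lock(mm);
			vma = alloc->vma;
		}

		/* ... vm_insert_page() etc. can now use vma safely ... */

		if (mm) {
			mmap_write_unlock(mm);
			mmput(mm);
		}
		return 0;
	}
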
diff --git a/queue-5.10/memcg-enable-accounting-of-ipc-resources.patch b/queue-5.10/memcg-enable-accounting-of-ipc-resources.patch
new file mode 100644
index 00000000000..826e3e90252
--- /dev/null
+++ b/queue-5.10/memcg-enable-accounting-of-ipc-resources.patch
@@ -0,0 +1,118 @@
+From 18319498fdd4cdf8c1c2c48cd432863b1f915d6f Mon Sep 17 00:00:00 2001
+From: Vasily Averin
+Date: Thu, 2 Sep 2021 14:55:31 -0700
+Subject: memcg: enable accounting of ipc resources
+
+From: Vasily Averin
+
+commit 18319498fdd4cdf8c1c2c48cd432863b1f915d6f upstream.
+
+When a user creates IPC objects, it forces the kernel to allocate
+memory for these long-living objects.
+
+It makes sense to account for them to restrict the host's memory
+consumption from inside a memcg-limited container.
+
+This patch enables accounting for IPC shared memory segments, messages,
+semaphores and semaphore undo lists.
+
+Link: https://lkml.kernel.org/r/d6507b06-4df6-78f8-6c54-3ae86e3b5339@virtuozzo.com
+Signed-off-by: Vasily Averin
+Reviewed-by: Shakeel Butt
+Cc: Alexander Viro
+Cc: Alexey Dobriyan
+Cc: Andrei Vagin
+Cc: Borislav Petkov
+Cc: Christian Brauner
+Cc: Dmitry Safonov <0x7f454c46@gmail.com>
+Cc: "Eric W. Biederman"
+Cc: Greg Kroah-Hartman
+Cc: "H. Peter Anvin"
+Cc: Ingo Molnar
+Cc: "J. Bruce Fields"
+Cc: Jeff Layton
+Cc: Jens Axboe
+Cc: Jiri Slaby
+Cc: Johannes Weiner
+Cc: Kirill Tkhai
+Cc: Michal Hocko
+Cc: Oleg Nesterov
+Cc: Roman Gushchin
+Cc: Serge Hallyn
+Cc: Tejun Heo
+Cc: Thomas Gleixner
+Cc: Vladimir Davydov
+Cc: Yutian Yang
+Cc: Zefan Li
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Luiz Capitulino
+Signed-off-by: Greg Kroah-Hartman
+---
+ ipc/msg.c |    2 +-
+ ipc/sem.c |    9 +++++----
+ ipc/shm.c |    2 +-
+ 3 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -147,7 +147,7 @@ static int newque(struct ipc_namespace *
+ 	key_t key = params->key;
+ 	int msgflg = params->flg;
+ 
+-	msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
++	msq = kvmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT);
+ 	if (unlikely(!msq))
+ 		return -ENOMEM;
+ 
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -511,7 +511,7 @@ static struct sem_array *sem_alloc(size_
+ 	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
+ 		return NULL;
+ 
+-	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
++	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
+ 	if (unlikely(!sma))
+ 		return NULL;
+ 
+@@ -1852,7 +1852,7 @@ static inline int get_undo_list(struct s
+ 
+ 	undo_list = current->sysvsem.undo_list;
+ 	if (!undo_list) {
+-		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
++		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
+ 		if (undo_list == NULL)
+ 			return -ENOMEM;
+ 		spin_lock_init(&undo_list->lock);
+@@ -1937,7 +1937,7 @@ static struct sem_undo *find_alloc_undo(
+ 	rcu_read_unlock();
+ 
+ 	/* step 2: allocate new undo structure */
+-	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
++	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL_ACCOUNT);
+ 	if (!new) {
+ 		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
+ 		return ERR_PTR(-ENOMEM);
+@@ -2001,7 +2001,8 @@ static long do_semtimedop(int semid, str
+ 	if (nsops > ns->sc_semopm)
+ 		return -E2BIG;
+ 	if (nsops > SEMOPM_FAST) {
+-		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
++		sops = kvmalloc_array(nsops, sizeof(*sops),
++				      GFP_KERNEL_ACCOUNT);
+ 		if (sops == NULL)
+ 			return -ENOMEM;
+ 	}
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -711,7 +711,7 @@ static int newseg(struct ipc_namespace *
+ 	    ns->shm_tot + numpages > ns->shm_ctlall)
+ 		return -ENOSPC;
+ 
+-	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
++	shp = kvmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
+ 	if (unlikely(!shp))
+ 		return -ENOMEM;
+ 
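Background on the one-line changes above: GFP_KERNEL_ACCOUNT is defined
as GFP_KERNEL | __GFP_ACCOUNT, which makes the allocator charge the
allocation to the memory cgroup of the allocating task. A minimal
illustration follows; the struct and helper are hypothetical, invented
for the example, and are not part of the patch:

	#include <linux/slab.h>

	struct ipc_like_object {	/* hypothetical long-living object */
		int id;
	};

	static struct ipc_like_object *ipc_like_alloc(void)
	{
		/*
		 * GFP_KERNEL | __GFP_ACCOUNT: counted against the caller's
		 * memcg limit, so a container creating many such objects
		 * exhausts its own budget instead of unaccounted host memory.
		 */
		return kzalloc(sizeof(struct ipc_like_object), GFP_KERNEL_ACCOUNT);
	}
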
Bruce Fields" +Cc: Jeff Layton +Cc: Jens Axboe +Cc: Jiri Slaby +Cc: Johannes Weiner +Cc: Kirill Tkhai +Cc: Michal Hocko +Cc: Oleg Nesterov +Cc: Roman Gushchin +Cc: Serge Hallyn +Cc: Tejun Heo +Cc: Thomas Gleixner +Cc: Vladimir Davydov +Cc: Yutian Yang +Cc: Zefan Li +Signed-off-by: Andrew Morton +Signed-off-by: Linus Torvalds +Signed-off-by: Luiz Capitulino +Signed-off-by: Greg Kroah-Hartman +--- + ipc/msg.c | 2 +- + ipc/sem.c | 9 +++++---- + ipc/shm.c | 2 +- + 3 files changed, 7 insertions(+), 6 deletions(-) + +--- a/ipc/msg.c ++++ b/ipc/msg.c +@@ -147,7 +147,7 @@ static int newque(struct ipc_namespace * + key_t key = params->key; + int msgflg = params->flg; + +- msq = kvmalloc(sizeof(*msq), GFP_KERNEL); ++ msq = kvmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT); + if (unlikely(!msq)) + return -ENOMEM; + +--- a/ipc/sem.c ++++ b/ipc/sem.c +@@ -511,7 +511,7 @@ static struct sem_array *sem_alloc(size_ + if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0])) + return NULL; + +- sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL); ++ sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT); + if (unlikely(!sma)) + return NULL; + +@@ -1852,7 +1852,7 @@ static inline int get_undo_list(struct s + + undo_list = current->sysvsem.undo_list; + if (!undo_list) { +- undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL); ++ undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT); + if (undo_list == NULL) + return -ENOMEM; + spin_lock_init(&undo_list->lock); +@@ -1937,7 +1937,7 @@ static struct sem_undo *find_alloc_undo( + rcu_read_unlock(); + + /* step 2: allocate new undo structure */ +- new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); ++ new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL_ACCOUNT); + if (!new) { + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); + return ERR_PTR(-ENOMEM); +@@ -2001,7 +2001,8 @@ static long do_semtimedop(int semid, str + if (nsops > ns->sc_semopm) + return -E2BIG; + if (nsops > SEMOPM_FAST) { +- sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); ++ sops = kvmalloc_array(nsops, sizeof(*sops), ++ GFP_KERNEL_ACCOUNT); + if (sops == NULL) + return -ENOMEM; + } +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -711,7 +711,7 @@ static int newseg(struct ipc_namespace * + ns->shm_tot + numpages > ns->shm_ctlall) + return -ENOSPC; + +- shp = kvmalloc(sizeof(*shp), GFP_KERNEL); ++ shp = kvmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT); + if (unlikely(!shp)) + return -ENOMEM; + diff --git a/queue-5.10/mtd-rawnand-gpmi-set-wait_for_ready-timeout-based-on-program-erase-times.patch b/queue-5.10/mtd-rawnand-gpmi-set-wait_for_ready-timeout-based-on-program-erase-times.patch new file mode 100644 index 00000000000..7d0fdde1f93 --- /dev/null +++ b/queue-5.10/mtd-rawnand-gpmi-set-wait_for_ready-timeout-based-on-program-erase-times.patch @@ -0,0 +1,71 @@ +From 0fddf9ad06fd9f439f137139861556671673e31c Mon Sep 17 00:00:00 2001 +From: Sascha Hauer +Date: Fri, 1 Jul 2022 13:03:41 +0200 +Subject: mtd: rawnand: gpmi: Set WAIT_FOR_READY timeout based on program/erase times +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Sascha Hauer + +commit 0fddf9ad06fd9f439f137139861556671673e31c upstream. + +06781a5026350 Fixes the calculation of the DEVICE_BUSY_TIMEOUT register +value from busy_timeout_cycles. 
diff --git a/queue-5.10/series b/queue-5.10/series
index 30c9d83cc79..428f37222c6 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -82,3 +82,6 @@ fscrypt-simplify-master-key-locking.patch
 fscrypt-stop-using-keyrings-subsystem-for-fscrypt_master_key.patch
 fscrypt-fix-keyring-memory-leak-on-mount-failure.patch
 tcp-udp-fix-memory-leak-in-ipv6_renew_options.patch
+mtd-rawnand-gpmi-set-wait_for_ready-timeout-based-on-program-erase-times.patch
+memcg-enable-accounting-of-ipc-resources.patch
+binder-fix-uaf-of-alloc-vma-in-race-with-munmap.patch