From: Greg Kroah-Hartman
Date: Mon, 7 Nov 2022 09:17:22 +0000 (+0100)
Subject: 5.4-stable patches
X-Git-Tag: v4.9.333~56
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=062fa86bfc37c76c3a824076f6166d5cef264317;p=thirdparty%2Fkernel%2Fstable-queue.git

5.4-stable patches

added patches:
	binder-fix-uaf-of-alloc-vma-in-race-with-munmap.patch
	memcg-enable-accounting-of-ipc-resources.patch
---

diff --git a/queue-5.4/binder-fix-uaf-of-alloc-vma-in-race-with-munmap.patch b/queue-5.4/binder-fix-uaf-of-alloc-vma-in-race-with-munmap.patch
new file mode 100644
index 00000000000..f59a7917810
--- /dev/null
+++ b/queue-5.4/binder-fix-uaf-of-alloc-vma-in-race-with-munmap.patch
@@ -0,0 +1,139 @@
+From cmllamas@google.com Mon Nov 7 10:16:49 2022
+From: Carlos Llamas <cmllamas@google.com>
+Date: Fri, 4 Nov 2022 17:55:33 +0000
+Subject: binder: fix UAF of alloc->vma in race with munmap()
+To: "Greg Kroah-Hartman", "Arve Hjønnevåg", "Todd Kjos", "Martijn Coenen", "Joel Fernandes", "Christian Brauner", "Carlos Llamas", "Suren Baghdasaryan"
+Cc: kernel-team@android.com, Jann Horn, stable@vger.kernel.org, Minchan Kim, Yang Shi, Liam Howlett
+Message-ID: <20221104175534.307317-1-cmllamas@google.com>
+
+From: Carlos Llamas <cmllamas@google.com>
+
+In commit 720c24192404 ("ANDROID: binder: change down_write to
+down_read") binder assumed the mmap read lock is sufficient to protect
+alloc->vma inside binder_update_page_range(). This used to be accurate
+until commit dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in
+munmap"), which downgrades the mmap_lock after detaching the vma from
+the rbtree in munmap(), and then proceeds to tear down and free the
+vma with only the read lock held.
+
+This means that accesses to alloc->vma in binder_update_page_range()
+will now race with vm_area_free() in munmap() and can cause a UAF, as
+shown in the following KASAN trace:
+
+ ==================================================================
+ BUG: KASAN: use-after-free in vm_insert_page+0x7c/0x1f0
+ Read of size 8 at addr ffff16204ad00600 by task server/558
+
+ CPU: 3 PID: 558 Comm: server Not tainted 5.10.150-00001-gdc8dcf942daa #1
+ Hardware name: linux,dummy-virt (DT)
+ Call trace:
+  dump_backtrace+0x0/0x2a0
+  show_stack+0x18/0x2c
+  dump_stack+0xf8/0x164
+  print_address_description.constprop.0+0x9c/0x538
+  kasan_report+0x120/0x200
+  __asan_load8+0xa0/0xc4
+  vm_insert_page+0x7c/0x1f0
+  binder_update_page_range+0x278/0x50c
+  binder_alloc_new_buf+0x3f0/0xba0
+  binder_transaction+0x64c/0x3040
+  binder_thread_write+0x924/0x2020
+  binder_ioctl+0x1610/0x2e5c
+  __arm64_sys_ioctl+0xd4/0x120
+  el0_svc_common.constprop.0+0xac/0x270
+  do_el0_svc+0x38/0xa0
+  el0_svc+0x1c/0x2c
+  el0_sync_handler+0xe8/0x114
+  el0_sync+0x180/0x1c0
+
+ Allocated by task 559:
+  kasan_save_stack+0x38/0x6c
+  __kasan_kmalloc.constprop.0+0xe4/0xf0
+  kasan_slab_alloc+0x18/0x2c
+  kmem_cache_alloc+0x1b0/0x2d0
+  vm_area_alloc+0x28/0x94
+  mmap_region+0x378/0x920
+  do_mmap+0x3f0/0x600
+  vm_mmap_pgoff+0x150/0x17c
+  ksys_mmap_pgoff+0x284/0x2dc
+  __arm64_sys_mmap+0x84/0xa4
+  el0_svc_common.constprop.0+0xac/0x270
+  do_el0_svc+0x38/0xa0
+  el0_svc+0x1c/0x2c
+  el0_sync_handler+0xe8/0x114
+  el0_sync+0x180/0x1c0
+
+ Freed by task 560:
+  kasan_save_stack+0x38/0x6c
+  kasan_set_track+0x28/0x40
+  kasan_set_free_info+0x24/0x4c
+  __kasan_slab_free+0x100/0x164
+  kasan_slab_free+0x14/0x20
+  kmem_cache_free+0xc4/0x34c
+  vm_area_free+0x1c/0x2c
+  remove_vma+0x7c/0x94
+  __do_munmap+0x358/0x710
+  __vm_munmap+0xbc/0x130
+  __arm64_sys_munmap+0x4c/0x64
+  el0_svc_common.constprop.0+0xac/0x270
+  do_el0_svc+0x38/0xa0
+  el0_svc+0x1c/0x2c
+  el0_sync_handler+0xe8/0x114
+  el0_sync+0x180/0x1c0
+
+ [...]
+ ==================================================================
+
+To prevent the race above, revert to taking the mmap write lock inside
+binder_update_page_range(). One might expect an increase in mmap lock
+contention; however, binder already serializes these calls via the
+top-level alloc->mutex, and no performance impact was observed when
+running the binder benchmark tests.
+
+Note this patch is specific to stable branches 5.4 and 5.10, since in
+newer kernel releases binder no longer caches a pointer to the vma.
+Instead, it has been refactored to use vma_lookup(), which avoids the
+issue described here. This switch was introduced in commit
+a43cfc87caaf ("android: binder: stop saving a pointer to the VMA").
+
+Fixes: dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap")
+Reported-by: Jann Horn
+Cc: stable@vger.kernel.org # 5.4.x
+Cc: Minchan Kim
+Cc: Yang Shi
+Cc: Liam Howlett
+Signed-off-by: Carlos Llamas
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/android/binder_alloc.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -212,7 +212,7 @@ static int binder_update_page_range(stru
+ 	mm = alloc->vma_vm_mm;
+ 
+ 	if (mm) {
+-		down_read(&mm->mmap_sem);
++		down_write(&mm->mmap_sem);
+ 		vma = alloc->vma;
+ 	}
+ 
+@@ -271,7 +271,7 @@ static int binder_update_page_range(stru
+ 		/* vm_insert_page does not seem to increment the refcount */
+ 	}
+ 	if (mm) {
+-		up_read(&mm->mmap_sem);
++		up_write(&mm->mmap_sem);
+ 		mmput(mm);
+ 	}
+ 	return 0;
+@@ -304,7 +304,7 @@ err_page_ptr_cleared:
+ 	}
+ err_no_vma:
+ 	if (mm) {
+-		up_read(&mm->mmap_sem);
++		up_write(&mm->mmap_sem);
+ 		mmput(mm);
+ 	}
+ 	return vma ? -ENOMEM : -ESRCH;
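The revert above restores mutual exclusion because, after dd2283f2605e,
the munmap() teardown path frees the vma while holding mmap_sem only
for read, so a second read lock on the binder side no longer keeps
alloc->vma alive. The short userspace program below is a minimal
sketch of that reader/reader race, using a POSIX rwlock as a stand-in
for mmap_sem; all names are illustrative and this is not the driver
code. Build with: gcc -pthread.

/*
 * Sketch: two rdlock holders run concurrently, so the "munmap" thread
 * can free the object while the "binder" thread is still using it.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
static int *vma;	/* stands in for alloc->vma */

static void *munmap_path(void *arg)	/* vm_area_free() side */
{
	(void)arg;
	pthread_rwlock_rdlock(&mmap_sem);	/* read lock only */
	free(vma);
	vma = NULL;
	pthread_rwlock_unlock(&mmap_sem);
	return NULL;
}

static void *binder_path(void *arg)	/* binder_update_page_range() side */
{
	(void)arg;
	/*
	 * Buggy: rdlock admits the concurrent reader above; the fix
	 * corresponds to taking pthread_rwlock_wrlock() here instead.
	 */
	pthread_rwlock_rdlock(&mmap_sem);
	if (vma)
		printf("vma = %d\n", *vma);	/* may touch freed memory */
	pthread_rwlock_unlock(&mmap_sem);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	vma = malloc(sizeof(*vma));
	*vma = 42;
	pthread_create(&t1, NULL, binder_path, NULL);
	pthread_create(&t2, NULL, munmap_path, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Taking the lock for write in binder_path() makes the two critical
sections mutually exclusive, which is exactly what the patch's
down_write()/up_write() conversion does for mmap_sem.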
diff --git a/queue-5.4/memcg-enable-accounting-of-ipc-resources.patch b/queue-5.4/memcg-enable-accounting-of-ipc-resources.patch
new file mode 100644
index 00000000000..af323491e3e
--- /dev/null
+++ b/queue-5.4/memcg-enable-accounting-of-ipc-resources.patch
@@ -0,0 +1,118 @@
+From 18319498fdd4cdf8c1c2c48cd432863b1f915d6f Mon Sep 17 00:00:00 2001
+From: Vasily Averin
+Date: Thu, 2 Sep 2021 14:55:31 -0700
+Subject: memcg: enable accounting of ipc resources
+
+From: Vasily Averin
+
+commit 18319498fdd4cdf8c1c2c48cd432863b1f915d6f upstream.
+
+When a user creates IPC objects, it forces the kernel to allocate
+memory for these long-lived objects.
+
+It makes sense to account for them in order to restrict the host's
+memory consumption from inside a memcg-limited container.
+
+This patch enables accounting for IPC shared memory segments, messages,
+semaphores and semaphore undo lists.
+
+Link: https://lkml.kernel.org/r/d6507b06-4df6-78f8-6c54-3ae86e3b5339@virtuozzo.com
+Signed-off-by: Vasily Averin
+Reviewed-by: Shakeel Butt
+Cc: Alexander Viro
+Cc: Alexey Dobriyan
+Cc: Andrei Vagin
+Cc: Borislav Petkov
+Cc: Christian Brauner
+Cc: Dmitry Safonov <0x7f454c46@gmail.com>
+Cc: "Eric W. Biederman"
+Cc: Greg Kroah-Hartman
+Cc: "H. Peter Anvin"
+Cc: Ingo Molnar
+Cc: "J. Bruce Fields"
+Cc: Jeff Layton
+Cc: Jens Axboe
+Cc: Jiri Slaby
+Cc: Johannes Weiner
+Cc: Kirill Tkhai
+Cc: Michal Hocko
+Cc: Oleg Nesterov
+Cc: Roman Gushchin
+Cc: Serge Hallyn
+Cc: Tejun Heo
+Cc: Thomas Gleixner
+Cc: Vladimir Davydov
+Cc: Yutian Yang
+Cc: Zefan Li
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Luiz Capitulino
+Signed-off-by: Greg Kroah-Hartman
+---
+ ipc/msg.c |    2 +-
+ ipc/sem.c |    9 +++++----
+ ipc/shm.c |    2 +-
+ 3 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -137,7 +137,7 @@ static int newque(struct ipc_namespace *
+ 	key_t key = params->key;
+ 	int msgflg = params->flg;
+ 
+-	msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
++	msq = kvmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT);
+ 	if (unlikely(!msq))
+ 		return -ENOMEM;
+ 
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -492,7 +492,7 @@ static struct sem_array *sem_alloc(size_
+ 	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
+ 		return NULL;
+ 
+-	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
++	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
+ 	if (unlikely(!sma))
+ 		return NULL;
+ 
+@@ -1835,7 +1835,7 @@ static inline int get_undo_list(struct s
+ 
+ 	undo_list = current->sysvsem.undo_list;
+ 	if (!undo_list) {
+-		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
++		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
+ 		if (undo_list == NULL)
+ 			return -ENOMEM;
+ 		spin_lock_init(&undo_list->lock);
+@@ -1920,7 +1920,7 @@ static struct sem_undo *find_alloc_undo(
+ 	rcu_read_unlock();
+ 
+ 	/* step 2: allocate new undo structure */
+-	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
++	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL_ACCOUNT);
+ 	if (!new) {
+ 		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
+ 		return ERR_PTR(-ENOMEM);
+@@ -1984,7 +1984,8 @@ static long do_semtimedop(int semid, str
+ 	if (nsops > ns->sc_semopm)
+ 		return -E2BIG;
+ 	if (nsops > SEMOPM_FAST) {
+-		sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
++		sops = kvmalloc_array(nsops, sizeof(*sops),
++				      GFP_KERNEL_ACCOUNT);
+ 		if (sops == NULL)
+ 			return -ENOMEM;
+ 	}
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -711,7 +711,7 @@ static int newseg(struct ipc_namespace *
+ 	    ns->shm_tot + numpages > ns->shm_ctlall)
+ 		return -ENOSPC;
+ 
+-	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
++	shp = kvmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
+ 	if (unlikely(!shp))
+ 		return -ENOMEM;
+ 
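Each syscall below reaches one of the allocation sites changed in the
patch above (newseg() in ipc/shm.c, newque() in ipc/msg.c, sem_alloc()
in ipc/sem.c), so the long-lived kernel metadata for the object is now
charged to the caller's memory cgroup. This is an illustrative sketch
rather than part of the patch; it assumes it is run inside a
memcg-limited cgroup and keeps error handling minimal.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/sem.h>
#include <sys/shm.h>

int main(void)
{
	/* newseg(): kvmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT) */
	int shm = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

	/* newque(): kvmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT) */
	int msg = msgget(IPC_PRIVATE, IPC_CREAT | 0600);

	/* sem_alloc(): kvzalloc(struct_size(...), GFP_KERNEL_ACCOUNT) */
	int sem = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (shm < 0 || msg < 0 || sem < 0) {
		perror("ipc");
		return 1;
	}
	printf("shm=%d msg=%d sem=%d\n", shm, msg, sem);
	return 0;
}

With kernel memory accounting enabled for the cgroup, these allocations
count against the container's memory limit, so a container can no
longer pin unaccounted host memory simply by creating IPC objects.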
Bruce Fields" +Cc: Jeff Layton +Cc: Jens Axboe +Cc: Jiri Slaby +Cc: Johannes Weiner +Cc: Kirill Tkhai +Cc: Michal Hocko +Cc: Oleg Nesterov +Cc: Roman Gushchin +Cc: Serge Hallyn +Cc: Tejun Heo +Cc: Thomas Gleixner +Cc: Vladimir Davydov +Cc: Yutian Yang +Cc: Zefan Li +Signed-off-by: Andrew Morton +Signed-off-by: Linus Torvalds +Signed-off-by: Luiz Capitulino +Signed-off-by: Greg Kroah-Hartman +--- + ipc/msg.c | 2 +- + ipc/sem.c | 9 +++++---- + ipc/shm.c | 2 +- + 3 files changed, 7 insertions(+), 6 deletions(-) + +--- a/ipc/msg.c ++++ b/ipc/msg.c +@@ -137,7 +137,7 @@ static int newque(struct ipc_namespace * + key_t key = params->key; + int msgflg = params->flg; + +- msq = kvmalloc(sizeof(*msq), GFP_KERNEL); ++ msq = kvmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT); + if (unlikely(!msq)) + return -ENOMEM; + +--- a/ipc/sem.c ++++ b/ipc/sem.c +@@ -492,7 +492,7 @@ static struct sem_array *sem_alloc(size_ + if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0])) + return NULL; + +- sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL); ++ sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT); + if (unlikely(!sma)) + return NULL; + +@@ -1835,7 +1835,7 @@ static inline int get_undo_list(struct s + + undo_list = current->sysvsem.undo_list; + if (!undo_list) { +- undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL); ++ undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT); + if (undo_list == NULL) + return -ENOMEM; + spin_lock_init(&undo_list->lock); +@@ -1920,7 +1920,7 @@ static struct sem_undo *find_alloc_undo( + rcu_read_unlock(); + + /* step 2: allocate new undo structure */ +- new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); ++ new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL_ACCOUNT); + if (!new) { + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); + return ERR_PTR(-ENOMEM); +@@ -1984,7 +1984,8 @@ static long do_semtimedop(int semid, str + if (nsops > ns->sc_semopm) + return -E2BIG; + if (nsops > SEMOPM_FAST) { +- sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); ++ sops = kvmalloc_array(nsops, sizeof(*sops), ++ GFP_KERNEL_ACCOUNT); + if (sops == NULL) + return -ENOMEM; + } +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -711,7 +711,7 @@ static int newseg(struct ipc_namespace * + ns->shm_tot + numpages > ns->shm_ctlall) + return -ENOSPC; + +- shp = kvmalloc(sizeof(*shp), GFP_KERNEL); ++ shp = kvmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT); + if (unlikely(!shp)) + return -ENOMEM; + diff --git a/queue-5.4/series b/queue-5.4/series index 2b1a102bf74..e2c0191be66 100644 --- a/queue-5.4/series +++ b/queue-5.4/series @@ -43,3 +43,5 @@ xfs-add-the-missed-xfs_perag_put-for-xfs_ifree_cluster.patch bluetooth-l2cap-fix-attempting-to-access-uninitialized-memory.patch block-bfq-protect-bfqd-queued-by-bfqd-lock.patch tcp-udp-fix-memory-leak-in-ipv6_renew_options.patch +memcg-enable-accounting-of-ipc-resources.patch +binder-fix-uaf-of-alloc-vma-in-race-with-munmap.patch