--- /dev/null
+From cmllamas@google.com Mon Nov 7 10:16:49 2022
+From: Carlos Llamas <cmllamas@google.com>
+Date: Fri, 4 Nov 2022 17:55:33 +0000
+Subject: binder: fix UAF of alloc->vma in race with munmap()
+To: "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Arve Hjønnevåg" <arve@android.com>, "Todd Kjos" <tkjos@android.com>, "Martijn Coenen" <maco@android.com>, "Joel Fernandes" <joel@joelfernandes.org>, "Christian Brauner" <brauner@kernel.org>, "Carlos Llamas" <cmllamas@google.com>, "Suren Baghdasaryan" <surenb@google.com>
+Cc: kernel-team@android.com, Jann Horn <jannh@google.com>, stable@vger.kernel.org, Minchan Kim <minchan@kernel.org>, Yang Shi <yang.shi@linux.alibaba.com>, Liam Howlett <liam.howlett@oracle.com>
+Message-ID: <20221104175534.307317-1-cmllamas@google.com>
+
+From: Carlos Llamas <cmllamas@google.com>
+
+In commit 720c24192404 ("ANDROID: binder: change down_write to
+down_read") binder assumed the mmap read lock is sufficient to protect
+alloc->vma inside binder_update_page_range(). This used to be accurate
+until commit dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in
+munmap"), which now downgrades the mmap_lock after detaching the vma
+from the rbtree in munmap(). It then proceeds to tear down and free the
+vma with only the read lock held.
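+
+Roughly, the interleaving looks as follows (a simplified sketch; the
+ordering is illustrative and the function names are taken from the
+traces below):
+
+  binder task                          munmap task
+  -----------                          -----------
+                                       __do_munmap()
+                                         detach vma (write lock held)
+                                         downgrade to mmap read lock
+  binder_update_page_range()
+    down_read(&mm->mmap_sem)
+    vma = alloc->vma
+                                       remove_vma()
+                                         vm_area_free(vma)
+    vm_insert_page(vma, ...)    <-- use-after-free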
+
+This means that accesses to alloc->vma in binder_update_page_range()
+will now race with vm_area_free() in munmap() and can cause a UAF, as
+shown in the following KASAN trace:
+
+ ==================================================================
+ BUG: KASAN: use-after-free in vm_insert_page+0x7c/0x1f0
+ Read of size 8 at addr ffff16204ad00600 by task server/558
+
+ CPU: 3 PID: 558 Comm: server Not tainted 5.10.150-00001-gdc8dcf942daa #1
+ Hardware name: linux,dummy-virt (DT)
+ Call trace:
+ dump_backtrace+0x0/0x2a0
+ show_stack+0x18/0x2c
+ dump_stack+0xf8/0x164
+ print_address_description.constprop.0+0x9c/0x538
+ kasan_report+0x120/0x200
+ __asan_load8+0xa0/0xc4
+ vm_insert_page+0x7c/0x1f0
+ binder_update_page_range+0x278/0x50c
+ binder_alloc_new_buf+0x3f0/0xba0
+ binder_transaction+0x64c/0x3040
+ binder_thread_write+0x924/0x2020
+ binder_ioctl+0x1610/0x2e5c
+ __arm64_sys_ioctl+0xd4/0x120
+ el0_svc_common.constprop.0+0xac/0x270
+ do_el0_svc+0x38/0xa0
+ el0_svc+0x1c/0x2c
+ el0_sync_handler+0xe8/0x114
+ el0_sync+0x180/0x1c0
+
+ Allocated by task 559:
+ kasan_save_stack+0x38/0x6c
+ __kasan_kmalloc.constprop.0+0xe4/0xf0
+ kasan_slab_alloc+0x18/0x2c
+ kmem_cache_alloc+0x1b0/0x2d0
+ vm_area_alloc+0x28/0x94
+ mmap_region+0x378/0x920
+ do_mmap+0x3f0/0x600
+ vm_mmap_pgoff+0x150/0x17c
+ ksys_mmap_pgoff+0x284/0x2dc
+ __arm64_sys_mmap+0x84/0xa4
+ el0_svc_common.constprop.0+0xac/0x270
+ do_el0_svc+0x38/0xa0
+ el0_svc+0x1c/0x2c
+ el0_sync_handler+0xe8/0x114
+ el0_sync+0x180/0x1c0
+
+ Freed by task 560:
+ kasan_save_stack+0x38/0x6c
+ kasan_set_track+0x28/0x40
+ kasan_set_free_info+0x24/0x4c
+ __kasan_slab_free+0x100/0x164
+ kasan_slab_free+0x14/0x20
+ kmem_cache_free+0xc4/0x34c
+ vm_area_free+0x1c/0x2c
+ remove_vma+0x7c/0x94
+ __do_munmap+0x358/0x710
+ __vm_munmap+0xbc/0x130
+ __arm64_sys_munmap+0x4c/0x64
+ el0_svc_common.constprop.0+0xac/0x270
+ do_el0_svc+0x38/0xa0
+ el0_svc+0x1c/0x2c
+ el0_sync_handler+0xe8/0x114
+ el0_sync+0x180/0x1c0
+
+ [...]
+ ==================================================================
+
+To prevent the race above, revert to taking the mmap write lock inside
+binder_update_page_range(). One might expect an increase in mmap lock
+contention; however, binder already serializes these calls via the
+top-level alloc->mutex, and no performance impact was observed when
+running the binder benchmark tests.
+
+Note this patch is specific to the 5.4 and 5.10 stable branches, since
+in newer kernel releases binder no longer caches a pointer to the vma.
+Instead, it has been refactored to use vma_lookup(), which avoids the
+issue described here. This switch was introduced in commit a43cfc87caaf
+("android: binder: stop saving a pointer to the VMA").
+
+Fixes: dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap")
+Reported-by: Jann Horn <jannh@google.com>
+Cc: <stable@vger.kernel.org> # 5.4.x
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Yang Shi <yang.shi@linux.alibaba.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder_alloc.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -212,7 +212,7 @@ static int binder_update_page_range(stru
+ mm = alloc->vma_vm_mm;
+
+ if (mm) {
+- down_read(&mm->mmap_sem);
++ down_write(&mm->mmap_sem);
+ vma = alloc->vma;
+ }
+
+@@ -271,7 +271,7 @@ static int binder_update_page_range(stru
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+- up_read(&mm->mmap_sem);
++ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+@@ -304,7 +304,7 @@ err_page_ptr_cleared:
+ }
+ err_no_vma:
+ if (mm) {
+- up_read(&mm->mmap_sem);
++ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return vma ? -ENOMEM : -ESRCH;
--- /dev/null
+From 18319498fdd4cdf8c1c2c48cd432863b1f915d6f Mon Sep 17 00:00:00 2001
+From: Vasily Averin <vvs@virtuozzo.com>
+Date: Thu, 2 Sep 2021 14:55:31 -0700
+Subject: memcg: enable accounting of ipc resources
+
+From: Vasily Averin <vvs@virtuozzo.com>
+
+commit 18319498fdd4cdf8c1c2c48cd432863b1f915d6f upstream.
+
+When a user creates IPC objects it forces the kernel to allocate memory
+for these long-lived objects.
+
+It makes sense to account for them so that the host's memory
+consumption can be restricted even when the allocations originate
+inside a memcg-limited container.
+
+This patch enables accounting for IPC shared memory segments, messages,
+semaphores and semaphore undo lists.
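+
+All hunks below follow the same pattern: the allocation's gfp mask is
+switched from GFP_KERNEL to GFP_KERNEL_ACCOUNT, which is GFP_KERNEL
+with the accounting flag added, so the allocation gets charged to the
+calling task's memory cgroup:
+
+	/* include/linux/gfp.h */
+	#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)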
+
+Link: https://lkml.kernel.org/r/d6507b06-4df6-78f8-6c54-3ae86e3b5339@virtuozzo.com
+Signed-off-by: Vasily Averin <vvs@virtuozzo.com>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: Alexey Dobriyan <adobriyan@gmail.com>
+Cc: Andrei Vagin <avagin@gmail.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Christian Brauner <christian.brauner@ubuntu.com>
+Cc: Dmitry Safonov <0x7f454c46@gmail.com>
+Cc: "Eric W. Biederman" <ebiederm@xmission.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "J. Bruce Fields" <bfields@fieldses.org>
+Cc: Jeff Layton <jlayton@kernel.org>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Jiri Slaby <jirislaby@kernel.org>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: Serge Hallyn <serge@hallyn.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
+Cc: Yutian Yang <nglaive@gmail.com>
+Cc: Zefan Li <lizefan.x@bytedance.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Luiz Capitulino <luizcap@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ ipc/msg.c | 2 +-
+ ipc/sem.c | 9 +++++----
+ ipc/shm.c | 2 +-
+ 3 files changed, 7 insertions(+), 6 deletions(-)
+
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -137,7 +137,7 @@ static int newque(struct ipc_namespace *
+ key_t key = params->key;
+ int msgflg = params->flg;
+
+- msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
++ msq = kvmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT);
+ if (unlikely(!msq))
+ return -ENOMEM;
+
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -492,7 +492,7 @@ static struct sem_array *sem_alloc(size_
+ if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
+ return NULL;
+
+- sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
++ sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
+ if (unlikely(!sma))
+ return NULL;
+
+@@ -1835,7 +1835,7 @@ static inline int get_undo_list(struct s
+
+ undo_list = current->sysvsem.undo_list;
+ if (!undo_list) {
+- undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
++ undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
+ if (undo_list == NULL)
+ return -ENOMEM;
+ spin_lock_init(&undo_list->lock);
+@@ -1920,7 +1920,7 @@ static struct sem_undo *find_alloc_undo(
+ rcu_read_unlock();
+
+ /* step 2: allocate new undo structure */
+- new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
++ new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL_ACCOUNT);
+ if (!new) {
+ ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
+ return ERR_PTR(-ENOMEM);
+@@ -1984,7 +1984,8 @@ static long do_semtimedop(int semid, str
+ if (nsops > ns->sc_semopm)
+ return -E2BIG;
+ if (nsops > SEMOPM_FAST) {
+- sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
++ sops = kvmalloc_array(nsops, sizeof(*sops),
++ GFP_KERNEL_ACCOUNT);
+ if (sops == NULL)
+ return -ENOMEM;
+ }
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -711,7 +711,7 @@ static int newseg(struct ipc_namespace *
+ ns->shm_tot + numpages > ns->shm_ctlall)
+ return -ENOSPC;
+
+- shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
++ shp = kvmalloc(sizeof(*shp), GFP_KERNEL_ACCOUNT);
+ if (unlikely(!shp))
+ return -ENOMEM;
+