--- /dev/null
+From f9b58e8c7d031b0daa5c9a9ee27f5a4028ba53ac Mon Sep 17 00:00:00 2001
+From: Stefan Agner <stefan@agner.ch>
+Date: Sun, 30 Sep 2018 23:02:33 +0100
+Subject: ARM: 8800/1: use choice for kernel unwinders
+
+From: Stefan Agner <stefan@agner.ch>
+
+commit f9b58e8c7d031b0daa5c9a9ee27f5a4028ba53ac upstream.
+
+While in theory multiple unwinders could be compiled in, it does
+not make sense in practice. Use a choice to make the unwinder
+selection mutually exclusive and mandatory.
+
+Even before this commit it was not possible to deselect
+FRAME_POINTER. Remove the obsolete comment.
+
+Furthermore, to produce a meaningful backtrace with FRAME_POINTER
+enabled the kernel needs a specific function prologue:
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+
+To get the required prologue, gcc uses the apcs and no-sched-prolog
+options. These compiler options are not available in clang, and clang
+is not able to generate the required prologue. Make the FRAME_POINTER
+config symbol depend on !clang.
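+
+As an illustration only (not part of this patch), the fixed layout above is
+what makes a frame-pointer walk possible with nothing but constant offsets:
+[fp] holds the saved pc, [fp, #-4] the saved lr and [fp, #-12] the caller's
+fp.  A minimal user-space sketch of that walk over two fake frames, using
+native-word slots rather than real 32-bit ARM stack words:
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    int main(void)
+    {
+        uintptr_t stack[8] = { 0 };
+        uintptr_t *outer = &stack[3];     /* caller's fp -> its saved-pc slot */
+        uintptr_t *inner = &stack[7];     /* callee's fp */
+
+        outer[0]  = 0x1000;               /* caller's saved pc */
+        outer[-3] = 0;                    /* caller's saved fp: end of chain */
+        inner[0]  = 0x2000;               /* callee's saved pc */
+        inner[-3] = (uintptr_t)outer;     /* callee's saved fp -> caller */
+
+        for (uintptr_t *fp = inner; fp; fp = (uintptr_t *)fp[-3])
+            printf("pc = %#lx\n", (unsigned long)fp[0]);
+        return 0;
+    }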
+
+Suggested-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Stefan Agner <stefan@agner.ch>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/Kconfig.debug | 44 ++++++++++++++++++++++++++++----------------
+ lib/Kconfig.debug | 6 +++---
+ 2 files changed, 31 insertions(+), 19 deletions(-)
+
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -15,30 +15,42 @@ config ARM_PTDUMP
+ kernel.
+ If in doubt, say "N"
+
+-# RMK wants arm kernels compiled with frame pointers or stack unwinding.
+-# If you know what you are doing and are willing to live without stack
+-# traces, you can get a slightly smaller kernel by setting this option to
+-# n, but then RMK will have to kill you ;).
+-config FRAME_POINTER
+- bool
+- depends on !THUMB2_KERNEL
+- default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
++choice
++ prompt "Choose kernel unwinder"
++ default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
++ default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
+ help
+- If you say N here, the resulting kernel will be slightly smaller and
+- faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
+- when a problem occurs with the kernel, the information that is
+- reported is severely limited.
++ This determines which method will be used for unwinding kernel stack
++ traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
++ livepatch, lockdep, and more.
+
+-config ARM_UNWIND
+- bool "Enable stack unwinding support (EXPERIMENTAL)"
++config UNWINDER_FRAME_POINTER
++ bool "Frame pointer unwinder"
++ depends on !THUMB2_KERNEL && !CC_IS_CLANG
++ select ARCH_WANT_FRAME_POINTERS
++ select FRAME_POINTER
++ help
++ This option enables the frame pointer unwinder for unwinding
++ kernel stack traces.
++
++config UNWINDER_ARM
++ bool "ARM EABI stack unwinder"
+ depends on AEABI
+- default y
++ select ARM_UNWIND
+ help
+ This option enables stack unwinding support in the kernel
+ using the information automatically generated by the
+ compiler. The resulting kernel image is slightly bigger but
+ the performance is not affected. Currently, this feature
+- only works with EABI compilers. If unsure say Y.
++ only works with EABI compilers.
++
++endchoice
++
++config ARM_UNWIND
++ bool
++
++config FRAME_POINTER
++ bool
+
+ config OLD_MCOUNT
+ bool
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1091,7 +1091,7 @@ config LOCKDEP
+ bool
+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+ select STACKTRACE
+- select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
++ select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !SCORE
+ select KALLSYMS
+ select KALLSYMS_ALL
+
+@@ -1670,7 +1670,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
+ depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
+ depends on !X86_64
+ select STACKTRACE
+- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
++ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !SCORE
+ help
+ Provide stacktrace filter for fault-injection capabilities
+
+@@ -1679,7 +1679,7 @@ config LATENCYTOP
+ depends on DEBUG_KERNEL
+ depends on STACKTRACE_SUPPORT
+ depends on PROC_FS
+- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
++ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC
+ select KALLSYMS
+ select KALLSYMS_ALL
+ select STACKTRACE
--- /dev/null
+From foo@baz Thu Jan 27 04:37:00 PM CET 2022
+From: Lee Jones <lee.jones@linaro.org>
+Date: Tue, 25 Jan 2022 14:18:08 +0000
+Subject: ion: Do not 'put' ION handle until after its final use
+To: lee.jones@linaro.org
+Cc: stable@vger.kernel.org
+Message-ID: <20220125141808.1172511-3-lee.jones@linaro.org>
+
+From: Lee Jones <lee.jones@linaro.org>
+
+pass_to_user() eventually calls kref_put() on an ION handle which is
+still live, potentially allowing it to be legitimately freed by
+the client.
+
+Prevent this from happening before its final use in both ION_IOC_ALLOC
+and ION_IOC_IMPORT.
+
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/staging/android/ion/ion-ioctl.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/staging/android/ion/ion-ioctl.c
++++ b/drivers/staging/android/ion/ion-ioctl.c
+@@ -165,10 +165,9 @@ long ion_ioctl(struct file *filp, unsign
+ data.allocation.flags, true);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+- pass_to_user(handle);
+ data.allocation.handle = handle->id;
+-
+ cleanup_handle = handle;
++ pass_to_user(handle);
+ break;
+ }
+ case ION_IOC_FREE:
+@@ -212,11 +211,12 @@ long ion_ioctl(struct file *filp, unsign
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ } else {
++ data.handle.handle = handle->id;
+ handle = pass_to_user(handle);
+- if (IS_ERR(handle))
++ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+- else
+- data.handle.handle = handle->id;
++ data.handle.handle = 0;
++ }
+ }
+ break;
+ }
--- /dev/null
+From foo@baz Thu Jan 27 04:37:00 PM CET 2022
+From: Lee Jones <lee.jones@linaro.org>
+Date: Tue, 25 Jan 2022 14:18:06 +0000
+Subject: ion: Fix use after free during ION_IOC_ALLOC
+To: lee.jones@linaro.org
+Cc: stable@vger.kernel.org, Daniel Rosenberg <drosen@google.com>, Dennis Cagle <d-cagle@codeaurora.org>, Patrick Daly <pdaly@codeaurora.org>
+Message-ID: <20220125141808.1172511-1-lee.jones@linaro.org>
+
+From: Daniel Rosenberg <drosen@google.com>
+
+If a user happens to call ION_IOC_FREE during an ION_IOC_ALLOC
+on the just allocated id, and the copy_to_user fails, the cleanup
+code will attempt to free an already freed handle.
+
+This adds a wrapper for ion_alloc that adds an ion_handle_get to
+avoid this.
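+
+As a toy model only (plain counters instead of struct kref, run sequentially
+rather than racing), the extra reference taken by the wrapper is what keeps
+the cleanup path safe even if the client frees the handle in between:
+
+    #include <stdio.h>
+
+    static int refs = 1;                 /* reference held by the handle table */
+
+    static void put(const char *who)
+    {
+        if (--refs == 0)
+            printf("%s: handle freed\n", who);
+    }
+
+    int main(void)
+    {
+        refs++;                          /* grab_handle: ioctl keeps its own ref */
+        put("ION_IOC_FREE");             /* concurrent free drops the table's ref */
+        put("ioctl cleanup");            /* cleanup after a failed copy_to_user:
+                                            only now is the handle really freed */
+        return 0;
+    }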
+
+Signed-off-by: Daniel Rosenberg <drosen@google.com>
+Signed-off-by: Dennis Cagle <d-cagle@codeaurora.org>
+Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/staging/android/ion/ion-ioctl.c | 14 +++++++++-----
+ drivers/staging/android/ion/ion.c | 15 ++++++++++++---
+ drivers/staging/android/ion/ion.h | 4 ++++
+ 3 files changed, 25 insertions(+), 8 deletions(-)
+
+--- a/drivers/staging/android/ion/ion-ioctl.c
++++ b/drivers/staging/android/ion/ion-ioctl.c
+@@ -96,10 +96,10 @@ long ion_ioctl(struct file *filp, unsign
+ {
+ struct ion_handle *handle;
+
+- handle = ion_alloc(client, data.allocation.len,
+- data.allocation.align,
+- data.allocation.heap_id_mask,
+- data.allocation.flags);
++ handle = __ion_alloc(client, data.allocation.len,
++ data.allocation.align,
++ data.allocation.heap_id_mask,
++ data.allocation.flags, true);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+@@ -174,10 +174,14 @@ long ion_ioctl(struct file *filp, unsign
+
+ if (dir & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+- if (cleanup_handle)
++ if (cleanup_handle) {
+ ion_free(client, cleanup_handle);
++ ion_handle_put(cleanup_handle);
++ }
+ return -EFAULT;
+ }
+ }
++ if (cleanup_handle)
++ ion_handle_put(cleanup_handle);
+ return ret;
+ }
+--- a/drivers/staging/android/ion/ion.c
++++ b/drivers/staging/android/ion/ion.c
+@@ -401,9 +401,9 @@ static int ion_handle_add(struct ion_cli
+ return 0;
+ }
+
+-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+- size_t align, unsigned int heap_id_mask,
+- unsigned int flags)
++struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
++ size_t align, unsigned int heap_id_mask,
++ unsigned int flags, bool grab_handle)
+ {
+ struct ion_handle *handle;
+ struct ion_device *dev = client->dev;
+@@ -453,6 +453,8 @@ struct ion_handle *ion_alloc(struct ion_
+ return handle;
+
+ mutex_lock(&client->lock);
++ if (grab_handle)
++ ion_handle_get(handle);
+ ret = ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ if (ret) {
+@@ -462,6 +464,13 @@ struct ion_handle *ion_alloc(struct ion_
+
+ return handle;
+ }
++
++struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
++ size_t align, unsigned int heap_id_mask,
++ unsigned int flags)
++{
++ return __ion_alloc(client, len, align, heap_id_mask, flags, false);
++}
+ EXPORT_SYMBOL(ion_alloc);
+
+ void ion_free_nolock(struct ion_client *client,
+--- a/drivers/staging/android/ion/ion.h
++++ b/drivers/staging/android/ion/ion.h
+@@ -109,6 +109,10 @@ struct ion_handle *ion_alloc(struct ion_
+ size_t align, unsigned int heap_id_mask,
+ unsigned int flags);
+
++struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
++ size_t align, unsigned int heap_id_mask,
++ unsigned int flags, bool grab_handle);
++
+ /**
+ * ion_free - free a handle
+ * @client: the client
--- /dev/null
+From foo@baz Thu Jan 27 04:37:00 PM CET 2022
+From: Lee Jones <lee.jones@linaro.org>
+Date: Tue, 25 Jan 2022 14:18:07 +0000
+Subject: ion: Protect kref from userspace manipulation
+To: lee.jones@linaro.org
+Cc: stable@vger.kernel.org, Daniel Rosenberg <drosen@google.com>, Dennis Cagle <d-cagle@codeaurora.org>
+Message-ID: <20220125141808.1172511-2-lee.jones@linaro.org>
+
+From: Daniel Rosenberg <drosen@google.com>
+
+This separates the kref for ion handles into two components.
+Userspace requests through the ioctl will hold at most one
+reference to the internally used kref. All additional requests
+will increment a separate counter, and the original reference is
+only put once that counter hits 0. This protects the kernel from
+a poorly behaving userspace.
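+
+A stand-alone sketch of that scheme (plain ints instead of struct kref,
+illustration only): userspace operations move user_refs, but only the
+0 -> 1 and 1 -> 0 transitions touch the real reference count, so repeated
+frees from userspace cannot drop references the kernel still needs.
+
+    #include <stdio.h>
+
+    static int kref = 1;        /* real reference count (toy int) */
+    static int user_refs;       /* references handed out through the ioctl */
+
+    static void user_get(void)
+    {
+        if (user_refs++ == 0)
+            kref++;             /* userspace holds at most one real reference */
+    }
+
+    static void user_put(void)
+    {
+        if (user_refs == 0) {
+            printf("rejected: user has no reference\n");
+            return;
+        }
+        if (--user_refs == 0)
+            kref--;             /* the single real reference is put only once */
+    }
+
+    int main(void)
+    {
+        user_get();
+        user_get();
+        for (int i = 0; i < 5; i++)      /* userspace tries to over-free */
+            user_put();
+        printf("kref = %d (kernel-internal reference preserved)\n", kref);
+        return 0;
+    }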
+
+Signed-off-by: Daniel Rosenberg <drosen@google.com>
+[d-cagle@codeaurora.org: Resolve style issues]
+Signed-off-by: Dennis Cagle <d-cagle@codeaurora.org>
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/staging/android/ion/ion-ioctl.c | 84 +++++++++++++++++++++++++++++---
+ drivers/staging/android/ion/ion.c | 4 -
+ drivers/staging/android/ion/ion_priv.h | 4 +
+ 3 files changed, 83 insertions(+), 9 deletions(-)
+
+--- a/drivers/staging/android/ion/ion-ioctl.c
++++ b/drivers/staging/android/ion/ion-ioctl.c
+@@ -30,6 +30,69 @@ union ion_ioctl_arg {
+ struct ion_heap_query query;
+ };
+
++/* Must hold the client lock */
++static void user_ion_handle_get(struct ion_handle *handle)
++{
++ if (handle->user_ref_count++ == 0)
++ kref_get(&handle->ref);
++}
++
++/* Must hold the client lock */
++static struct ion_handle *user_ion_handle_get_check_overflow(
++ struct ion_handle *handle)
++{
++ if (handle->user_ref_count + 1 == 0)
++ return ERR_PTR(-EOVERFLOW);
++ user_ion_handle_get(handle);
++ return handle;
++}
++
++/* passes a kref to the user ref count.
++ * We know we're holding a kref to the object before and
++ * after this call, so no need to reverify handle.
++ */
++static struct ion_handle *pass_to_user(struct ion_handle *handle)
++{
++ struct ion_client *client = handle->client;
++ struct ion_handle *ret;
++
++ mutex_lock(&client->lock);
++ ret = user_ion_handle_get_check_overflow(handle);
++ ion_handle_put_nolock(handle);
++ mutex_unlock(&client->lock);
++ return ret;
++}
++
++/* Must hold the client lock */
++static int user_ion_handle_put_nolock(struct ion_handle *handle)
++{
++ int ret;
++
++ if (--handle->user_ref_count == 0)
++ ret = ion_handle_put_nolock(handle);
++
++ return ret;
++}
++
++static void user_ion_free_nolock(struct ion_client *client,
++ struct ion_handle *handle)
++{
++ bool valid_handle;
++
++ WARN_ON(client != handle->client);
++
++ valid_handle = ion_handle_validate(client, handle);
++ if (!valid_handle) {
++ WARN(1, "%s: invalid handle passed to free.\n", __func__);
++ return;
++ }
++ if (handle->user_ref_count == 0) {
++ WARN(1, "%s: User does not have access!\n", __func__);
++ return;
++ }
++ user_ion_handle_put_nolock(handle);
++}
++
+ static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
+ {
+ int ret = 0;
+@@ -102,7 +165,7 @@ long ion_ioctl(struct file *filp, unsign
+ data.allocation.flags, true);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+-
++ pass_to_user(handle);
+ data.allocation.handle = handle->id;
+
+ cleanup_handle = handle;
+@@ -118,7 +181,7 @@ long ion_ioctl(struct file *filp, unsign
+ mutex_unlock(&client->lock);
+ return PTR_ERR(handle);
+ }
+- ion_free_nolock(client, handle);
++ user_ion_free_nolock(client, handle);
+ ion_handle_put_nolock(handle);
+ mutex_unlock(&client->lock);
+ break;
+@@ -146,10 +209,15 @@ long ion_ioctl(struct file *filp, unsign
+ struct ion_handle *handle;
+
+ handle = ion_import_dma_buf_fd(client, data.fd.fd);
+- if (IS_ERR(handle))
++ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+- else
+- data.handle.handle = handle->id;
++ } else {
++ handle = pass_to_user(handle);
++ if (IS_ERR(handle))
++ ret = PTR_ERR(handle);
++ else
++ data.handle.handle = handle->id;
++ }
+ break;
+ }
+ case ION_IOC_SYNC:
+@@ -175,8 +243,10 @@ long ion_ioctl(struct file *filp, unsign
+ if (dir & _IOC_READ) {
+ if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+ if (cleanup_handle) {
+- ion_free(client, cleanup_handle);
+- ion_handle_put(cleanup_handle);
++ mutex_lock(&client->lock);
++ user_ion_free_nolock(client, cleanup_handle);
++ ion_handle_put_nolock(cleanup_handle);
++ mutex_unlock(&client->lock);
+ }
+ return -EFAULT;
+ }
+--- a/drivers/staging/android/ion/ion.c
++++ b/drivers/staging/android/ion/ion.c
+@@ -363,8 +363,8 @@ struct ion_handle *ion_handle_get_by_id_
+ return ERR_PTR(-EINVAL);
+ }
+
+-static bool ion_handle_validate(struct ion_client *client,
+- struct ion_handle *handle)
++bool ion_handle_validate(struct ion_client *client,
++ struct ion_handle *handle)
+ {
+ WARN_ON(!mutex_is_locked(&client->lock));
+ return idr_find(&client->idr, handle->id) == handle;
+--- a/drivers/staging/android/ion/ion_priv.h
++++ b/drivers/staging/android/ion/ion_priv.h
+@@ -149,6 +149,7 @@ struct ion_client {
+ */
+ struct ion_handle {
+ struct kref ref;
++ unsigned int user_ref_count;
+ struct ion_client *client;
+ struct ion_buffer *buffer;
+ struct rb_node node;
+@@ -459,6 +460,9 @@ int ion_sync_for_device(struct ion_clien
+ struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+ int id);
+
++bool ion_handle_validate(struct ion_client *client,
++ struct ion_handle *handle);
++
+ void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
+
+ int ion_handle_put_nolock(struct ion_handle *handle);
--- /dev/null
+From foo@baz Thu Jan 27 04:31:44 PM CET 2022
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Mon, 24 Jan 2022 19:32:46 +0100
+Subject: KVM: nVMX: fix EPT permissions as reported in exit qualification
+To: stable@vger.kernel.org
+Cc: "Paolo Bonzini" <pbonzini@redhat.com>, "Xiao Guangrong" <xiaoguangrong@tencent.com>, "Radim Krčmář" <rkrcmar@redhat.com>
+Message-ID: <Ye7wziIF+4bAna9E@decadent.org.uk>
+Content-Disposition: inline
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 0780516a18f87e881e42ed815f189279b0a1743c upstream.
+
+This fixes the new ept_access_test_read_only and ept_access_test_read_write
+testcases from vmx.flat.
+
+The problem is that gpte_access moves bits around to switch from EPT
+bit order (XWR) to ACC_*_MASK bit order (RWX). This results in an
+incorrect exit qualification. To fix this, make pt_access and
+pte_access operate on raw PTE values (only with NX flipped to mean
+"can execute") and call gpte_access at the end of the walk. This
+lets us use pte_access to compute the exit qualification with XWR
+bit order.
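+
+A stand-alone illustration of the NX flip (not KVM code; the low permission
+bits are made up), for the 64-bit format where execute is expressed as a
+negative NX bit: flipping NX lets the walk accumulate execute permission
+with the same bitwise AND used for the positive write/user bits.
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    int main(void)
+    {
+        const uint64_t nx = 1ULL << 63;     /* x86 PTE NX bit */
+
+        /* fake two-level walk: the parent allows execution, the leaf is NX */
+        uint64_t parent = 0x005;            /* NX clear -> may execute */
+        uint64_t leaf   = nx | 0x005;       /* NX set   -> no execute */
+
+        uint64_t eff = (parent ^ nx) & (leaf ^ nx);
+
+        printf("may execute: %s\n", (eff & nx) ? "yes" : "no");
+        return 0;
+    }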
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Xiao Guangrong <xiaoguangrong@tencent.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+[bwh: Backported to 4.9:
+ - There's no support for EPT accessed/dirty bits, so do not use
+ have_ad flag
+ - Adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/paging_tmpl.h | 34 +++++++++++++++++++++-------------
+ 1 file changed, 21 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -285,9 +285,11 @@ static int FNAME(walk_addr_generic)(stru
+ pt_element_t pte;
+ pt_element_t __user *uninitialized_var(ptep_user);
+ gfn_t table_gfn;
+- unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey;
++ u64 pt_access, pte_access;
++ unsigned index, accessed_dirty, pte_pkey;
+ gpa_t pte_gpa;
+ int offset;
++ u64 walk_nx_mask = 0;
+ const int write_fault = access & PFERR_WRITE_MASK;
+ const int user_fault = access & PFERR_USER_MASK;
+ const int fetch_fault = access & PFERR_FETCH_MASK;
+@@ -301,6 +303,7 @@ retry_walk:
+ pte = mmu->get_cr3(vcpu);
+
+ #if PTTYPE == 64
++ walk_nx_mask = 1ULL << PT64_NX_SHIFT;
+ if (walker->level == PT32E_ROOT_LEVEL) {
+ pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
+ trace_kvm_mmu_paging_element(pte, walker->level);
+@@ -312,15 +315,14 @@ retry_walk:
+ walker->max_level = walker->level;
+ ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
+
+- accessed_dirty = PT_GUEST_ACCESSED_MASK;
+- pt_access = pte_access = ACC_ALL;
++ pte_access = ~0;
+ ++walker->level;
+
+ do {
+ gfn_t real_gfn;
+ unsigned long host_addr;
+
+- pt_access &= pte_access;
++ pt_access = pte_access;
+ --walker->level;
+
+ index = PT_INDEX(addr, walker->level);
+@@ -363,6 +365,12 @@ retry_walk:
+
+ trace_kvm_mmu_paging_element(pte, walker->level);
+
++ /*
++ * Inverting the NX it lets us AND it like other
++ * permission bits.
++ */
++ pte_access = pt_access & (pte ^ walk_nx_mask);
++
+ if (unlikely(!FNAME(is_present_gpte)(pte)))
+ goto error;
+
+@@ -371,14 +379,16 @@ retry_walk:
+ goto error;
+ }
+
+- accessed_dirty &= pte;
+- pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
+-
+ walker->ptes[walker->level - 1] = pte;
+ } while (!is_last_gpte(mmu, walker->level, pte));
+
+ pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
+- errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access);
++ accessed_dirty = pte_access & PT_GUEST_ACCESSED_MASK;
++
++ /* Convert to ACC_*_MASK flags for struct guest_walker. */
++ walker->pt_access = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask);
++ walker->pte_access = FNAME(gpte_access)(vcpu, pte_access ^ walk_nx_mask);
++ errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
+ if (unlikely(errcode))
+ goto error;
+
+@@ -395,7 +405,7 @@ retry_walk:
+ walker->gfn = real_gpa >> PAGE_SHIFT;
+
+ if (!write_fault)
+- FNAME(protect_clean_gpte)(&pte_access, pte);
++ FNAME(protect_clean_gpte)(&walker->pte_access, pte);
+ else
+ /*
+ * On a write fault, fold the dirty bit into accessed_dirty.
+@@ -413,10 +423,8 @@ retry_walk:
+ goto retry_walk;
+ }
+
+- walker->pt_access = pt_access;
+- walker->pte_access = pte_access;
+ pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
+- __func__, (u64)pte, pte_access, pt_access);
++ __func__, (u64)pte, walker->pte_access, walker->pt_access);
+ return 1;
+
+ error:
+@@ -444,7 +452,7 @@ error:
+ */
+ if (!(errcode & PFERR_RSVD_MASK)) {
+ vcpu->arch.exit_qualification &= 0x187;
+- vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3;
++ vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
+ }
+ #endif
+ walker->fault.address = addr;
--- /dev/null
+From foo@baz Thu Jan 27 04:31:44 PM CET 2022
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Mon, 24 Jan 2022 19:33:29 +0100
+Subject: KVM: X86: MMU: Use the correct inherited permissions to get shadow page
+To: stable@vger.kernel.org
+Cc: Paolo Bonzini <pbonzini@redhat.com>, Lai Jiangshan <laijs@linux.alibaba.com>
+Message-ID: <Ye7w+SHXgvSxYyv/@decadent.org.uk>
+Content-Disposition: inline
+
+From: Lai Jiangshan <laijs@linux.alibaba.com>
+
+commit b1bd5cba3306691c771d558e94baa73e8b0b96b7 upstream.
+
+When computing the access permissions of a shadow page, use the effective
+permissions of the walk up to that point, i.e. the logical AND of its parents'
+permissions. Two guest PxE entries that point at the same table gfn need to
+be shadowed with different shadow pages if their parents' permissions are
+different. KVM currently uses the effective permissions of the last
+non-leaf entry for all non-leaf entries. Because all non-leaf SPTEs have
+full ("uwx") permissions, and the effective permissions are recorded only
+in role.access and merged into the leaves, this can lead to incorrect
+reuse of a shadow page and eventually to a missing guest protection page
+fault.
+
+For example, here is a shared pagetable:
+
+ pgd[] pud[] pmd[] virtual address pointers
+ /->pmd1(u--)->pte1(uw-)->page1 <- ptr1 (u--)
+ /->pud1(uw-)--->pmd2(uw-)->pte2(uw-)->page2 <- ptr2 (uw-)
+ pgd-| (shared pmd[] as above)
+ \->pud2(u--)--->pmd1(u--)->pte1(uw-)->page1 <- ptr3 (u--)
+ \->pmd2(uw-)->pte2(uw-)->page2 <- ptr4 (u--)
+
+ pud1 and pud2 point to the same pmd table, so:
+ - ptr1 and ptr3 point to the same page.
+ - ptr2 and ptr4 point to the same page.
+
+(pud1 and pud2 here are pud entries, while pmd1 and pmd2 here are pmd entries)
+
+- First, the guest reads from ptr1 first and KVM prepares a shadow
+ page table with role.access=u--, from ptr1's pud1 and ptr1's pmd1.
+ "u--" comes from the effective permissions of pgd, pud1 and
+ pmd1, which are stored in pt->access. "u--" is used also to get
+ the pagetable for pud1, instead of "uw-".
+
+- Then the guest writes to ptr2 and KVM reuses pud1 which is present.
+  The hypervisor sets up a shadow page for ptr2 with pt->access "uw-",
+  even though the pud1 pmd (because of the incorrect argument to
+  kvm_mmu_get_page in the previous step) has role.access="u--".
+
+- Then the guest reads from ptr3. The hypervisor reuses pud1's
+ shadow pmd for pud2, because both use "u--" for their permissions.
+ Thus, the shadow pmd already includes entries for both pmd1 and pmd2.
+
+- Finally, the guest writes to ptr4. This causes no vmexit or page fault,
+  because pud1's shadow page structures included a "uw-" page even though
+ its role.access was "u--".
+
+Any kind of shared pagetable might have a similar problem in a virtual
+machine without TDP enabled, if the permissions inherited through
+different ancestors differ.
+
+In order to fix the problem, we change pt->access to be an array: each
+entry holds only the permissions inherited from the parent ptes at that
+level, without permissions ANDed in from child ptes.
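+
+A stand-alone sketch of the effective-permission arithmetic from the example
+above (toy uwx bit encoding, not KVM's ACC_* masks): the same pmd entry must
+be shadowed differently depending on which parent it was reached through.
+
+    #include <stdio.h>
+
+    #define U 4
+    #define W 2
+    #define X 1                 /* toy uwx encoding, illustration only */
+
+    static void show(const char *path, int acc)
+    {
+        printf("%s: %c%c%c\n", path,
+               acc & U ? 'u' : '-', acc & W ? 'w' : '-', acc & X ? 'x' : '-');
+    }
+
+    int main(void)
+    {
+        int pud1 = U | W;       /* "uw-" */
+        int pud2 = U;           /* "u--" */
+        int pmd2 = U | W;       /* "uw-" */
+
+        show("pmd2 via pud1", pud1 & pmd2);   /* uw- */
+        show("pmd2 via pud2", pud2 & pmd2);   /* u-- */
+        return 0;
+    }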
+
+The test code is: https://lore.kernel.org/kvm/20210603050537.19605-1-jiangshanlai@gmail.com/
+Remember to test it with TDP disabled.
+
+The problem had existed long before the commit 41074d07c78b ("KVM: MMU:
+Fix inherited permissions for emulated guest pte updates"), and it
+is hard to find which is the culprit. So there is no fixes tag here.
+
+Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
+Message-Id: <20210603052455.21023-1-jiangshanlai@gmail.com>
+Cc: stable@vger.kernel.org
+Fixes: cea0f0e7ea54 ("[PATCH] KVM: MMU: Shadow page table caching")
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+[bwh: Backported to 4.9:
+ - Keep passing vcpu argument to gpte_access functions
+ - Adjust filenames, context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/virtual/kvm/mmu.txt | 4 ++--
+ arch/x86/kvm/paging_tmpl.h | 14 +++++++++-----
+ 2 files changed, 11 insertions(+), 7 deletions(-)
+
+--- a/Documentation/virtual/kvm/mmu.txt
++++ b/Documentation/virtual/kvm/mmu.txt
+@@ -152,8 +152,8 @@ Shadow pages contain the following infor
+ shadow pages) so role.quadrant takes values in the range 0..3. Each
+ quadrant maps 1GB virtual address space.
+ role.access:
+- Inherited guest access permissions in the form uwx. Note execute
+- permission is positive, not negative.
++ Inherited guest access permissions from the parent ptes in the form uwx.
++ Note execute permission is positive, not negative.
+ role.invalid:
+ The page is invalid and should not be used. It is a root page that is
+ currently pinned (by a cpu hardware register pointing to it); once it is
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -100,8 +100,8 @@ struct guest_walker {
+ gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
+ pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
+ bool pte_writable[PT_MAX_FULL_LEVELS];
+- unsigned pt_access;
+- unsigned pte_access;
++ unsigned int pt_access[PT_MAX_FULL_LEVELS];
++ unsigned int pte_access;
+ gfn_t gfn;
+ struct x86_exception fault;
+ };
+@@ -380,13 +380,15 @@ retry_walk:
+ }
+
+ walker->ptes[walker->level - 1] = pte;
++
++ /* Convert to ACC_*_MASK flags for struct guest_walker. */
++ walker->pt_access[walker->level - 1] = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask);
+ } while (!is_last_gpte(mmu, walker->level, pte));
+
+ pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
+ accessed_dirty = pte_access & PT_GUEST_ACCESSED_MASK;
+
+ /* Convert to ACC_*_MASK flags for struct guest_walker. */
+- walker->pt_access = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask);
+ walker->pte_access = FNAME(gpte_access)(vcpu, pte_access ^ walk_nx_mask);
+ errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
+ if (unlikely(errcode))
+@@ -424,7 +426,8 @@ retry_walk:
+ }
+
+ pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
+- __func__, (u64)pte, walker->pte_access, walker->pt_access);
++ __func__, (u64)pte, walker->pte_access,
++ walker->pt_access[walker->level - 1]);
+ return 1;
+
+ error:
+@@ -586,7 +589,7 @@ static int FNAME(fetch)(struct kvm_vcpu
+ {
+ struct kvm_mmu_page *sp = NULL;
+ struct kvm_shadow_walk_iterator it;
+- unsigned direct_access, access = gw->pt_access;
++ unsigned int direct_access, access;
+ int top_level, ret;
+ gfn_t gfn, base_gfn;
+
+@@ -618,6 +621,7 @@ static int FNAME(fetch)(struct kvm_vcpu
+ sp = NULL;
+ if (!is_shadow_present_pte(*it.sptep)) {
+ table_gfn = gw->table_gfn[it.level - 2];
++ access = gw->pt_access[it.level - 2];
+ sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
+ false, access);
+ }
--- /dev/null
+From foo@baz Thu Jan 27 04:31:44 PM CET 2022
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Mon, 24 Jan 2022 19:35:29 +0100
+Subject: media: firewire: firedtv-avc: fix a buffer overflow in avc_ca_pmt()
+To: stable@vger.kernel.org
+Cc: Dan Carpenter <dan.carpenter@oracle.com>, Luo Likang <luolikang@nsfocus.com>, Hans Verkuil <hverkuil-cisco@xs4all.nl>, Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Message-ID: <Ye7xcRkdnlSW+Oy2@decadent.org.uk>
+Content-Disposition: inline
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 35d2969ea3c7d32aee78066b1f3cf61a0d935a4e upstream.
+
+The bounds checking in avc_ca_pmt() is not strict enough. It should
+be checking "read_pos + 4" because it's reading 5 bytes. If the
+"es_info_length" is non-zero then it reads a 6th byte so there needs to
+be an additional check for that.
+
+I also added checks for the "write_pos". I don't think these are
+required because "read_pos" and "write_pos" are tied together so
+checking one ought to be enough. But they make the code easier to
+understand for me. The check on write_pos is:
+
+ if (write_pos + 4 >= sizeof(c->operand) - 4) {
+
+The first "+ 4" is because we're writing 5 bytes and the last " - 4"
+is to leave space for the CRC.
+
+The other problem is that "length" can be invalid. It comes from
+"data_length" in fdtv_ca_pmt().
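+
+A stand-alone sketch of the bounds involved (toy sizes only; the real
+message length and operand size come from the driver): each loop iteration
+reads 5 bytes and writes 5 bytes, so the reader needs read_pos + 4 inside
+the message and the writer needs room for 5 bytes plus the trailing CRC.
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        int length = 12, operand_size = 16, crc_len = 4;   /* toy sizes */
+        int read_pos = 0, write_pos = 0;
+
+        while (read_pos + 4 < length) {                    /* 5 bytes to read? */
+            if (write_pos + 4 >= operand_size - crc_len) { /* room for 5 + CRC? */
+                puts("would overflow the operand buffer");
+                return 1;
+            }
+            read_pos  += 5;
+            write_pos += 5;
+            printf("copied entry, read_pos=%d write_pos=%d\n",
+                   read_pos, write_pos);
+        }
+        puts("stopped before reading past the end of the message");
+        return 0;
+    }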
+
+Cc: stable@vger.kernel.org
+Reported-by: Luo Likang <luolikang@nsfocus.com>
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+[bwh: Backported to 4.9: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/firewire/firedtv-avc.c | 14 +++++++++++---
+ drivers/media/firewire/firedtv-ci.c | 2 ++
+ 2 files changed, 13 insertions(+), 3 deletions(-)
+
+--- a/drivers/media/firewire/firedtv-avc.c
++++ b/drivers/media/firewire/firedtv-avc.c
+@@ -1169,7 +1169,11 @@ int avc_ca_pmt(struct firedtv *fdtv, cha
+ read_pos += program_info_length;
+ write_pos += program_info_length;
+ }
+- while (read_pos < length) {
++ while (read_pos + 4 < length) {
++ if (write_pos + 4 >= sizeof(c->operand) - 4) {
++ ret = -EINVAL;
++ goto out;
++ }
+ c->operand[write_pos++] = msg[read_pos++];
+ c->operand[write_pos++] = msg[read_pos++];
+ c->operand[write_pos++] = msg[read_pos++];
+@@ -1181,13 +1185,17 @@ int avc_ca_pmt(struct firedtv *fdtv, cha
+ c->operand[write_pos++] = es_info_length >> 8;
+ c->operand[write_pos++] = es_info_length & 0xff;
+ if (es_info_length > 0) {
++ if (read_pos >= length) {
++ ret = -EINVAL;
++ goto out;
++ }
+ pmt_cmd_id = msg[read_pos++];
+ if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
+ dev_err(fdtv->device, "invalid pmt_cmd_id %d "
+ "at stream level\n", pmt_cmd_id);
+
+- if (es_info_length > sizeof(c->operand) - 4 -
+- write_pos) {
++ if (es_info_length > sizeof(c->operand) - 4 - write_pos ||
++ es_info_length > length - read_pos) {
+ ret = -EINVAL;
+ goto out;
+ }
+--- a/drivers/media/firewire/firedtv-ci.c
++++ b/drivers/media/firewire/firedtv-ci.c
+@@ -138,6 +138,8 @@ static int fdtv_ca_pmt(struct firedtv *f
+ } else {
+ data_length = msg->msg[3];
+ }
++ if (data_length > sizeof(msg->msg) - data_pos)
++ return -EINVAL;
+
+ return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length);
+ }
--- /dev/null
+From foo@baz Thu Jan 27 04:31:44 PM CET 2022
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Mon, 24 Jan 2022 19:34:30 +0100
+Subject: NFSv4: Initialise connection to the server in nfs4_alloc_client()
+To: stable@vger.kernel.org
+Cc: Trond Myklebust <trond.myklebust@hammerspace.com>, Michael Wakabayashi <mwakabayashi@vmware.com>
+Message-ID: <Ye7xNuvgVSWizxdK@decadent.org.uk>
+Content-Disposition: inline
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit dd99e9f98fbf423ff6d365b37a98e8879170f17c upstream.
+
+Set up the connection to the NFSv4 server in nfs4_alloc_client(), before
+we've added the struct nfs_client to the net-namespace's nfs_client_list
+so that a downed server won't cause other mounts to hang in the trunking
+detection code.
+
+Reported-by: Michael Wakabayashi <mwakabayashi@vmware.com>
+Fixes: 5c6e5b60aae4 ("NFS: Fix an Oops in the pNFS files and flexfiles connection setup to the DS")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+[bwh: Backported to 4.9: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfs/nfs4client.c | 82 ++++++++++++++++++++++++++--------------------------
+ 1 file changed, 42 insertions(+), 40 deletions(-)
+
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -177,8 +177,11 @@ void nfs40_shutdown_client(struct nfs_cl
+
+ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
+ {
+- int err;
++ char buf[INET6_ADDRSTRLEN + 1];
++ const char *ip_addr = cl_init->ip_addr;
+ struct nfs_client *clp = nfs_alloc_client(cl_init);
++ int err;
++
+ if (IS_ERR(clp))
+ return clp;
+
+@@ -202,6 +205,44 @@ struct nfs_client *nfs4_alloc_client(con
+ #if IS_ENABLED(CONFIG_NFS_V4_1)
+ init_waitqueue_head(&clp->cl_lock_waitq);
+ #endif
++
++ if (cl_init->minorversion != 0)
++ __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
++ __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
++ __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
++
++ /*
++ * Set up the connection to the server before we add add to the
++ * global list.
++ */
++ err = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_GSS_KRB5I);
++ if (err == -EINVAL)
++ err = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
++ if (err < 0)
++ goto error;
++
++ /* If no clientaddr= option was specified, find a usable cb address */
++ if (ip_addr == NULL) {
++ struct sockaddr_storage cb_addr;
++ struct sockaddr *sap = (struct sockaddr *)&cb_addr;
++
++ err = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
++ if (err < 0)
++ goto error;
++ err = rpc_ntop(sap, buf, sizeof(buf));
++ if (err < 0)
++ goto error;
++ ip_addr = (const char *)buf;
++ }
++ strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
++
++ err = nfs_idmap_new(clp);
++ if (err < 0) {
++ dprintk("%s: failed to create idmapper. Error = %d\n",
++ __func__, err);
++ goto error;
++ }
++ __set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
+ return clp;
+
+ error:
+@@ -354,8 +395,6 @@ static int nfs4_init_client_minor_versio
+ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+ const struct nfs_client_initdata *cl_init)
+ {
+- char buf[INET6_ADDRSTRLEN + 1];
+- const char *ip_addr = cl_init->ip_addr;
+ struct nfs_client *old;
+ int error;
+
+@@ -365,43 +404,6 @@ struct nfs_client *nfs4_init_client(stru
+ return clp;
+ }
+
+- /* Check NFS protocol revision and initialize RPC op vector */
+- clp->rpc_ops = &nfs_v4_clientops;
+-
+- if (clp->cl_minorversion != 0)
+- __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
+- __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+- __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
+-
+- error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_GSS_KRB5I);
+- if (error == -EINVAL)
+- error = nfs_create_rpc_client(clp, cl_init, RPC_AUTH_UNIX);
+- if (error < 0)
+- goto error;
+-
+- /* If no clientaddr= option was specified, find a usable cb address */
+- if (ip_addr == NULL) {
+- struct sockaddr_storage cb_addr;
+- struct sockaddr *sap = (struct sockaddr *)&cb_addr;
+-
+- error = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
+- if (error < 0)
+- goto error;
+- error = rpc_ntop(sap, buf, sizeof(buf));
+- if (error < 0)
+- goto error;
+- ip_addr = (const char *)buf;
+- }
+- strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
+-
+- error = nfs_idmap_new(clp);
+- if (error < 0) {
+- dprintk("%s: failed to create idmapper. Error = %d\n",
+- __func__, error);
+- goto error;
+- }
+- __set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
+-
+ error = nfs4_init_client_minor_version(clp);
+ if (error < 0)
+ goto error;
 drm-i915-flush-tlbs-before-releasing-backing-store.patch
+media-firewire-firedtv-avc-fix-a-buffer-overflow-in-avc_ca_pmt.patch
+nfsv4-initialise-connection-to-the-server-in-nfs4_alloc_client.patch
+kvm-nvmx-fix-ept-permissions-as-reported-in-exit-qualification.patch
+kvm-x86-mmu-use-the-correct-inherited-permissions-to-get-shadow-page.patch
+arm-8800-1-use-choice-for-kernel-unwinders.patch
+ion-fix-use-after-free-during-ion_ioc_alloc.patch
+ion-protect-kref-from-userspace-manipulation.patch
+ion-do-not-put-ion-handle-until-after-its-final-use.patch