git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 25 Feb 2022 12:35:43 +0000 (13:35 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 25 Feb 2022 12:35:43 +0000 (13:35 +0100)
added patches:
optee-use-driver-internal-tee_context-for-some-rpc.patch
tee-export-teedev_open-and-teedev_close_context.patch
x86-fpu-correct-pkru-xstate-inconsistency.patch

queue-5.10/optee-use-driver-internal-tee_context-for-some-rpc.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/tee-export-teedev_open-and-teedev_close_context.patch [new file with mode: 0644]
queue-5.10/x86-fpu-correct-pkru-xstate-inconsistency.patch [new file with mode: 0644]

diff --git a/queue-5.10/optee-use-driver-internal-tee_context-for-some-rpc.patch b/queue-5.10/optee-use-driver-internal-tee_context-for-some-rpc.patch
new file mode 100644 (file)
index 0000000..1545202
--- /dev/null
@@ -0,0 +1,131 @@
+From aceeafefff736057e8f93f19bbfbef26abd94604 Mon Sep 17 00:00:00 2001
+From: Jens Wiklander <jens.wiklander@linaro.org>
+Date: Thu, 27 Jan 2022 15:29:39 +0100
+Subject: optee: use driver internal tee_context for some rpc
+
+From: Jens Wiklander <jens.wiklander@linaro.org>
+
+commit aceeafefff736057e8f93f19bbfbef26abd94604 upstream.
+
+Adds a driver private tee_context by moving the tee_context in struct
+optee_notif to struct optee. This tee_context was previously used when
+doing internal calls to secure world to deliver notification.
+
+The new driver internal tee_context is now also used when allocating driver
+private shared memory. This decouples the shared memory object from its
+original tee_context. This is needed when the life time of such a memory
+allocation outlives the client tee_context.
+
+This patch fixes the problem described below:
+
+The addition of a shutdown hook by commit f25889f93184 ("optee: fix tee out
+of memory failure seen during kexec reboot") introduced a kernel shutdown
+regression that can be triggered after running the OP-TEE xtest suites.
+
+Once the shutdown hook is called it is not possible to communicate any more
+with the supplicant process because the system is not scheduling tasks any
+longer. Thus if the optee driver shutdown path receives a supplicant RPC
+request from the OP-TEE we will deadlock the kernel's shutdown.
+
+Fixes: f25889f93184 ("optee: fix tee out of memory failure seen during kexec reboot")
+Fixes: 217e0250cccb ("tee: use reference counting for tee_context")
+Reported-by: Lars Persson <larper@axis.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Sumit Garg <sumit.garg@linaro.org>
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+[JW: backport to 5.10-stable + update commit message]
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tee/optee/core.c          |    8 ++++++++
+ drivers/tee/optee/optee_private.h |    2 ++
+ drivers/tee/optee/rpc.c           |    8 +++++---
+ 3 files changed, 15 insertions(+), 3 deletions(-)
+
+--- a/drivers/tee/optee/core.c
++++ b/drivers/tee/optee/core.c
+@@ -588,6 +588,7 @@ static int optee_remove(struct platform_
+       /* Unregister OP-TEE specific client devices on TEE bus */
+       optee_unregister_devices();
++      teedev_close_context(optee->ctx);
+       /*
+        * Ask OP-TEE to free all cached shared memory objects to decrease
+        * reference counters and also avoid wild pointers in secure world
+@@ -633,6 +634,7 @@ static int optee_probe(struct platform_d
+       struct optee *optee = NULL;
+       void *memremaped_shm = NULL;
+       struct tee_device *teedev;
++      struct tee_context *ctx;
+       u32 sec_caps;
+       int rc;
+@@ -719,6 +721,12 @@ static int optee_probe(struct platform_d
+       optee_supp_init(&optee->supp);
+       optee->memremaped_shm = memremaped_shm;
+       optee->pool = pool;
++      ctx = teedev_open(optee->teedev);
++      if (IS_ERR(ctx)) {
++              rc = PTR_ERR(ctx);
++              goto err;
++      }
++      optee->ctx = ctx;
+       /*
+        * Ensure that there are no pre-existing shm objects before enabling
+--- a/drivers/tee/optee/optee_private.h
++++ b/drivers/tee/optee/optee_private.h
+@@ -70,6 +70,7 @@ struct optee_supp {
+  * struct optee - main service struct
+  * @supp_teedev:      supplicant device
+  * @teedev:           client device
++ * @ctx:              driver internal TEE context
+  * @invoke_fn:                function to issue smc or hvc
+  * @call_queue:               queue of threads waiting to call @invoke_fn
+  * @wait_queue:               queue of threads from secure world waiting for a
+@@ -87,6 +88,7 @@ struct optee {
+       struct tee_device *supp_teedev;
+       struct tee_device *teedev;
+       optee_invoke_fn *invoke_fn;
++      struct tee_context *ctx;
+       struct optee_call_queue call_queue;
+       struct optee_wait_queue wait_queue;
+       struct optee_supp supp;
+--- a/drivers/tee/optee/rpc.c
++++ b/drivers/tee/optee/rpc.c
+@@ -284,6 +284,7 @@ static struct tee_shm *cmd_alloc_suppl(s
+ }
+ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
++                                        struct optee *optee,
+                                         struct optee_msg_arg *arg,
+                                         struct optee_call_ctx *call_ctx)
+ {
+@@ -313,7 +314,8 @@ static void handle_rpc_func_cmd_shm_allo
+               shm = cmd_alloc_suppl(ctx, sz);
+               break;
+       case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
+-              shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
++              shm = tee_shm_alloc(optee->ctx, sz,
++                                  TEE_SHM_MAPPED | TEE_SHM_PRIV);
+               break;
+       default:
+               arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+@@ -470,7 +472,7 @@ static void handle_rpc_func_cmd(struct t
+               break;
+       case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
+               free_pages_list(call_ctx);
+-              handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
++              handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
+               break;
+       case OPTEE_MSG_RPC_CMD_SHM_FREE:
+               handle_rpc_func_cmd_shm_free(ctx, arg);
+@@ -501,7 +503,7 @@ void optee_handle_rpc(struct tee_context
+       switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
+       case OPTEE_SMC_RPC_FUNC_ALLOC:
+-              shm = tee_shm_alloc(ctx, param->a1,
++              shm = tee_shm_alloc(optee->ctx, param->a1,
+                                   TEE_SHM_MAPPED | TEE_SHM_PRIV);
+               if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
+                       reg_pair_from_64(&param->a1, &param->a2, pa);
index 3d9fd870ca14805e92068257e53c1b9fb42e3965..75265aae867643e782eb1ef82509b2f4262c0cc5 100644 (file)
@@ -13,3 +13,6 @@ sr9700-sanity-check-for-packet-length.patch
 usb-zaurus-support-another-broken-zaurus.patch
 cdc-ncm-avoid-overflow-in-sanity-checking.patch
 netfilter-nf_tables_offload-incorrect-flow-offload-action-array-size.patch
+x86-fpu-correct-pkru-xstate-inconsistency.patch
+tee-export-teedev_open-and-teedev_close_context.patch
+optee-use-driver-internal-tee_context-for-some-rpc.patch
diff --git a/queue-5.10/tee-export-teedev_open-and-teedev_close_context.patch b/queue-5.10/tee-export-teedev_open-and-teedev_close_context.patch
new file mode 100644 (file)
index 0000000..6c8401e
--- /dev/null
@@ -0,0 +1,76 @@
+From 1e2c3ef0496e72ba9001da5fd1b7ed56ccb30597 Mon Sep 17 00:00:00 2001
+From: Jens Wiklander <jens.wiklander@linaro.org>
+Date: Mon, 4 Oct 2021 16:11:52 +0200
+Subject: tee: export teedev_open() and teedev_close_context()
+
+From: Jens Wiklander <jens.wiklander@linaro.org>
+
+commit 1e2c3ef0496e72ba9001da5fd1b7ed56ccb30597 upstream.
+
+Exports the two functions teedev_open() and teedev_close_context() in
+order to make it easier to create a driver internal struct tee_context.
+
+Reviewed-by: Sumit Garg <sumit.garg@linaro.org>
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tee/tee_core.c  |    6 ++++--
+ include/linux/tee_drv.h |   14 ++++++++++++++
+ 2 files changed, 18 insertions(+), 2 deletions(-)
+
+--- a/drivers/tee/tee_core.c
++++ b/drivers/tee/tee_core.c
+@@ -43,7 +43,7 @@ static DEFINE_SPINLOCK(driver_lock);
+ static struct class *tee_class;
+ static dev_t tee_devt;
+-static struct tee_context *teedev_open(struct tee_device *teedev)
++struct tee_context *teedev_open(struct tee_device *teedev)
+ {
+       int rc;
+       struct tee_context *ctx;
+@@ -70,6 +70,7 @@ err:
+       return ERR_PTR(rc);
+ }
++EXPORT_SYMBOL_GPL(teedev_open);
+ void teedev_ctx_get(struct tee_context *ctx)
+ {
+@@ -96,13 +97,14 @@ void teedev_ctx_put(struct tee_context *
+       kref_put(&ctx->refcount, teedev_ctx_release);
+ }
+-static void teedev_close_context(struct tee_context *ctx)
++void teedev_close_context(struct tee_context *ctx)
+ {
+       struct tee_device *teedev = ctx->teedev;
+       teedev_ctx_put(ctx);
+       tee_device_put(teedev);
+ }
++EXPORT_SYMBOL_GPL(teedev_close_context);
+ static int tee_open(struct inode *inode, struct file *filp)
+ {
+--- a/include/linux/tee_drv.h
++++ b/include/linux/tee_drv.h
+@@ -582,4 +582,18 @@ struct tee_client_driver {
+ #define to_tee_client_driver(d) \
+               container_of(d, struct tee_client_driver, driver)
++/**
++ * teedev_open() - Open a struct tee_device
++ * @teedev:   Device to open
++ *
++ * @return a pointer to struct tee_context on success or an ERR_PTR on failure.
++ */
++struct tee_context *teedev_open(struct tee_device *teedev);
++
++/**
++ * teedev_close_context() - closes a struct tee_context
++ * @ctx:      The struct tee_context to close
++ */
++void teedev_close_context(struct tee_context *ctx);
++
+ #endif /*__TEE_DRV_H*/
diff --git a/queue-5.10/x86-fpu-correct-pkru-xstate-inconsistency.patch b/queue-5.10/x86-fpu-correct-pkru-xstate-inconsistency.patch
new file mode 100644 (file)
index 0000000..d313af2
--- /dev/null
@@ -0,0 +1,147 @@
+From bgeffon@google.com  Fri Feb 25 13:00:00 2022
+From: Brian Geffon <bgeffon@google.com>
+Date: Tue, 15 Feb 2022 11:22:33 -0800
+Subject: x86/fpu: Correct pkru/xstate inconsistency
+To: Dave Hansen <dave.hansen@intel.com>, Thomas Gleixner <tglx@linutronix.de>
+Cc: Willis Kung <williskung@google.com>, Guenter Roeck <groeck@google.com>, Borislav Petkov <bp@suse.de>, Andy Lutomirski <luto@kernel.org>, stable@vger.kernel.org, x86@kernel.org, linux-kernel@vger.kernel.org, Brian Geffon <bgeffon@google.com>
+Message-ID: <20220215192233.8717-1-bgeffon@google.com>
+
+From: Brian Geffon <bgeffon@google.com>
+
+When eagerly switching PKRU in switch_fpu_finish() it checks that
+current is not a kernel thread as kernel threads will never use PKRU.
+It's possible that this_cpu_read_stable() on current_task
+(ie. get_current()) is returning an old cached value. To resolve this
+reference next_p directly rather than relying on current.
+
+As written it's possible when switching from a kernel thread to a
+userspace thread to observe a cached PF_KTHREAD flag and never restore
+the PKRU. And as a result this issue only occurs when switching
+from a kernel thread to a userspace thread, switching from a non kernel
+thread works perfectly fine because all that is considered in that
+situation are the flags from some other non kernel task and the next fpu
+is passed in to switch_fpu_finish().
+
+This behavior only exists between 5.2 and 5.13 when it was fixed by a
+rewrite decoupling PKRU from xstate, in:
+  commit 954436989cc5 ("x86/fpu: Remove PKRU handling from switch_fpu_finish()")
+
+Unfortunately backporting the fix from 5.13 is probably not realistic as
+it's part of a 60+ patch series which rewrites most of the PKRU handling.
+
+Fixes: 0cecca9d03c9 ("x86/fpu: Eager switch PKRU state")
+Signed-off-by: Brian Geffon <bgeffon@google.com>
+Signed-off-by: Willis Kung <williskung@google.com>
+Tested-by: Willis Kung <williskung@google.com>
+Cc: <stable@vger.kernel.org> # v5.4.x
+Cc: <stable@vger.kernel.org> # v5.10.x
+Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/fpu/internal.h |   13 ++++++++-----
+ arch/x86/kernel/process_32.c        |    6 ++----
+ arch/x86/kernel/process_64.c        |    6 ++----
+ 3 files changed, 12 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -531,9 +531,11 @@ static inline void __fpregs_load_activat
+  * The FPU context is only stored/restored for a user task and
+  * PF_KTHREAD is used to distinguish between kernel and user threads.
+  */
+-static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
++static inline void switch_fpu_prepare(struct task_struct *prev, int cpu)
+ {
+-      if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
++      struct fpu *old_fpu = &prev->thread.fpu;
++
++      if (static_cpu_has(X86_FEATURE_FPU) && !(prev->flags & PF_KTHREAD)) {
+               if (!copy_fpregs_to_fpstate(old_fpu))
+                       old_fpu->last_cpu = -1;
+               else
+@@ -552,10 +554,11 @@ static inline void switch_fpu_prepare(st
+  * Load PKRU from the FPU context if available. Delay loading of the
+  * complete FPU state until the return to userland.
+  */
+-static inline void switch_fpu_finish(struct fpu *new_fpu)
++static inline void switch_fpu_finish(struct task_struct *next)
+ {
+       u32 pkru_val = init_pkru_value;
+       struct pkru_state *pk;
++      struct fpu *next_fpu = &next->thread.fpu;
+       if (!static_cpu_has(X86_FEATURE_FPU))
+               return;
+@@ -569,7 +572,7 @@ static inline void switch_fpu_finish(str
+        * PKRU state is switched eagerly because it needs to be valid before we
+        * return to userland e.g. for a copy_to_user() operation.
+        */
+-      if (!(current->flags & PF_KTHREAD)) {
++      if (!(next->flags & PF_KTHREAD)) {
+               /*
+                * If the PKRU bit in xsave.header.xfeatures is not set,
+                * then the PKRU component was in init state, which means
+@@ -578,7 +581,7 @@ static inline void switch_fpu_finish(str
+                * in memory is not valid. This means pkru_val has to be
+                * set to 0 and not to init_pkru_value.
+                */
+-              pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
++              pk = get_xsave_addr(&next_fpu->state.xsave, XFEATURE_PKRU);
+               pkru_val = pk ? pk->pkru : 0;
+       }
+       __write_pkru(pkru_val);
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -159,14 +159,12 @@ __switch_to(struct task_struct *prev_p,
+ {
+       struct thread_struct *prev = &prev_p->thread,
+                            *next = &next_p->thread;
+-      struct fpu *prev_fpu = &prev->fpu;
+-      struct fpu *next_fpu = &next->fpu;
+       int cpu = smp_processor_id();
+       /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+       if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+-              switch_fpu_prepare(prev_fpu, cpu);
++              switch_fpu_prepare(prev_p, cpu);
+       /*
+        * Save away %gs. No need to save %fs, as it was saved on the
+@@ -213,7 +211,7 @@ __switch_to(struct task_struct *prev_p,
+       this_cpu_write(current_task, next_p);
+-      switch_fpu_finish(next_fpu);
++      switch_fpu_finish(next_p);
+       /* Load the Intel cache allocation PQR MSR. */
+       resctrl_sched_in();
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -535,15 +535,13 @@ __switch_to(struct task_struct *prev_p,
+ {
+       struct thread_struct *prev = &prev_p->thread;
+       struct thread_struct *next = &next_p->thread;
+-      struct fpu *prev_fpu = &prev->fpu;
+-      struct fpu *next_fpu = &next->fpu;
+       int cpu = smp_processor_id();
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
+                    this_cpu_read(irq_count) != -1);
+       if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+-              switch_fpu_prepare(prev_fpu, cpu);
++              switch_fpu_prepare(prev_p, cpu);
+       /* We must save %fs and %gs before load_TLS() because
+        * %fs and %gs may be cleared by load_TLS().
+@@ -595,7 +593,7 @@ __switch_to(struct task_struct *prev_p,
+       this_cpu_write(current_task, next_p);
+       this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
+-      switch_fpu_finish(next_fpu);
++      switch_fpu_finish(next_p);
+       /* Reload sp0. */
+       update_task_stack(next_p);