--- /dev/null
+From ddb826c2c92d461f290a7bab89e7c28696191875 Mon Sep 17 00:00:00 2001
+From: Heiner Kallweit <hkallweit1@gmail.com>
+Date: Wed, 24 Nov 2021 08:16:25 +0100
+Subject: lan743x: fix deadlock in lan743x_phy_link_status_change()
+
+From: Heiner Kallweit <hkallweit1@gmail.com>
+
+commit ddb826c2c92d461f290a7bab89e7c28696191875 upstream.
+
+Usage of phy_ethtool_get_link_ksettings() in the link status change
+handler isn't needed, and in combination with the referenced change
+it results in a deadlock. Simply remove the call and replace it with
+direct access to phydev->speed. The duplex argument of
+lan743x_phy_update_flowcontrol() isn't used and can be removed.
+
+Fixes: c10a485c3de5 ("phy: phy_ethtool_ksettings_get: Lock the phy for consistency")
+Reported-by: Alessandro B Maurici <abmaurici@gmail.com>
+Tested-by: Alessandro B Maurici <abmaurici@gmail.com>
+Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/40e27f76-0ba3-dcef-ee32-a78b9df38b0f@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[dannf: adjust context]
+Signed-off-by: dann frazier <dann.frazier@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+
+---
+ drivers/net/ethernet/microchip/lan743x_main.c | 12 +++---------
+ 1 file changed, 3 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -916,8 +916,7 @@ static int lan743x_phy_reset(struct lan7
+ }
+
+ static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
+- u8 duplex, u16 local_adv,
+- u16 remote_adv)
++ u16 local_adv, u16 remote_adv)
+ {
+ struct lan743x_phy *phy = &adapter->phy;
+ u8 cap;
+@@ -944,22 +943,17 @@ static void lan743x_phy_link_status_chan
+
+ phy_print_status(phydev);
+ if (phydev->state == PHY_RUNNING) {
+- struct ethtool_link_ksettings ksettings;
+ int remote_advertisement = 0;
+ int local_advertisement = 0;
+
+- memset(&ksettings, 0, sizeof(ksettings));
+- phy_ethtool_get_link_ksettings(netdev, &ksettings);
+ local_advertisement =
+ linkmode_adv_to_mii_adv_t(phydev->advertising);
+ remote_advertisement =
+ linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
+
+- lan743x_phy_update_flowcontrol(adapter,
+- ksettings.base.duplex,
+- local_advertisement,
++ lan743x_phy_update_flowcontrol(adapter, local_advertisement,
+ remote_advertisement);
+- lan743x_ptp_update_latency(adapter, ksettings.base.speed);
++ lan743x_ptp_update_latency(adapter, phydev->speed);
+ }
+ }
+
--- /dev/null
+From aceeafefff736057e8f93f19bbfbef26abd94604 Mon Sep 17 00:00:00 2001
+From: Jens Wiklander <jens.wiklander@linaro.org>
+Date: Thu, 27 Jan 2022 15:29:39 +0100
+Subject: optee: use driver internal tee_context for some rpc
+
+From: Jens Wiklander <jens.wiklander@linaro.org>
+
+commit aceeafefff736057e8f93f19bbfbef26abd94604 upstream.
+
+Adds a driver private tee_context by moving the tee_context in struct
+optee_notif to struct optee. This tee_context was previously used when
+doing internal calls to secure world to deliver notification.
+
+The new driver internal tee_context is now also used when allocating driver
+private shared memory. This decouples the shared memory object from its
+original tee_context. This is needed when the life time of such a memory
+allocation outlives the client tee_context.
+
+This patch fixes the problem described below:
+
+The addition of a shutdown hook by commit f25889f93184 ("optee: fix tee out
+of memory failure seen during kexec reboot") introduced a kernel shutdown
+regression that can be triggered after running the OP-TEE xtest suites.
+
+Once the shutdown hook is called it is not possible to communicate any more
+with the supplicant process because the system is not scheduling task any
+longer. Thus if the optee driver shutdown path receives a supplicant RPC
+request from the OP-TEE we will deadlock the kernel's shutdown.
+
+Fixes: f25889f93184 ("optee: fix tee out of memory failure seen during kexec reboot")
+Fixes: 217e0250cccb ("tee: use reference counting for tee_context")
+Reported-by: Lars Persson <larper@axis.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Sumit Garg <sumit.garg@linaro.org>
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+[JW: backport to 5.4-stable + update commit message]
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tee/optee/core.c | 8 ++++++++
+ drivers/tee/optee/optee_private.h | 2 ++
+ drivers/tee/optee/rpc.c | 8 +++++---
+ 3 files changed, 15 insertions(+), 3 deletions(-)
+
+--- a/drivers/tee/optee/core.c
++++ b/drivers/tee/optee/core.c
+@@ -552,6 +552,7 @@ static struct optee *optee_probe(struct
+ struct optee *optee = NULL;
+ void *memremaped_shm = NULL;
+ struct tee_device *teedev;
++ struct tee_context *ctx;
+ u32 sec_caps;
+ int rc;
+
+@@ -631,6 +632,12 @@ static struct optee *optee_probe(struct
+ optee_supp_init(&optee->supp);
+ optee->memremaped_shm = memremaped_shm;
+ optee->pool = pool;
++ ctx = teedev_open(optee->teedev);
++ if (IS_ERR(ctx)) {
++ rc = PTR_ERR(ctx);
++ goto err;
++ }
++ optee->ctx = ctx;
+
+ /*
+ * Ensure that there are no pre-existing shm objects before enabling
+@@ -667,6 +674,7 @@ err:
+
+ static void optee_remove(struct optee *optee)
+ {
++ teedev_close_context(optee->ctx);
+ /*
+ * Ask OP-TEE to free all cached shared memory objects to decrease
+ * reference counters and also avoid wild pointers in secure world
+--- a/drivers/tee/optee/optee_private.h
++++ b/drivers/tee/optee/optee_private.h
+@@ -69,6 +69,7 @@ struct optee_supp {
+ * struct optee - main service struct
+ * @supp_teedev: supplicant device
+ * @teedev: client device
++ * @ctx: driver internal TEE context
+ * @invoke_fn: function to issue smc or hvc
+ * @call_queue: queue of threads waiting to call @invoke_fn
+ * @wait_queue: queue of threads from secure world waiting for a
+@@ -83,6 +84,7 @@ struct optee {
+ struct tee_device *supp_teedev;
+ struct tee_device *teedev;
+ optee_invoke_fn *invoke_fn;
++ struct tee_context *ctx;
+ struct optee_call_queue call_queue;
+ struct optee_wait_queue wait_queue;
+ struct optee_supp supp;
+--- a/drivers/tee/optee/rpc.c
++++ b/drivers/tee/optee/rpc.c
+@@ -191,6 +191,7 @@ static struct tee_shm *cmd_alloc_suppl(s
+ }
+
+ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
++ struct optee *optee,
+ struct optee_msg_arg *arg,
+ struct optee_call_ctx *call_ctx)
+ {
+@@ -220,7 +221,8 @@ static void handle_rpc_func_cmd_shm_allo
+ shm = cmd_alloc_suppl(ctx, sz);
+ break;
+ case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
+- shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
++ shm = tee_shm_alloc(optee->ctx, sz,
++ TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+@@ -377,7 +379,7 @@ static void handle_rpc_func_cmd(struct t
+ break;
+ case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
+ free_pages_list(call_ctx);
+- handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
++ handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
+ break;
+ case OPTEE_MSG_RPC_CMD_SHM_FREE:
+ handle_rpc_func_cmd_shm_free(ctx, arg);
+@@ -405,7 +407,7 @@ void optee_handle_rpc(struct tee_context
+
+ switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
+ case OPTEE_SMC_RPC_FUNC_ALLOC:
+- shm = tee_shm_alloc(ctx, param->a1,
++ shm = tee_shm_alloc(optee->ctx, param->a1,
+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
+ reg_pair_from_64(¶m->a1, ¶m->a2, pa);
sr9700-sanity-check-for-packet-length.patch
usb-zaurus-support-another-broken-zaurus.patch
netfilter-nf_tables_offload-incorrect-flow-offload-action-array-size.patch
+x86-fpu-correct-pkru-xstate-inconsistency.patch
+tee-export-teedev_open-and-teedev_close_context.patch
+optee-use-driver-internal-tee_context-for-some-rpc.patch
+lan743x-fix-deadlock-in-lan743x_phy_link_status_change.patch
--- /dev/null
+From 1e2c3ef0496e72ba9001da5fd1b7ed56ccb30597 Mon Sep 17 00:00:00 2001
+From: Jens Wiklander <jens.wiklander@linaro.org>
+Date: Mon, 4 Oct 2021 16:11:52 +0200
+Subject: tee: export teedev_open() and teedev_close_context()
+
+From: Jens Wiklander <jens.wiklander@linaro.org>
+
+commit 1e2c3ef0496e72ba9001da5fd1b7ed56ccb30597 upstream.
+
+Exports the two functions teedev_open() and teedev_close_context() in
+order to make it easier to create a driver internal struct tee_context.
+
+Reviewed-by: Sumit Garg <sumit.garg@linaro.org>
+Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tee/tee_core.c | 6 ++++--
+ include/linux/tee_drv.h | 14 ++++++++++++++
+ 2 files changed, 18 insertions(+), 2 deletions(-)
+
+--- a/drivers/tee/tee_core.c
++++ b/drivers/tee/tee_core.c
+@@ -28,7 +28,7 @@ static DEFINE_SPINLOCK(driver_lock);
+ static struct class *tee_class;
+ static dev_t tee_devt;
+
+-static struct tee_context *teedev_open(struct tee_device *teedev)
++struct tee_context *teedev_open(struct tee_device *teedev)
+ {
+ int rc;
+ struct tee_context *ctx;
+@@ -56,6 +56,7 @@ err:
+ return ERR_PTR(rc);
+
+ }
++EXPORT_SYMBOL_GPL(teedev_open);
+
+ void teedev_ctx_get(struct tee_context *ctx)
+ {
+@@ -82,13 +83,14 @@ void teedev_ctx_put(struct tee_context *
+ kref_put(&ctx->refcount, teedev_ctx_release);
+ }
+
+-static void teedev_close_context(struct tee_context *ctx)
++void teedev_close_context(struct tee_context *ctx)
+ {
+ struct tee_device *teedev = ctx->teedev;
+
+ teedev_ctx_put(ctx);
+ tee_device_put(teedev);
+ }
++EXPORT_SYMBOL_GPL(teedev_close_context);
+
+ static int tee_open(struct inode *inode, struct file *filp)
+ {
+--- a/include/linux/tee_drv.h
++++ b/include/linux/tee_drv.h
+@@ -579,4 +579,18 @@ struct tee_client_driver {
+ #define to_tee_client_driver(d) \
+ container_of(d, struct tee_client_driver, driver)
+
++/**
++ * teedev_open() - Open a struct tee_device
++ * @teedev: Device to open
++ *
++ * @return a pointer to struct tee_context on success or an ERR_PTR on failure.
++ */
++struct tee_context *teedev_open(struct tee_device *teedev);
++
++/**
++ * teedev_close_context() - closes a struct tee_context
++ * @ctx: The struct tee_context to close
++ */
++void teedev_close_context(struct tee_context *ctx);
++
+ #endif /*__TEE_DRV_H*/
--- /dev/null
+From bgeffon@google.com Fri Feb 25 13:00:00 2022
+From: Brian Geffon <bgeffon@google.com>
+Date: Tue, 15 Feb 2022 11:22:33 -0800
+Subject: x86/fpu: Correct pkru/xstate inconsistency
+To: Dave Hansen <dave.hansen@intel.com>, Thomas Gleixner <tglx@linutronix.de>
+Cc: Willis Kung <williskung@google.com>, Guenter Roeck <groeck@google.com>, Borislav Petkov <bp@suse.de>, Andy Lutomirski <luto@kernel.org>, stable@vger.kernel.org, x86@kernel.org, linux-kernel@vger.kernel.org, Brian Geffon <bgeffon@google.com>
+Message-ID: <20220215192233.8717-1-bgeffon@google.com>
+
+From: Brian Geffon <bgeffon@google.com>
+
+When eagerly switching PKRU in switch_fpu_finish() it checks that
+current is not a kernel thread as kernel threads will never use PKRU.
+It's possible that this_cpu_read_stable() on current_task
+(ie. get_current()) is returning an old cached value. To resolve this
+reference next_p directly rather than relying on current.
+
+As written it's possible when switching from a kernel thread to a
+userspace thread to observe a cached PF_KTHREAD flag and never restore
+the PKRU. And as a result this issue only occurs when switching
+from a kernel thread to a userspace thread, switching from a non kernel
+thread works perfectly fine because all that is considered in that
+situation are the flags from some other non kernel task and the next fpu
+is passed in to switch_fpu_finish().
+
+This behavior only exists between 5.2 and 5.13 when it was fixed by a
+rewrite decoupling PKRU from xstate, in:
+ commit 954436989cc5 ("x86/fpu: Remove PKRU handling from switch_fpu_finish()")
+
+Unfortunately backporting the fix from 5.13 is probably not realistic as
+it's part of a 60+ patch series which rewrites most of the PKRU handling.
+
+Fixes: 0cecca9d03c9 ("x86/fpu: Eager switch PKRU state")
+Signed-off-by: Brian Geffon <bgeffon@google.com>
+Signed-off-by: Willis Kung <williskung@google.com>
+Tested-by: Willis Kung <williskung@google.com>
+Cc: <stable@vger.kernel.org> # v5.4.x
+Cc: <stable@vger.kernel.org> # v5.10.x
+Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/fpu/internal.h | 13 ++++++++-----
+ arch/x86/kernel/process_32.c | 6 ++----
+ arch/x86/kernel/process_64.c | 6 ++----
+ 3 files changed, 12 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -560,9 +560,11 @@ static inline void __fpregs_load_activat
+ * The FPU context is only stored/restored for a user task and
+ * PF_KTHREAD is used to distinguish between kernel and user threads.
+ */
+-static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
++static inline void switch_fpu_prepare(struct task_struct *prev, int cpu)
+ {
+- if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
++ struct fpu *old_fpu = &prev->thread.fpu;
++
++ if (static_cpu_has(X86_FEATURE_FPU) && !(prev->flags & PF_KTHREAD)) {
+ if (!copy_fpregs_to_fpstate(old_fpu))
+ old_fpu->last_cpu = -1;
+ else
+@@ -581,10 +583,11 @@ static inline void switch_fpu_prepare(st
+ * Load PKRU from the FPU context if available. Delay loading of the
+ * complete FPU state until the return to userland.
+ */
+-static inline void switch_fpu_finish(struct fpu *new_fpu)
++static inline void switch_fpu_finish(struct task_struct *next)
+ {
+ u32 pkru_val = init_pkru_value;
+ struct pkru_state *pk;
++ struct fpu *next_fpu = &next->thread.fpu;
+
+ if (!static_cpu_has(X86_FEATURE_FPU))
+ return;
+@@ -598,7 +601,7 @@ static inline void switch_fpu_finish(str
+ * PKRU state is switched eagerly because it needs to be valid before we
+ * return to userland e.g. for a copy_to_user() operation.
+ */
+- if (!(current->flags & PF_KTHREAD)) {
++ if (!(next->flags & PF_KTHREAD)) {
+ /*
+ * If the PKRU bit in xsave.header.xfeatures is not set,
+ * then the PKRU component was in init state, which means
+@@ -607,7 +610,7 @@ static inline void switch_fpu_finish(str
+ * in memory is not valid. This means pkru_val has to be
+ * set to 0 and not to init_pkru_value.
+ */
+- pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
++ pk = get_xsave_addr(&next_fpu->state.xsave, XFEATURE_PKRU);
+ pkru_val = pk ? pk->pkru : 0;
+ }
+ __write_pkru(pkru_val);
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -229,14 +229,12 @@ __switch_to(struct task_struct *prev_p,
+ {
+ struct thread_struct *prev = &prev_p->thread,
+ *next = &next_p->thread;
+- struct fpu *prev_fpu = &prev->fpu;
+- struct fpu *next_fpu = &next->fpu;
+ int cpu = smp_processor_id();
+
+ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+
+ if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+- switch_fpu_prepare(prev_fpu, cpu);
++ switch_fpu_prepare(prev_p, cpu);
+
+ /*
+ * Save away %gs. No need to save %fs, as it was saved on the
+@@ -292,7 +290,7 @@ __switch_to(struct task_struct *prev_p,
+
+ this_cpu_write(current_task, next_p);
+
+- switch_fpu_finish(next_fpu);
++ switch_fpu_finish(next_p);
+
+ /* Load the Intel cache allocation PQR MSR. */
+ resctrl_sched_in();
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -505,15 +505,13 @@ __switch_to(struct task_struct *prev_p,
+ {
+ struct thread_struct *prev = &prev_p->thread;
+ struct thread_struct *next = &next_p->thread;
+- struct fpu *prev_fpu = &prev->fpu;
+- struct fpu *next_fpu = &next->fpu;
+ int cpu = smp_processor_id();
+
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
+ this_cpu_read(irq_count) != -1);
+
+ if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+- switch_fpu_prepare(prev_fpu, cpu);
++ switch_fpu_prepare(prev_p, cpu);
+
+ /* We must save %fs and %gs before load_TLS() because
+ * %fs and %gs may be cleared by load_TLS().
+@@ -565,7 +563,7 @@ __switch_to(struct task_struct *prev_p,
+ this_cpu_write(current_task, next_p);
+ this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
+
+- switch_fpu_finish(next_fpu);
++ switch_fpu_finish(next_p);
+
+ /* Reload sp0. */
+ update_task_stack(next_p);