--- /dev/null
+From a6aeb739974ec73e5217c75a7c008a688d3d5cf1 Mon Sep 17 00:00:00 2001
+From: Dmitry Antipov <dmantipov@yandex.ru>
+Date: Wed, 7 May 2025 09:50:44 +0300
+Subject: module: ensure that kobject_put() is safe for module type kobjects
+
+From: Dmitry Antipov <dmantipov@yandex.ru>
+
+commit a6aeb739974ec73e5217c75a7c008a688d3d5cf1 upstream.
+
+In 'lookup_or_create_module_kobject()', an internal kobject is created
+using 'module_ktype'. So call to 'kobject_put()' on error handling
+path causes an attempt to use an uninitialized completion pointer in
+'module_kobject_release()'. In this scenario, we just want to release
+kobject without an extra synchronization required for a regular module
+unloading process, so adding an extra check whether 'complete()' is
+actually required makes 'kobject_put()' safe.
+
+Reported-by: syzbot+7fb8a372e1f6add936dd@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=7fb8a372e1f6add936dd
+Fixes: 942e443127e9 ("module: Fix mod->mkobj.kobj potentially freed too early")
+Cc: stable@vger.kernel.org
+Suggested-by: Petr Pavlu <petr.pavlu@suse.com>
+Signed-off-by: Dmitry Antipov <dmantipov@yandex.ru>
+Link: https://lore.kernel.org/r/20250507065044.86529-1-dmantipov@yandex.ru
+Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/params.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/params.c
++++ b/kernel/params.c
+@@ -945,7 +945,9 @@ int module_sysfs_initialized;
+ static void module_kobj_release(struct kobject *kobj)
+ {
+ struct module_kobject *mk = to_module_kobject(kobj);
+- complete(mk->kobj_completion);
++
++ if (mk->kobj_completion)
++ complete(mk->kobj_completion);
+ }
+
+ struct kobj_type module_ktype = {
--- /dev/null
+From 8f947e0fd595951460f5a6e1ac29baa82fa02eab Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 24 Apr 2025 15:45:12 +0200
+Subject: ocfs2: implement handshaking with ocfs2 recovery thread
+
+From: Jan Kara <jack@suse.cz>
+
+commit 8f947e0fd595951460f5a6e1ac29baa82fa02eab upstream.
+
+We will need ocfs2 recovery thread to acknowledge transitions of
+recovery_state when disabling particular types of recovery. This is
+similar to what currently happens when disabling recovery completely, just
+more general. Implement the handshake and use it for exit from recovery.
+
+Link: https://lkml.kernel.org/r/20250424134515.18933-5-jack@suse.cz
+Fixes: 5f530de63cfc ("ocfs2: Use s_umount for quota recovery protection")
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Heming Zhao <heming.zhao@suse.com>
+Tested-by: Heming Zhao <heming.zhao@suse.com>
+Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Murad Masimov <m.masimov@mt-integration.ru>
+Cc: Shichangkuo <shi.changkuo@h3c.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ocfs2/journal.c | 52 +++++++++++++++++++++++++++++++++++-----------------
+ fs/ocfs2/ocfs2.h | 4 ++++
+ 2 files changed, 39 insertions(+), 17 deletions(-)
+
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -192,31 +192,48 @@ int ocfs2_recovery_init(struct ocfs2_sup
+ return 0;
+ }
+
+-/* we can't grab the goofy sem lock from inside wait_event, so we use
+- * memory barriers to make sure that we'll see the null task before
+- * being woken up */
+ static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
+ {
+- mb();
+ return osb->recovery_thread_task != NULL;
+ }
+
+-void ocfs2_recovery_exit(struct ocfs2_super *osb)
++static void ocfs2_recovery_disable(struct ocfs2_super *osb,
++ enum ocfs2_recovery_state state)
+ {
+- struct ocfs2_recovery_map *rm;
+-
+- /* disable any new recovery threads and wait for any currently
+- * running ones to exit. Do this before setting the vol_state. */
+ mutex_lock(&osb->recovery_lock);
+- osb->recovery_state = OCFS2_REC_DISABLED;
++ /*
++ * If recovery thread is not running, we can directly transition to
++ * final state.
++ */
++ if (!ocfs2_recovery_thread_running(osb)) {
++ osb->recovery_state = state + 1;
++ goto out_lock;
++ }
++ osb->recovery_state = state;
++ /* Wait for recovery thread to acknowledge state transition */
++ wait_event_cmd(osb->recovery_event,
++ !ocfs2_recovery_thread_running(osb) ||
++ osb->recovery_state >= state + 1,
++ mutex_unlock(&osb->recovery_lock),
++ mutex_lock(&osb->recovery_lock));
++out_lock:
+ mutex_unlock(&osb->recovery_lock);
+- wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));
+
+- /* At this point, we know that no more recovery threads can be
+- * launched, so wait for any recovery completion work to
+- * complete. */
++ /*
++ * At this point we know that no more recovery work can be queued so
++ * wait for any recovery completion work to complete.
++ */
+ if (osb->ocfs2_wq)
+ flush_workqueue(osb->ocfs2_wq);
++}
++
++void ocfs2_recovery_exit(struct ocfs2_super *osb)
++{
++ struct ocfs2_recovery_map *rm;
++
++ /* disable any new recovery threads and wait for any currently
++ * running ones to exit. Do this before setting the vol_state. */
++ ocfs2_recovery_disable(osb, OCFS2_REC_WANT_DISABLE);
+
+ /*
+ * Now that recovery is shut down, and the osb is about to be
+@@ -1507,7 +1524,8 @@ bail:
+
+ ocfs2_free_replay_slots(osb);
+ osb->recovery_thread_task = NULL;
+- mb(); /* sync with ocfs2_recovery_thread_running */
++ if (osb->recovery_state == OCFS2_REC_WANT_DISABLE)
++ osb->recovery_state = OCFS2_REC_DISABLED;
+ wake_up(&osb->recovery_event);
+
+ mutex_unlock(&osb->recovery_lock);
+@@ -1526,13 +1544,13 @@ void ocfs2_recovery_thread(struct ocfs2_
+ int was_set = -1;
+
+ mutex_lock(&osb->recovery_lock);
+- if (osb->recovery_state < OCFS2_REC_DISABLED)
++ if (osb->recovery_state < OCFS2_REC_WANT_DISABLE)
+ was_set = ocfs2_recovery_map_set(osb, node_num);
+
+ trace_ocfs2_recovery_thread(node_num, osb->node_num,
+ osb->recovery_state, osb->recovery_thread_task, was_set);
+
+- if (osb->recovery_state == OCFS2_REC_DISABLED)
++ if (osb->recovery_state >= OCFS2_REC_WANT_DISABLE)
+ goto out;
+
+ if (osb->recovery_thread_task)
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -286,6 +286,10 @@ enum ocfs2_mount_options
+
+ enum ocfs2_recovery_state {
+ OCFS2_REC_ENABLED = 0,
++ OCFS2_REC_WANT_DISABLE,
++ /*
++ * Must be OCFS2_REC_WANT_DISABLE + 1 for ocfs2_recovery_exit() to work
++ */
+ OCFS2_REC_DISABLED,
+ };
+
--- /dev/null
+From fcaf3b2683b05a9684acdebda706a12025a6927a Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 24 Apr 2025 15:45:13 +0200
+Subject: ocfs2: stop quota recovery before disabling quotas
+
+From: Jan Kara <jack@suse.cz>
+
+commit fcaf3b2683b05a9684acdebda706a12025a6927a upstream.
+
+Currently quota recovery is synchronized with unmount using sb->s_umount
+semaphore. That is however prone to deadlocks because
+flush_workqueue(osb->ocfs2_wq) called from umount code can wait for quota
+recovery to complete while ocfs2_finish_quota_recovery() waits for
+sb->s_umount semaphore.
+
+Grabbing of sb->s_umount semaphore in ocfs2_finish_quota_recovery() is
+only needed to protect that function from disabling of quotas from
+ocfs2_dismount_volume(). Handle this problem by disabling quota recovery
+early during unmount in ocfs2_dismount_volume() instead so that we can
+drop acquisition of sb->s_umount from ocfs2_finish_quota_recovery().
+
+Link: https://lkml.kernel.org/r/20250424134515.18933-6-jack@suse.cz
+Fixes: 5f530de63cfc ("ocfs2: Use s_umount for quota recovery protection")
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reported-by: Shichangkuo <shi.changkuo@h3c.com>
+Reported-by: Murad Masimov <m.masimov@mt-integration.ru>
+Reviewed-by: Heming Zhao <heming.zhao@suse.com>
+Tested-by: Heming Zhao <heming.zhao@suse.com>
+Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ocfs2/journal.c | 20 ++++++++++++++++++--
+ fs/ocfs2/journal.h | 1 +
+ fs/ocfs2/ocfs2.h | 6 ++++++
+ fs/ocfs2/quota_local.c | 9 ++-------
+ fs/ocfs2/super.c | 3 +++
+ 5 files changed, 30 insertions(+), 9 deletions(-)
+
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -227,6 +227,11 @@ out_lock:
+ flush_workqueue(osb->ocfs2_wq);
+ }
+
++void ocfs2_recovery_disable_quota(struct ocfs2_super *osb)
++{
++ ocfs2_recovery_disable(osb, OCFS2_REC_QUOTA_WANT_DISABLE);
++}
++
+ void ocfs2_recovery_exit(struct ocfs2_super *osb)
+ {
+ struct ocfs2_recovery_map *rm;
+@@ -1427,6 +1432,18 @@ static int __ocfs2_recovery_thread(void
+ }
+ }
+ restart:
++ if (quota_enabled) {
++ mutex_lock(&osb->recovery_lock);
++ /* Confirm that recovery thread will no longer recover quotas */
++ if (osb->recovery_state == OCFS2_REC_QUOTA_WANT_DISABLE) {
++ osb->recovery_state = OCFS2_REC_QUOTA_DISABLED;
++ wake_up(&osb->recovery_event);
++ }
++ if (osb->recovery_state >= OCFS2_REC_QUOTA_DISABLED)
++ quota_enabled = 0;
++ mutex_unlock(&osb->recovery_lock);
++ }
++
+ status = ocfs2_super_lock(osb, 1);
+ if (status < 0) {
+ mlog_errno(status);
+@@ -1530,8 +1547,7 @@ bail:
+
+ mutex_unlock(&osb->recovery_lock);
+
+- if (quota_enabled)
+- kfree(rm_quota);
++ kfree(rm_quota);
+
+ /* no one is callint kthread_stop() for us so the kthread() api
+ * requires that we call do_exit(). And it isn't exported, but
+--- a/fs/ocfs2/journal.h
++++ b/fs/ocfs2/journal.h
+@@ -148,6 +148,7 @@ void ocfs2_wait_for_recovery(struct ocfs
+
+ int ocfs2_recovery_init(struct ocfs2_super *osb);
+ void ocfs2_recovery_exit(struct ocfs2_super *osb);
++void ocfs2_recovery_disable_quota(struct ocfs2_super *osb);
+
+ int ocfs2_compute_replay_slots(struct ocfs2_super *osb);
+ void ocfs2_free_replay_slots(struct ocfs2_super *osb);
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -286,6 +286,12 @@ enum ocfs2_mount_options
+
+ enum ocfs2_recovery_state {
+ OCFS2_REC_ENABLED = 0,
++ OCFS2_REC_QUOTA_WANT_DISABLE,
++ /*
++ * Must be OCFS2_REC_QUOTA_WANT_DISABLE + 1 for
++ * ocfs2_recovery_disable_quota() to work.
++ */
++ OCFS2_REC_QUOTA_DISABLED,
+ OCFS2_REC_WANT_DISABLE,
+ /*
+ * Must be OCFS2_REC_WANT_DISABLE + 1 for ocfs2_recovery_exit() to work
+--- a/fs/ocfs2/quota_local.c
++++ b/fs/ocfs2/quota_local.c
+@@ -453,8 +453,7 @@ out:
+
+ /* Sync changes in local quota file into global quota file and
+ * reinitialize local quota file.
+- * The function expects local quota file to be already locked and
+- * s_umount locked in shared mode. */
++ * The function expects local quota file to be already locked. */
+ static int ocfs2_recover_local_quota_file(struct inode *lqinode,
+ int type,
+ struct ocfs2_quota_recovery *rec)
+@@ -585,7 +584,6 @@ int ocfs2_finish_quota_recovery(struct o
+ {
+ unsigned int ino[OCFS2_MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE,
+ LOCAL_GROUP_QUOTA_SYSTEM_INODE };
+- struct super_block *sb = osb->sb;
+ struct ocfs2_local_disk_dqinfo *ldinfo;
+ struct buffer_head *bh;
+ handle_t *handle;
+@@ -597,7 +595,6 @@ int ocfs2_finish_quota_recovery(struct o
+ printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
+ "slot %u\n", osb->dev_str, slot_num);
+
+- down_read(&sb->s_umount);
+ for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
+ if (list_empty(&(rec->r_list[type])))
+ continue;
+@@ -674,7 +671,6 @@ out_put:
+ break;
+ }
+ out:
+- up_read(&sb->s_umount);
+ kfree(rec);
+ return status;
+ }
+@@ -840,8 +836,7 @@ static int ocfs2_local_free_info(struct
+ ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk);
+
+ /*
+- * s_umount held in exclusive mode protects us against racing with
+- * recovery thread...
++ * ocfs2_dismount_volume() has already aborted quota recovery...
+ */
+ if (oinfo->dqi_rec) {
+ ocfs2_free_quota_recovery(oinfo->dqi_rec);
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -1872,6 +1872,9 @@ static void ocfs2_dismount_volume(struct
+ /* Orphan scan should be stopped as early as possible */
+ ocfs2_orphan_scan_stop(osb);
+
++ /* Stop quota recovery so that we can disable quotas */
++ ocfs2_recovery_disable_quota(osb);
++
+ ocfs2_disable_quotas(osb);
+
+ /* All dquots should be freed by now */
--- /dev/null
+From c0fb83088f0cc4ee4706e0495ee8b06f49daa716 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 24 Apr 2025 15:45:11 +0200
+Subject: ocfs2: switch osb->disable_recovery to enum
+
+From: Jan Kara <jack@suse.cz>
+
+commit c0fb83088f0cc4ee4706e0495ee8b06f49daa716 upstream.
+
+Patch series "ocfs2: Fix deadlocks in quota recovery", v3.
+
+This implements another approach to fixing quota recovery deadlocks. We
+avoid grabbing sb->s_umount semaphore from ocfs2_finish_quota_recovery()
+and instead stop quota recovery early in ocfs2_dismount_volume().
+
+
+This patch (of 3):
+
+We will need more recovery states than just pure enable / disable to fix
+deadlocks with quota recovery. Switch osb->disable_recovery to enum.
+
+Link: https://lkml.kernel.org/r/20250424134301.1392-1-jack@suse.cz
+Link: https://lkml.kernel.org/r/20250424134515.18933-4-jack@suse.cz
+Fixes: 5f530de63cfc ("ocfs2: Use s_umount for quota recovery protection")
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Heming Zhao <heming.zhao@suse.com>
+Tested-by: Heming Zhao <heming.zhao@suse.com>
+Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Changwei Ge <gechangwei@live.cn>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: Murad Masimov <m.masimov@mt-integration.ru>
+Cc: Shichangkuo <shi.changkuo@h3c.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ocfs2/journal.c | 14 ++++++++------
+ fs/ocfs2/ocfs2.h | 7 ++++++-
+ 2 files changed, 14 insertions(+), 7 deletions(-)
+
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -173,7 +173,7 @@ int ocfs2_recovery_init(struct ocfs2_sup
+ struct ocfs2_recovery_map *rm;
+
+ mutex_init(&osb->recovery_lock);
+- osb->disable_recovery = 0;
++ osb->recovery_state = OCFS2_REC_ENABLED;
+ osb->recovery_thread_task = NULL;
+ init_waitqueue_head(&osb->recovery_event);
+
+@@ -208,7 +208,7 @@ void ocfs2_recovery_exit(struct ocfs2_su
+ /* disable any new recovery threads and wait for any currently
+ * running ones to exit. Do this before setting the vol_state. */
+ mutex_lock(&osb->recovery_lock);
+- osb->disable_recovery = 1;
++ osb->recovery_state = OCFS2_REC_DISABLED;
+ mutex_unlock(&osb->recovery_lock);
+ wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));
+
+@@ -1523,14 +1523,16 @@ bail:
+
+ void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
+ {
++ int was_set = -1;
++
+ mutex_lock(&osb->recovery_lock);
++ if (osb->recovery_state < OCFS2_REC_DISABLED)
++ was_set = ocfs2_recovery_map_set(osb, node_num);
+
+ trace_ocfs2_recovery_thread(node_num, osb->node_num,
+- osb->disable_recovery, osb->recovery_thread_task,
+- osb->disable_recovery ?
+- -1 : ocfs2_recovery_map_set(osb, node_num));
++ osb->recovery_state, osb->recovery_thread_task, was_set);
+
+- if (osb->disable_recovery)
++ if (osb->recovery_state == OCFS2_REC_DISABLED)
+ goto out;
+
+ if (osb->recovery_thread_task)
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -284,6 +284,11 @@ enum ocfs2_mount_options
+ #define OCFS2_OSB_ERROR_FS 0x0004
+ #define OCFS2_DEFAULT_ATIME_QUANTUM 60
+
++enum ocfs2_recovery_state {
++ OCFS2_REC_ENABLED = 0,
++ OCFS2_REC_DISABLED,
++};
++
+ struct ocfs2_journal;
+ struct ocfs2_slot_info;
+ struct ocfs2_recovery_map;
+@@ -346,7 +351,7 @@ struct ocfs2_super
+ struct ocfs2_recovery_map *recovery_map;
+ struct ocfs2_replay_map *replay_map;
+ struct task_struct *recovery_thread_task;
+- int disable_recovery;
++ enum ocfs2_recovery_state recovery_state;
+ wait_queue_head_t checkpoint_event;
+ struct ocfs2_journal *journal;
+ unsigned long osb_commit_interval;
iio-imu-st_lsm6dsx-fix-possible-lockup-in-st_lsm6dsx_read_tagged_fifo.patch
drm-amd-display-fix-wrong-handling-for-aux_defer-case.patch
usb-uhci-platform-make-the-clock-really-optional.patch
+xenbus-use-kref-to-track-req-lifetime.patch
+module-ensure-that-kobject_put-is-safe-for-module-type-kobjects.patch
+ocfs2-switch-osb-disable_recovery-to-enum.patch
+ocfs2-implement-handshaking-with-ocfs2-recovery-thread.patch
+ocfs2-stop-quota-recovery-before-disabling-quotas.patch
+usb-cdnsp-fix-issue-with-resuming-from-l1.patch
+usb-cdnsp-fix-l1-resume-issue-for-rtl_revision_new_lpm-version.patch
+usb-gadget-tegra-xudc-ack-st_rc-after-clearing-ctrl_run.patch
+usb-host-tegra-prevent-host-controller-crash-when-otg-port-is-used.patch
--- /dev/null
+From 241e2ce88e5a494be7a5d44c0697592f1632fbee Mon Sep 17 00:00:00 2001
+From: Pawel Laszczak <pawell@cadence.com>
+Date: Fri, 18 Apr 2025 04:55:16 +0000
+Subject: usb: cdnsp: Fix issue with resuming from L1
+
+From: Pawel Laszczak <pawell@cadence.com>
+
+commit 241e2ce88e5a494be7a5d44c0697592f1632fbee upstream.
+
+In very rare cases, after resuming the controller from L1 to L0, it reads
+registers before the UTMI clock has been enabled and, as a result, the
+driver reads an incorrect value.
+Most of registers are in APB domain clock but some of them (e.g. PORTSC)
+are in UTMI domain clock.
+After entering to L1 state the UTMI clock can be disabled.
+When controller transition from L1 to L0 the port status change event is
+reported and in interrupt runtime function driver reads PORTSC.
+During this read operation controller synchronize UTMI and APB domain
+but UTMI clock is still disabled and in result it reads 0xFFFFFFFF value.
+To fix this issue driver increases APB timeout value.
+
+The issue is platform specific and, if the default value of the APB
+timeout is not sufficient, this time should be set individually for each
+platform.
+
+Fixes: 3d82904559f4 ("usb: cdnsp: cdns3 Add main part of Cadence USBSSP DRD Driver")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Pawel Laszczak <pawell@cadence.com>
+Acked-by: Peter Chen <peter.chen@kernel.org>
+Link: https://lore.kernel.org/r/PH7PR07MB953846C57973E4DB134CAA71DDBF2@PH7PR07MB9538.namprd07.prod.outlook.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/cdns3/cdnsp-gadget.c | 29 +++++++++++++++++++++++++++++
+ drivers/usb/cdns3/cdnsp-gadget.h | 3 +++
+ drivers/usb/cdns3/cdnsp-pci.c | 12 ++++++++++--
+ drivers/usb/cdns3/core.h | 3 +++
+ 4 files changed, 45 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/cdns3/cdnsp-gadget.c
++++ b/drivers/usb/cdns3/cdnsp-gadget.c
+@@ -138,6 +138,26 @@ static void cdnsp_clear_port_change_bit(
+ (portsc & PORT_CHANGE_BITS), port_regs);
+ }
+
++static void cdnsp_set_apb_timeout_value(struct cdnsp_device *pdev)
++{
++ struct cdns *cdns = dev_get_drvdata(pdev->dev);
++ __le32 __iomem *reg;
++ void __iomem *base;
++ u32 offset = 0;
++ u32 val;
++
++ if (!cdns->override_apb_timeout)
++ return;
++
++ base = &pdev->cap_regs->hc_capbase;
++ offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
++ reg = base + offset + REG_CHICKEN_BITS_3_OFFSET;
++
++ val = le32_to_cpu(readl(reg));
++ val = CHICKEN_APB_TIMEOUT_SET(val, cdns->override_apb_timeout);
++ writel(cpu_to_le32(val), reg);
++}
++
+ static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
+ {
+ __le32 __iomem *reg;
+@@ -1804,6 +1824,15 @@ static int cdnsp_gen_setup(struct cdnsp_
+ pdev->hci_version = HC_VERSION(pdev->hcc_params);
+ pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);
+
++ /*
++ * Override the APB timeout value to give the controller more time for
++ * enabling UTMI clock and synchronizing APB and UTMI clock domains.
++ * This fix is platform specific and is required to fixes issue with
++ * reading incorrect value from PORTSC register after resuming
++ * from L1 state.
++ */
++ cdnsp_set_apb_timeout_value(pdev);
++
+ cdnsp_get_rev_cap(pdev);
+
+ /* Make sure the Device Controller is halted. */
+--- a/drivers/usb/cdns3/cdnsp-gadget.h
++++ b/drivers/usb/cdns3/cdnsp-gadget.h
+@@ -520,6 +520,9 @@ struct cdnsp_rev_cap {
+ #define REG_CHICKEN_BITS_2_OFFSET 0x48
+ #define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28)
+
++#define REG_CHICKEN_BITS_3_OFFSET 0x4C
++#define CHICKEN_APB_TIMEOUT_SET(p, val) (((p) & ~GENMASK(21, 0)) | (val))
++
+ /* XBUF Extended Capability ID. */
+ #define XBUF_CAP_ID 0xCB
+ #define XBUF_RX_TAG_MASK_0_OFFSET 0x1C
+--- a/drivers/usb/cdns3/cdnsp-pci.c
++++ b/drivers/usb/cdns3/cdnsp-pci.c
+@@ -33,6 +33,8 @@
+ #define CDNS_DRD_ID 0x0100
+ #define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80)
+
++#define CHICKEN_APB_TIMEOUT_VALUE 0x1C20
++
+ static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
+ {
+ /*
+@@ -144,6 +146,14 @@ static int cdnsp_pci_probe(struct pci_de
+ cdnsp->otg_irq = pdev->irq;
+ }
+
++ /*
++ * Cadence PCI based platform require some longer timeout for APB
++ * to fixes domain clock synchronization issue after resuming
++ * controller from L1 state.
++ */
++ cdnsp->override_apb_timeout = CHICKEN_APB_TIMEOUT_VALUE;
++ pci_set_drvdata(pdev, cdnsp);
++
+ if (pci_is_enabled(func)) {
+ cdnsp->dev = dev;
+ cdnsp->gadget_init = cdnsp_gadget_init;
+@@ -153,8 +163,6 @@ static int cdnsp_pci_probe(struct pci_de
+ goto free_cdnsp;
+ }
+
+- pci_set_drvdata(pdev, cdnsp);
+-
+ device_wakeup_enable(&pdev->dev);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
+--- a/drivers/usb/cdns3/core.h
++++ b/drivers/usb/cdns3/core.h
+@@ -79,6 +79,8 @@ struct cdns3_platform_data {
+ * @pdata: platform data from glue layer
+ * @lock: spinlock structure
+ * @xhci_plat_data: xhci private data structure pointer
++ * @override_apb_timeout: hold value of APB timeout. For value 0 the default
++ * value in CHICKEN_BITS_3 will be preserved.
+ * @gadget_init: pointer to gadget initialization function
+ */
+ struct cdns {
+@@ -117,6 +119,7 @@ struct cdns {
+ struct cdns3_platform_data *pdata;
+ spinlock_t lock;
+ struct xhci_plat_priv *xhci_plat_data;
++ u32 override_apb_timeout;
+
+ int (*gadget_init)(struct cdns *cdns);
+ };
--- /dev/null
+From 8614ecdb1570e4fffe87ebdc62b613ed66f1f6a6 Mon Sep 17 00:00:00 2001
+From: Pawel Laszczak <pawell@cadence.com>
+Date: Fri, 25 Apr 2025 05:55:40 +0000
+Subject: usb: cdnsp: fix L1 resume issue for RTL_REVISION_NEW_LPM version
+
+From: Pawel Laszczak <pawell@cadence.com>
+
+commit 8614ecdb1570e4fffe87ebdc62b613ed66f1f6a6 upstream.
+
+Controllers with an RTL version larger than
+RTL_REVISION_NEW_LPM (0x00002700) have a bug which causes the controller
+not to resume from the L1 state. It happens if, after receiving an LPM
+packet, the controller starts transitioning to L1 and at this moment the
+driver forces resuming by a write operation to PORTSC.PLS.
+It's a corner case and happens when the write operation to PORTSC occurs
+during the device delay before transitioning to L1 after the ACK transmit
+time (TL1TokenRetry).
+
+Forcing the transition from L1->L0 by the driver for revisions larger
+than RTL_REVISION_NEW_LPM is not needed, so the driver can simply fix
+this issue by blocking the call to the cdnsp_force_l0_go function.
+
+Fixes: 3d82904559f4 ("usb: cdnsp: cdns3 Add main part of Cadence USBSSP DRD Driver")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Pawel Laszczak <pawell@cadence.com>
+Acked-by: Peter Chen <peter.chen@kernel.org>
+Link: https://lore.kernel.org/r/PH7PR07MB9538B55C3A6E71F9ED29E980DD842@PH7PR07MB9538.namprd07.prod.outlook.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/cdns3/cdnsp-gadget.c | 2 ++
+ drivers/usb/cdns3/cdnsp-gadget.h | 3 +++
+ drivers/usb/cdns3/cdnsp-ring.c | 3 ++-
+ 3 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/cdns3/cdnsp-gadget.c
++++ b/drivers/usb/cdns3/cdnsp-gadget.c
+@@ -1799,6 +1799,8 @@ static void cdnsp_get_rev_cap(struct cdn
+ reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
+ pdev->rev_cap = reg;
+
++ pdev->rtl_revision = readl(&pdev->rev_cap->rtl_revision);
++
+ dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
+ readl(&pdev->rev_cap->ctrl_revision),
+ readl(&pdev->rev_cap->rtl_revision),
+--- a/drivers/usb/cdns3/cdnsp-gadget.h
++++ b/drivers/usb/cdns3/cdnsp-gadget.h
+@@ -1362,6 +1362,7 @@ struct cdnsp_port {
+ * @rev_cap: Controller Capabilities Registers.
+ * @hcs_params1: Cached register copies of read-only HCSPARAMS1
+ * @hcc_params: Cached register copies of read-only HCCPARAMS1
++ * @rtl_revision: Cached controller rtl revision.
+ * @setup: Temporary buffer for setup packet.
+ * @ep0_preq: Internal allocated request used during enumeration.
+ * @ep0_stage: ep0 stage during enumeration process.
+@@ -1416,6 +1417,8 @@ struct cdnsp_device {
+ __u32 hcs_params1;
+ __u32 hcs_params3;
+ __u32 hcc_params;
++ #define RTL_REVISION_NEW_LPM 0x2700
++ __u32 rtl_revision;
+ /* Lock used in interrupt thread context. */
+ spinlock_t lock;
+ struct usb_ctrlrequest setup;
+--- a/drivers/usb/cdns3/cdnsp-ring.c
++++ b/drivers/usb/cdns3/cdnsp-ring.c
+@@ -308,7 +308,8 @@ static bool cdnsp_ring_ep_doorbell(struc
+
+ writel(db_value, reg_addr);
+
+- cdnsp_force_l0_go(pdev);
++ if (pdev->rtl_revision < RTL_REVISION_NEW_LPM)
++ cdnsp_force_l0_go(pdev);
+
+ /* Doorbell was set. */
+ return true;
--- /dev/null
+From 59820fde001500c167342257650541280c622b73 Mon Sep 17 00:00:00 2001
+From: Wayne Chang <waynec@nvidia.com>
+Date: Fri, 18 Apr 2025 16:12:28 +0800
+Subject: usb: gadget: tegra-xudc: ACK ST_RC after clearing CTRL_RUN
+
+From: Wayne Chang <waynec@nvidia.com>
+
+commit 59820fde001500c167342257650541280c622b73 upstream.
+
+We identified a bug where the ST_RC bit in the status register was not
+being acknowledged after clearing the CTRL_RUN bit in the control
+register. This could lead to unexpected behavior in the USB gadget
+drivers.
+
+This patch resolves the issue by adding the necessary code to explicitly
+acknowledge ST_RC after clearing CTRL_RUN based on the programming
+sequence, ensuring proper state transition.
+
+Fixes: 49db427232fe ("usb: gadget: Add UDC driver for tegra XUSB device mode controller")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Wayne Chang <waynec@nvidia.com>
+Link: https://lore.kernel.org/r/20250418081228.1194779-1-waynec@nvidia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/udc/tegra-xudc.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/usb/gadget/udc/tegra-xudc.c
++++ b/drivers/usb/gadget/udc/tegra-xudc.c
+@@ -1737,6 +1737,10 @@ static int __tegra_xudc_ep_disable(struc
+ val = xudc_readl(xudc, CTRL);
+ val &= ~CTRL_RUN;
+ xudc_writel(xudc, val, CTRL);
++
++ val = xudc_readl(xudc, ST);
++ if (val & ST_RC)
++ xudc_writel(xudc, ST_RC, ST);
+ }
+
+ dev_info(xudc->dev, "ep %u disabled\n", ep->index);
--- /dev/null
+From 732f35cf8bdfece582f6e4a9c659119036577308 Mon Sep 17 00:00:00 2001
+From: Jim Lin <jilin@nvidia.com>
+Date: Tue, 22 Apr 2025 19:40:01 +0800
+Subject: usb: host: tegra: Prevent host controller crash when OTG port is used
+
+From: Jim Lin <jilin@nvidia.com>
+
+commit 732f35cf8bdfece582f6e4a9c659119036577308 upstream.
+
+When a USB device is connected to the OTG port, the tegra_xhci_id_work()
+routine transitions the PHY to host mode and calls xhci_hub_control()
+with the SetPortFeature command to enable port power.
+
+In certain cases, the XHCI controller may be in a low-power state
+when this operation occurs. If xhci_hub_control() is invoked while
+the controller is suspended, the PORTSC register may return 0xFFFFFFFF,
+indicating a read failure. This causes xhci_hc_died() to be triggered,
+leading to host controller shutdown.
+
+Example backtrace:
+[ 105.445736] Workqueue: events tegra_xhci_id_work
+[ 105.445747] dump_backtrace+0x0/0x1e8
+[ 105.445759] xhci_hc_died.part.48+0x40/0x270
+[ 105.445769] tegra_xhci_set_port_power+0xc0/0x240
+[ 105.445774] tegra_xhci_id_work+0x130/0x240
+
+To prevent this, ensure the controller is fully resumed before
+interacting with hardware registers by calling pm_runtime_get_sync()
+prior to the host mode transition and xhci_hub_control().
+
+Fixes: f836e7843036 ("usb: xhci-tegra: Add OTG support")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Jim Lin <jilin@nvidia.com>
+Signed-off-by: Wayne Chang <waynec@nvidia.com>
+Link: https://lore.kernel.org/r/20250422114001.126367-1-waynec@nvidia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-tegra.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/usb/host/xhci-tegra.c
++++ b/drivers/usb/host/xhci-tegra.c
+@@ -1228,6 +1228,7 @@ static void tegra_xhci_id_work(struct wo
+ tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl,
+ tegra->otg_usb2_port);
+
++ pm_runtime_get_sync(tegra->dev);
+ if (tegra->host_mode) {
+ /* switch to host mode */
+ if (tegra->otg_usb3_port >= 0) {
+@@ -1257,6 +1258,7 @@ static void tegra_xhci_id_work(struct wo
+ }
+
+ tegra_xhci_set_port_power(tegra, true, true);
++ pm_runtime_mark_last_busy(tegra->dev);
+
+ } else {
+ if (tegra->otg_usb3_port >= 0)
+@@ -1264,6 +1266,7 @@ static void tegra_xhci_id_work(struct wo
+
+ tegra_xhci_set_port_power(tegra, true, false);
+ }
++ pm_runtime_put_autosuspend(tegra->dev);
+ }
+
+ #if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)
--- /dev/null
+From 1f0304dfd9d217c2f8b04a9ef4b3258a66eedd27 Mon Sep 17 00:00:00 2001
+From: Jason Andryuk <jason.andryuk@amd.com>
+Date: Tue, 6 May 2025 17:09:33 -0400
+Subject: xenbus: Use kref to track req lifetime
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jason Andryuk <jason.andryuk@amd.com>
+
+commit 1f0304dfd9d217c2f8b04a9ef4b3258a66eedd27 upstream.
+
+Marek reported seeing a NULL pointer fault in the xenbus_thread
+callstack:
+BUG: kernel NULL pointer dereference, address: 0000000000000000
+RIP: e030:__wake_up_common+0x4c/0x180
+Call Trace:
+ <TASK>
+ __wake_up_common_lock+0x82/0xd0
+ process_msg+0x18e/0x2f0
+ xenbus_thread+0x165/0x1c0
+
+process_msg+0x18e is req->cb(req). req->cb is set to xs_wake_up(), a
+thin wrapper around wake_up(), or xenbus_dev_queue_reply(). It seems
+like it was xs_wake_up() in this case.
+
+It seems like req may have woken up the xs_wait_for_reply(), which
+kfree()ed the req. When xenbus_thread resumes, it faults on the zero-ed
+data.
+
+Linux Device Drivers 2nd edition states:
+"Normally, a wake_up call can cause an immediate reschedule to happen,
+meaning that other processes might run before wake_up returns."
+... which would match the behaviour observed.
+
+Change to keeping two krefs on each request. One for the caller, and
+one for xenbus_thread. Each will kref_put() when finished, and the last
+will free it.
+
+This use of kref matches the description in
+Documentation/core-api/kref.rst
+
+Link: https://lore.kernel.org/xen-devel/ZO0WrR5J0xuwDIxW@mail-itl/
+Reported-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Fixes: fd8aa9095a95 ("xen: optimize xenbus driver for multiple concurrent xenstore accesses")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jason Andryuk <jason.andryuk@amd.com>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20250506210935.5607-1-jason.andryuk@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/xen/xenbus/xenbus.h | 2 ++
+ drivers/xen/xenbus/xenbus_comms.c | 9 ++++-----
+ drivers/xen/xenbus/xenbus_dev_frontend.c | 2 +-
+ drivers/xen/xenbus/xenbus_xs.c | 18 ++++++++++++++++--
+ 4 files changed, 23 insertions(+), 8 deletions(-)
+
+--- a/drivers/xen/xenbus/xenbus.h
++++ b/drivers/xen/xenbus/xenbus.h
+@@ -77,6 +77,7 @@ enum xb_req_state {
+ struct xb_req_data {
+ struct list_head list;
+ wait_queue_head_t wq;
++ struct kref kref;
+ struct xsd_sockmsg msg;
+ uint32_t caller_req_id;
+ enum xsd_sockmsg_type type;
+@@ -103,6 +104,7 @@ int xb_init_comms(void);
+ void xb_deinit_comms(void);
+ int xs_watch_msg(struct xs_watch_event *event);
+ void xs_request_exit(struct xb_req_data *req);
++void xs_free_req(struct kref *kref);
+
+ int xenbus_match(struct device *_dev, struct device_driver *_drv);
+ int xenbus_dev_probe(struct device *_dev);
+--- a/drivers/xen/xenbus/xenbus_comms.c
++++ b/drivers/xen/xenbus/xenbus_comms.c
+@@ -309,8 +309,8 @@ static int process_msg(void)
+ virt_wmb();
+ req->state = xb_req_state_got_reply;
+ req->cb(req);
+- } else
+- kfree(req);
++ }
++ kref_put(&req->kref, xs_free_req);
+ }
+
+ mutex_unlock(&xs_response_mutex);
+@@ -386,14 +386,13 @@ static int process_writes(void)
+ state.req->msg.type = XS_ERROR;
+ state.req->err = err;
+ list_del(&state.req->list);
+- if (state.req->state == xb_req_state_aborted)
+- kfree(state.req);
+- else {
++ if (state.req->state != xb_req_state_aborted) {
+ /* write err, then update state */
+ virt_wmb();
+ state.req->state = xb_req_state_got_reply;
+ wake_up(&state.req->wq);
+ }
++ kref_put(&state.req->kref, xs_free_req);
+
+ mutex_unlock(&xb_write_mutex);
+
+--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
+@@ -406,7 +406,7 @@ void xenbus_dev_queue_reply(struct xb_re
+ mutex_unlock(&u->reply_mutex);
+
+ kfree(req->body);
+- kfree(req);
++ kref_put(&req->kref, xs_free_req);
+
+ kref_put(&u->kref, xenbus_file_free);
+
+--- a/drivers/xen/xenbus/xenbus_xs.c
++++ b/drivers/xen/xenbus/xenbus_xs.c
+@@ -112,6 +112,12 @@ static void xs_suspend_exit(void)
+ wake_up_all(&xs_state_enter_wq);
+ }
+
++void xs_free_req(struct kref *kref)
++{
++ struct xb_req_data *req = container_of(kref, struct xb_req_data, kref);
++ kfree(req);
++}
++
+ static uint32_t xs_request_enter(struct xb_req_data *req)
+ {
+ uint32_t rq_id;
+@@ -237,6 +243,12 @@ static void xs_send(struct xb_req_data *
+ req->caller_req_id = req->msg.req_id;
+ req->msg.req_id = xs_request_enter(req);
+
++ /*
++ * Take 2nd ref. One for this thread, and the second for the
++ * xenbus_thread.
++ */
++ kref_get(&req->kref);
++
+ mutex_lock(&xb_write_mutex);
+ list_add_tail(&req->list, &xb_write_list);
+ notify = list_is_singular(&xb_write_list);
+@@ -261,8 +273,8 @@ static void *xs_wait_for_reply(struct xb
+ if (req->state == xb_req_state_queued ||
+ req->state == xb_req_state_wait_reply)
+ req->state = xb_req_state_aborted;
+- else
+- kfree(req);
++
++ kref_put(&req->kref, xs_free_req);
+ mutex_unlock(&xb_write_mutex);
+
+ return ret;
+@@ -291,6 +303,7 @@ int xenbus_dev_request_and_reply(struct
+ req->cb = xenbus_dev_queue_reply;
+ req->par = par;
+ req->user_req = true;
++ kref_init(&req->kref);
+
+ xs_send(req, msg);
+
+@@ -319,6 +332,7 @@ static void *xs_talkv(struct xenbus_tran
+ req->num_vecs = num_vecs;
+ req->cb = xs_wake_up;
+ req->user_req = false;
++ kref_init(&req->kref);
+
+ msg.req_id = 0;
+ msg.tx_id = t.id;