--- /dev/null
+From stable+bounces-189979-greg=kroah.com@vger.kernel.org Mon Oct 27 22:13:25 2025
+From: Celeste Liu <uwu@coelacanthus.name>
+Date: Mon, 27 Oct 2025 21:11:57 +0800
+Subject: can: gs_usb: increase max interface to U8_MAX
+To: stable@vger.kernel.org
+Cc: Celeste Liu <uwu@coelacanthus.name>, Runcheng Lu <runcheng.lu@hpmicro.com>, Vincent Mailhol <mailhol@kernel.org>, Marc Kleine-Budde <mkl@pengutronix.de>
+Message-ID: <20251027131156.875878-2-uwu@coelacanthus.name>
+
+From: Celeste Liu <uwu@coelacanthus.name>
+
+commit 2a27f6a8fb5722223d526843040f747e9b0e8060 upstream
+
+This issue was found by Runcheng Lu while developing the HSCanT USB to
+CAN FD converter[1]. The original developers may have only had a device
+with 3 interfaces to test, so they wrote 3 here and left a larger limit
+for a future change.
+
+During the HSCanT development we actually used 4 interfaces, so the
+limit of 3 is no longer enough. But increasing it by just one is not
+future-proof either. Since the channel index type in gs_host_frame is
+u8, make canch[] a flexible array indexed by u8: it is then naturally
+constrained by U8_MAX and we avoid statically allocating 256 pointers
+for every gs_usb device.
+
+[1]: https://github.com/cherry-embedded/HSCanT-hardware
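+
+A rough sketch of the resulting pattern (illustrative names such as
+example_usb and icount, not the driver code itself): the channel count
+lives in a u8, so it is bounded by U8_MAX, and only the pointers that
+are actually needed get allocated, via struct_size():
+
+	struct example_usb {
+		u8 channel_cnt;
+		struct example_can *canch[];	/* flexible array member */
+	};
+
+	struct example_usb *dev;
+
+	if (icount > type_max(typeof(dev->channel_cnt)))
+		return -EINVAL;
+
+	/* one allocation: the struct plus icount trailing pointers */
+	dev = kzalloc(struct_size(dev, canch, icount), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+	dev->channel_cnt = icount;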
+
+Fixes: d08e973a77d1 ("can: gs_usb: Added support for the GS_USB CAN devices")
+Reported-by: Runcheng Lu <runcheng.lu@hpmicro.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Vincent Mailhol <mailhol@kernel.org>
+Signed-off-by: Celeste Liu <uwu@coelacanthus.name>
+Link: https://patch.msgid.link/20250930-gs-usb-max-if-v5-1-863330bf6666@coelacanthus.name
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/can/usb/gs_usb.c | 23 +++++++++++------------
+ 1 file changed, 11 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -156,10 +156,6 @@ struct gs_host_frame {
+ #define GS_MAX_TX_URBS 10
+ /* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
+ #define GS_MAX_RX_URBS 30
+-/* Maximum number of interfaces the driver supports per device.
+- * Current hardware only supports 2 interfaces. The future may vary.
+- */
+-#define GS_MAX_INTF 2
+
+ struct gs_tx_context {
+ struct gs_can *dev;
+@@ -190,10 +186,11 @@ struct gs_can {
+
+ /* usb interface struct */
+ struct gs_usb {
+- struct gs_can *canch[GS_MAX_INTF];
+ struct usb_anchor rx_submitted;
+ struct usb_device *udev;
+ u8 active_channels;
++ u8 channel_cnt;
++ struct gs_can *canch[];
+ };
+
+ /* 'allocate' a tx context.
+@@ -321,7 +318,7 @@ static void gs_usb_receive_bulk_callback
+ }
+
+ /* device reports out of range channel id */
+- if (hf->channel >= GS_MAX_INTF)
++ if (hf->channel >= usbcan->channel_cnt)
+ goto device_detach;
+
+ dev = usbcan->canch[hf->channel];
+@@ -409,7 +406,7 @@ static void gs_usb_receive_bulk_callback
+ /* USB failure take down all interfaces */
+ if (rc == -ENODEV) {
+ device_detach:
+- for (rc = 0; rc < GS_MAX_INTF; rc++) {
++ for (rc = 0; rc < usbcan->channel_cnt; rc++) {
+ if (usbcan->canch[rc])
+ netif_device_detach(usbcan->canch[rc]->netdev);
+ }
+@@ -991,20 +988,22 @@ static int gs_usb_probe(struct usb_inter
+ icount = dconf->icount + 1;
+ dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
+
+- if (icount > GS_MAX_INTF) {
++ if (icount > type_max(typeof(dev->channel_cnt))) {
+ dev_err(&intf->dev,
+- "Driver cannot handle more that %d CAN interfaces\n",
+- GS_MAX_INTF);
++ "Driver cannot handle more that %u CAN interfaces\n",
++ type_max(typeof(dev->channel_cnt)));
+ kfree(dconf);
+ return -EINVAL;
+ }
+
+- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ dev = kzalloc(struct_size(dev, canch, icount), GFP_KERNEL);
+ if (!dev) {
+ kfree(dconf);
+ return -ENOMEM;
+ }
+
++ dev->channel_cnt = icount;
++
+ init_usb_anchor(&dev->rx_submitted);
+
+ usb_set_intfdata(intf, dev);
+@@ -1045,7 +1044,7 @@ static void gs_usb_disconnect(struct usb
+ return;
+ }
+
+- for (i = 0; i < GS_MAX_INTF; i++)
++ for (i = 0; i < dev->channel_cnt; i++)
+ if (dev->canch[i])
+ gs_destroy_candev(dev->canch[i]);
+
--- /dev/null
+From stable+bounces-189990-greg=kroah.com@vger.kernel.org Mon Oct 27 22:57:27 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 09:56:54 -0400
+Subject: devcoredump: Fix circular locking dependency with devcd->mutex.
+To: stable@vger.kernel.org
+Cc: Maarten Lankhorst <dev@lankhorst.se>, Mukesh Ojha <quic_mojha@quicinc.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Johannes Berg <johannes@sipsolutions.net>, "Rafael J. Wysocki" <rafael@kernel.org>, Danilo Krummrich <dakr@kernel.org>, linux-kernel@vger.kernel.org, Matthew Brost <matthew.brost@intel.com>, Mukesh Ojha <mukesh.ojha@oss.qualcomm.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251027135654.496095-1-sashal@kernel.org>
+
+From: Maarten Lankhorst <dev@lankhorst.se>
+
+[ Upstream commit a91c8096590bd7801a26454789f2992094fe36da ]
+
+The original code causes a circular locking dependency found by lockdep.
+
+======================================================
+WARNING: possible circular locking dependency detected
+6.16.0-rc6-lgci-xe-xe-pw-151626v3+ #1 Tainted: G S U
+------------------------------------------------------
+xe_fault_inject/5091 is trying to acquire lock:
+ffff888156815688 ((work_completion)(&(&devcd->del_wk)->work)){+.+.}-{0:0}, at: __flush_work+0x25d/0x660
+
+but task is already holding lock:
+
+ffff888156815620 (&devcd->mutex){+.+.}-{3:3}, at: dev_coredump_put+0x3f/0xa0
+which lock already depends on the new lock.
+the existing dependency chain (in reverse order) is:
+-> #2 (&devcd->mutex){+.+.}-{3:3}:
+ mutex_lock_nested+0x4e/0xc0
+ devcd_data_write+0x27/0x90
+ sysfs_kf_bin_write+0x80/0xf0
+ kernfs_fop_write_iter+0x169/0x220
+ vfs_write+0x293/0x560
+ ksys_write+0x72/0xf0
+ __x64_sys_write+0x19/0x30
+ x64_sys_call+0x2bf/0x2660
+ do_syscall_64+0x93/0xb60
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+-> #1 (kn->active#236){++++}-{0:0}:
+ kernfs_drain+0x1e2/0x200
+ __kernfs_remove+0xae/0x400
+ kernfs_remove_by_name_ns+0x5d/0xc0
+ remove_files+0x54/0x70
+ sysfs_remove_group+0x3d/0xa0
+ sysfs_remove_groups+0x2e/0x60
+ device_remove_attrs+0xc7/0x100
+ device_del+0x15d/0x3b0
+ devcd_del+0x19/0x30
+ process_one_work+0x22b/0x6f0
+ worker_thread+0x1e8/0x3d0
+ kthread+0x11c/0x250
+ ret_from_fork+0x26c/0x2e0
+ ret_from_fork_asm+0x1a/0x30
+-> #0 ((work_completion)(&(&devcd->del_wk)->work)){+.+.}-{0:0}:
+ __lock_acquire+0x1661/0x2860
+ lock_acquire+0xc4/0x2f0
+ __flush_work+0x27a/0x660
+ flush_delayed_work+0x5d/0xa0
+ dev_coredump_put+0x63/0xa0
+ xe_driver_devcoredump_fini+0x12/0x20 [xe]
+ devm_action_release+0x12/0x30
+ release_nodes+0x3a/0x120
+ devres_release_all+0x8a/0xd0
+ device_unbind_cleanup+0x12/0x80
+ device_release_driver_internal+0x23a/0x280
+ device_driver_detach+0x14/0x20
+ unbind_store+0xaf/0xc0
+ drv_attr_store+0x21/0x50
+ sysfs_kf_write+0x4a/0x80
+ kernfs_fop_write_iter+0x169/0x220
+ vfs_write+0x293/0x560
+ ksys_write+0x72/0xf0
+ __x64_sys_write+0x19/0x30
+ x64_sys_call+0x2bf/0x2660
+ do_syscall_64+0x93/0xb60
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+other info that might help us debug this:
+Chain exists of: (work_completion)(&(&devcd->del_wk)->work) --> kn->active#236 --> &devcd->mutex
+ Possible unsafe locking scenario:
+ CPU0 CPU1
+ ---- ----
+ lock(&devcd->mutex);
+ lock(kn->active#236);
+ lock(&devcd->mutex);
+ lock((work_completion)(&(&devcd->del_wk)->work));
+ *** DEADLOCK ***
+5 locks held by xe_fault_inject/5091:
+ #0: ffff8881129f9488 (sb_writers#5){.+.+}-{0:0}, at: ksys_write+0x72/0xf0
+ #1: ffff88810c755078 (&of->mutex#2){+.+.}-{3:3}, at: kernfs_fop_write_iter+0x123/0x220
+ #2: ffff8881054811a0 (&dev->mutex){....}-{3:3}, at: device_release_driver_internal+0x55/0x280
+ #3: ffff888156815620 (&devcd->mutex){+.+.}-{3:3}, at: dev_coredump_put+0x3f/0xa0
+ #4: ffffffff8359e020 (rcu_read_lock){....}-{1:2}, at: __flush_work+0x72/0x660
+stack backtrace:
+CPU: 14 UID: 0 PID: 5091 Comm: xe_fault_inject Tainted: G S U 6.16.0-rc6-lgci-xe-xe-pw-151626v3+ #1 PREEMPT_{RT,(lazy)}
+Tainted: [S]=CPU_OUT_OF_SPEC, [U]=USER
+Hardware name: Micro-Star International Co., Ltd. MS-7D25/PRO Z690-A DDR4(MS-7D25), BIOS 1.10 12/13/2021
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x91/0xf0
+ dump_stack+0x10/0x20
+ print_circular_bug+0x285/0x360
+ check_noncircular+0x135/0x150
+ ? register_lock_class+0x48/0x4a0
+ __lock_acquire+0x1661/0x2860
+ lock_acquire+0xc4/0x2f0
+ ? __flush_work+0x25d/0x660
+ ? mark_held_locks+0x46/0x90
+ ? __flush_work+0x25d/0x660
+ __flush_work+0x27a/0x660
+ ? __flush_work+0x25d/0x660
+ ? trace_hardirqs_on+0x1e/0xd0
+ ? __pfx_wq_barrier_func+0x10/0x10
+ flush_delayed_work+0x5d/0xa0
+ dev_coredump_put+0x63/0xa0
+ xe_driver_devcoredump_fini+0x12/0x20 [xe]
+ devm_action_release+0x12/0x30
+ release_nodes+0x3a/0x120
+ devres_release_all+0x8a/0xd0
+ device_unbind_cleanup+0x12/0x80
+ device_release_driver_internal+0x23a/0x280
+ ? bus_find_device+0xa8/0xe0
+ device_driver_detach+0x14/0x20
+ unbind_store+0xaf/0xc0
+ drv_attr_store+0x21/0x50
+ sysfs_kf_write+0x4a/0x80
+ kernfs_fop_write_iter+0x169/0x220
+ vfs_write+0x293/0x560
+ ksys_write+0x72/0xf0
+ __x64_sys_write+0x19/0x30
+ x64_sys_call+0x2bf/0x2660
+ do_syscall_64+0x93/0xb60
+ ? __f_unlock_pos+0x15/0x20
+ ? __x64_sys_getdents64+0x9b/0x130
+ ? __pfx_filldir64+0x10/0x10
+ ? do_syscall_64+0x1a2/0xb60
+ ? clear_bhb_loop+0x30/0x80
+ ? clear_bhb_loop+0x30/0x80
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0033:0x76e292edd574
+Code: c7 00 16 00 00 00 b8 ff ff ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 f3 0f 1e fa 80 3d d5 ea 0e 00 00 74 13 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 54 c3 0f 1f 00 55 48 89 e5 48 83 ec 20 48 89
+RSP: 002b:00007fffe247a828 EFLAGS: 00000202 ORIG_RAX: 0000000000000001
+RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 000076e292edd574
+RDX: 000000000000000c RSI: 00006267f6306063 RDI: 000000000000000b
+RBP: 000000000000000c R08: 000076e292fc4b20 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000202 R12: 00006267f6306063
+R13: 000000000000000b R14: 00006267e6859c00 R15: 000076e29322a000
+ </TASK>
+xe 0000:03:00.0: [drm] Xe device coredump has been deleted.
+
+Fixes: 01daccf74832 ("devcoredump : Serialize devcd_del work")
+Cc: Mukesh Ojha <quic_mojha@quicinc.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Johannes Berg <johannes@sipsolutions.net>
+Cc: Rafael J. Wysocki <rafael@kernel.org>
+Cc: Danilo Krummrich <dakr@kernel.org>
+Cc: linux-kernel@vger.kernel.org
+Cc: stable@vger.kernel.org # v6.1+
+Signed-off-by: Maarten Lankhorst <dev@lankhorst.se>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Acked-by: Mukesh Ojha <mukesh.ojha@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250723142416.1020423-1-dev@lankhorst.se
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[ replaced disable_delayed_work_sync() with cancel_delayed_work_sync() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/devcoredump.c | 138 +++++++++++++++++++++++++++------------------
+ 1 file changed, 84 insertions(+), 54 deletions(-)
+
+--- a/drivers/base/devcoredump.c
++++ b/drivers/base/devcoredump.c
+@@ -30,50 +30,46 @@ struct devcd_entry {
+ void *data;
+ size_t datalen;
+ /*
+- * Here, mutex is required to serialize the calls to del_wk work between
+- * user/kernel space which happens when devcd is added with device_add()
+- * and that sends uevent to user space. User space reads the uevents,
+- * and calls to devcd_data_write() which try to modify the work which is
+- * not even initialized/queued from devcoredump.
++ * There are 2 races for which mutex is required.
+ *
++ * The first race is between device creation and userspace writing to
++ * schedule immediately destruction.
+ *
++ * This race is handled by arming the timer before device creation, but
++ * when device creation fails the timer still exists.
+ *
+- * cpu0(X) cpu1(Y)
++ * To solve this, hold the mutex during device_add(), and set
++ * init_completed on success before releasing the mutex.
+ *
+- * dev_coredump() uevent sent to user space
+- * device_add() ======================> user space process Y reads the
+- * uevents writes to devcd fd
+- * which results into writes to
++ * That way the timer will never fire until device_add() is called,
++ * it will do nothing if init_completed is not set. The timer is also
++ * cancelled in that case.
+ *
+- * devcd_data_write()
+- * mod_delayed_work()
+- * try_to_grab_pending()
+- * del_timer()
+- * debug_assert_init()
+- * INIT_DELAYED_WORK()
+- * schedule_delayed_work()
+- *
+- *
+- * Also, mutex alone would not be enough to avoid scheduling of
+- * del_wk work after it get flush from a call to devcd_free()
+- * mentioned as below.
+- *
+- * disabled_store()
+- * devcd_free()
+- * mutex_lock() devcd_data_write()
+- * flush_delayed_work()
+- * mutex_unlock()
+- * mutex_lock()
+- * mod_delayed_work()
+- * mutex_unlock()
+- * So, delete_work flag is required.
++ * The second race involves multiple parallel invocations of devcd_free(),
++ * add a deleted flag so only 1 can call the destructor.
+ */
+ struct mutex mutex;
+- bool delete_work;
++ bool init_completed, deleted;
+ struct module *owner;
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen);
+ void (*free)(void *data);
++ /*
++ * If nothing interferes and device_add() was returns success,
++ * del_wk will destroy the device after the timer fires.
++ *
++ * Multiple userspace processes can interfere in the working of the timer:
++ * - Writing to the coredump will reschedule the timer to run immediately,
++ * if still armed.
++ *
++ * This is handled by using "if (cancel_delayed_work()) {
++ * schedule_delayed_work() }", to prevent re-arming after having
++ * been previously fired.
++ * - Writing to /sys/class/devcoredump/disabled will destroy the
++ * coredump synchronously.
++ * This is handled by using disable_delayed_work_sync(), and then
++ * checking if deleted flag is set with &devcd->mutex held.
++ */
+ struct delayed_work del_wk;
+ struct device *failing_dev;
+ };
+@@ -102,14 +98,27 @@ static void devcd_dev_release(struct dev
+ kfree(devcd);
+ }
+
++static void __devcd_del(struct devcd_entry *devcd)
++{
++ devcd->deleted = true;
++ device_del(&devcd->devcd_dev);
++ put_device(&devcd->devcd_dev);
++}
++
+ static void devcd_del(struct work_struct *wk)
+ {
+ struct devcd_entry *devcd;
++ bool init_completed;
+
+ devcd = container_of(wk, struct devcd_entry, del_wk.work);
+
+- device_del(&devcd->devcd_dev);
+- put_device(&devcd->devcd_dev);
++ /* devcd->mutex serializes against dev_coredumpm_timeout */
++ mutex_lock(&devcd->mutex);
++ init_completed = devcd->init_completed;
++ mutex_unlock(&devcd->mutex);
++
++ if (init_completed)
++ __devcd_del(devcd);
+ }
+
+ static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
+@@ -129,12 +138,12 @@ static ssize_t devcd_data_write(struct f
+ struct device *dev = kobj_to_dev(kobj);
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+- mutex_lock(&devcd->mutex);
+- if (!devcd->delete_work) {
+- devcd->delete_work = true;
+- mod_delayed_work(system_wq, &devcd->del_wk, 0);
+- }
+- mutex_unlock(&devcd->mutex);
++ /*
++ * Although it's tempting to use mod_delayed work here,
++ * that will cause a reschedule if the timer already fired.
++ */
++ if (cancel_delayed_work(&devcd->del_wk))
++ schedule_delayed_work(&devcd->del_wk, 0);
+
+ return count;
+ }
+@@ -162,11 +171,21 @@ static int devcd_free(struct device *dev
+ {
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
++ /*
++ * To prevent a race with devcd_data_write(), cancel work and
++ * complete manually instead.
++ *
++ * We cannot rely on the return value of
++ * cancel_delayed_work_sync() here, because it might be in the
++ * middle of a cancel_delayed_work + schedule_delayed_work pair.
++ *
++ * devcd->mutex here guards against multiple parallel invocations
++ * of devcd_free().
++ */
++ cancel_delayed_work_sync(&devcd->del_wk);
+ mutex_lock(&devcd->mutex);
+- if (!devcd->delete_work)
+- devcd->delete_work = true;
+-
+- flush_delayed_work(&devcd->del_wk);
++ if (!devcd->deleted)
++ __devcd_del(devcd);
+ mutex_unlock(&devcd->mutex);
+ return 0;
+ }
+@@ -190,12 +209,10 @@ static ssize_t disabled_show(struct clas
+ * put_device() <- last reference
+ * error = fn(dev, data) devcd_dev_release()
+ * devcd_free(dev, data) kfree(devcd)
+- * mutex_lock(&devcd->mutex);
+ *
+ *
+- * In the above diagram, It looks like disabled_store() would be racing with parallely
+- * running devcd_del() and result in memory abort while acquiring devcd->mutex which
+- * is called after kfree of devcd memory after dropping its last reference with
++ * In the above diagram, it looks like disabled_store() would be racing with parallelly
++ * running devcd_del() and result in memory abort after dropping its last reference with
+ * put_device(). However, this will not happens as fn(dev, data) runs
+ * with its own reference to device via klist_node so it is not its last reference.
+ * so, above situation would not occur.
+@@ -357,7 +374,7 @@ void dev_coredumpm(struct device *dev, s
+ devcd->read = read;
+ devcd->free = free;
+ devcd->failing_dev = get_device(dev);
+- devcd->delete_work = false;
++ devcd->deleted = false;
+
+ mutex_init(&devcd->mutex);
+ device_initialize(&devcd->devcd_dev);
+@@ -366,8 +383,14 @@ void dev_coredumpm(struct device *dev, s
+ atomic_inc_return(&devcd_count));
+ devcd->devcd_dev.class = &devcd_class;
+
+- mutex_lock(&devcd->mutex);
+ dev_set_uevent_suppress(&devcd->devcd_dev, true);
++
++ /* devcd->mutex prevents devcd_del() completing until init finishes */
++ mutex_lock(&devcd->mutex);
++ devcd->init_completed = false;
++ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
++ schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
++
+ if (device_add(&devcd->devcd_dev))
+ goto put_device;
+
+@@ -381,13 +404,20 @@ void dev_coredumpm(struct device *dev, s
+
+ dev_set_uevent_suppress(&devcd->devcd_dev, false);
+ kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
+- INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+- schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
++
++ /*
++ * Safe to run devcd_del() now that we are done with devcd_dev.
++ * Alternatively we could have taken a ref on devcd_dev before
++ * dropping the lock.
++ */
++ devcd->init_completed = true;
+ mutex_unlock(&devcd->mutex);
+ return;
+ put_device:
+- put_device(&devcd->devcd_dev);
+ mutex_unlock(&devcd->mutex);
++ cancel_delayed_work_sync(&devcd->del_wk);
++ put_device(&devcd->devcd_dev);
++
+ put_module:
+ module_put(owner);
+ free:
--- /dev/null
+From stable+bounces-192101-greg=kroah.com@vger.kernel.org Mon Nov 3 08:43:40 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Nov 2025 18:43:32 -0500
+Subject: net: phy: dp83867: Disable EEE support as not implemented
+To: stable@vger.kernel.org
+Cc: Emanuele Ghidoli <emanuele.ghidoli@toradex.com>, Andrew Lunn <andrew@lunn.ch>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251102234332.3659143-1-sashal@kernel.org>
+
+From: Emanuele Ghidoli <emanuele.ghidoli@toradex.com>
+
+[ Upstream commit 84a905290cb4c3d9a71a9e3b2f2e02e031e7512f ]
+
+While the DP83867 PHYs report EEE capability through their feature
+registers, the actual hardware does not support EEE (see Links).
+When the connected MAC enables EEE, it causes link instability and
+communication failures.
+
+The issue is reproducible with an iMX8MP and its stmmac ethernet port.
+Since the introduction of phylink-managed EEE support in the stmmac driver,
+EEE is now enabled by default, leading to issues on systems using the
+DP83867 PHY.
+
+Call phy_disable_eee() during PHY initialization to prevent EEE from being
+enabled on DP83867 PHYs.
+
+Link: https://e2e.ti.com/support/interface-group/interface/f/interface-forum/1445244/dp83867ir-dp83867-disable-eee-lpi
+Link: https://e2e.ti.com/support/interface-group/interface/f/interface-forum/658638/dp83867ir-eee-energy-efficient-ethernet
+Fixes: 2a10154abcb7 ("net: phy: dp83867: Add TI dp83867 phy")
+Cc: stable@vger.kernel.org
+Signed-off-by: Emanuele Ghidoli <emanuele.ghidoli@toradex.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20251023144857.529566-1-ghidoliemanuele@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ replaced phy_disable_eee() call with direct eee_broken_modes assignment ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/dp83867.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -337,6 +337,12 @@ static int dp83867_config_init(struct ph
+ return ret;
+ }
+
++ /* Although the DP83867 reports EEE capability through the
++ * MDIO_PCS_EEE_ABLE and MDIO_AN_EEE_ADV registers, the feature
++ * is not actually implemented in hardware.
++ */
++ phydev->eee_broken_modes = MDIO_EEE_100TX | MDIO_EEE_1000T;
++
+ if (phy_interface_is_rgmii(phydev)) {
+ val = phy_read(phydev, MII_DP83867_PHYCTRL);
+ if (val < 0)
--- /dev/null
+From stable+bounces-190012-greg=kroah.com@vger.kernel.org Tue Oct 28 00:01:39 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 10:59:07 -0400
+Subject: net: ravb: Enforce descriptor type ordering
+To: stable@vger.kernel.org
+Cc: "Lad Prabhakar" <prabhakar.mahadev-lad.rj@bp.renesas.com>, "Fabrizio Castro" <fabrizio.castro.jz@renesas.com>, "Niklas Söderlund" <niklas.soderlund+renesas@ragnatech.se>, "Jakub Kicinski" <kuba@kernel.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251027145907.533046-1-sashal@kernel.org>
+
+From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+
+[ Upstream commit 5370c31e84b0e0999c7b5ff949f4e104def35584 ]
+
+Ensure the TX descriptor type fields are published in a safe order so the
+DMA engine never begins processing a descriptor chain before all descriptor
+fields are fully initialised.
+
+For multi-descriptor transmits the driver writes DT_FEND into the last
+descriptor and DT_FSTART into the first. The DMA engine begins processing
+when it observes DT_FSTART. Move the dma_wmb() barrier so it executes
+immediately after DT_FEND and immediately before writing DT_FSTART
+(and before DT_FSINGLE in the single-descriptor case). This guarantees
+that all prior CPU writes to the descriptor memory are visible to the
+device before DT_FSTART is seen.
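+
+In code form, the required ordering is essentially (an illustrative
+sketch, not the exact driver code):
+
+	/* all other descriptor fields are written first */
+	last_desc->die_dt = DT_FEND;
+
+	/* make every prior descriptor write visible to the device ... */
+	dma_wmb();
+
+	/* ... before the device can observe the start of the chain */
+	first_desc->die_dt = DT_FSTART;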
+
+This avoids a situation where compiler/CPU reordering could publish
+DT_FSTART ahead of DT_FEND or other descriptor fields, allowing the DMA to
+start on a partially initialised chain and causing corrupted transmissions
+or TX timeouts. Such a failure was observed on RZ/G2L with an RT kernel as
+transmit queue timeouts and device resets.
+
+Fixes: 2f45d1902acf ("ravb: minimize TX data copying")
+Cc: stable@vger.kernel.org
+Co-developed-by: Fabrizio Castro <fabrizio.castro.jz@renesas.com>
+Signed-off-by: Fabrizio Castro <fabrizio.castro.jz@renesas.com>
+Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+Reviewed-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+Link: https://patch.msgid.link/20251017151830.171062-4-prabhakar.mahadev-lad.rj@bp.renesas.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ kept unconditional skb_tx_timestamp() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/renesas/ravb_main.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1593,13 +1593,25 @@ static netdev_tx_t ravb_start_xmit(struc
+ }
+
+ skb_tx_timestamp(skb);
+- /* Descriptor type must be set after all the above writes */
+- dma_wmb();
++
+ if (num_tx_desc > 1) {
+ desc->die_dt = DT_FEND;
+ desc--;
++ /* When using multi-descriptors, DT_FEND needs to get written
++ * before DT_FSTART, but the compiler may reorder the memory
++ * writes in an attempt to optimize the code.
++ * Use a dma_wmb() barrier to make sure DT_FEND and DT_FSTART
++ * are written exactly in the order shown in the code.
++ * This is particularly important for cases where the DMA engine
++ * is already running when we are running this code. If the DMA
++ * sees DT_FSTART without the corresponding DT_FEND it will enter
++ * an error condition.
++ */
++ dma_wmb();
+ desc->die_dt = DT_FSTART;
+ } else {
++ /* Descriptor type must be set after all the above writes */
++ dma_wmb();
+ desc->die_dt = DT_FSINGLE;
+ }
+
--- /dev/null
+From stable+bounces-191086-greg=kroah.com@vger.kernel.org Tue Oct 28 04:32:55 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 15:22:45 -0400
+Subject: serial: 8250_dw: handle reset control deassert error
+To: stable@vger.kernel.org
+Cc: Artem Shimko <a.shimko.dev@gmail.com>, stable <stable@kernel.org>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251027192245.660757-3-sashal@kernel.org>
+
+From: Artem Shimko <a.shimko.dev@gmail.com>
+
+[ Upstream commit daeb4037adf7d3349b4a1fb792f4bc9824686a4b ]
+
+Check the return value of reset_control_deassert() in the probe
+function to prevent continuing probe when reset deassertion fails.
+
+Previously, reset_control_deassert() was called without checking its
+return value, which could lead to the probe continuing even when the
+device reset wasn't properly deasserted.
+
+The fix checks the return value and returns an error with dev_err_probe()
+if reset deassertion fails, providing better error handling and
+diagnostics.
+
+Fixes: acbdad8dd1ab ("serial: 8250_dw: simplify optional reset handling")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Artem Shimko <a.shimko.dev@gmail.com>
+Link: https://patch.msgid.link/20251019095131.252848-1-a.shimko.dev@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/8250/8250_dw.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -523,7 +523,9 @@ static int dw8250_probe(struct platform_
+ if (IS_ERR(data->rst))
+ return PTR_ERR(data->rst);
+
+- reset_control_deassert(data->rst);
++ err = reset_control_deassert(data->rst);
++ if (err)
++ return dev_err_probe(dev, err, "failed to deassert resets\n");
+
+ err = devm_add_action_or_reset(dev, dw8250_reset_control_assert, data->rst);
+ if (err)
--- /dev/null
+From stable+bounces-191085-greg=kroah.com@vger.kernel.org Tue Oct 28 04:28:53 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 15:22:44 -0400
+Subject: serial: 8250_dw: Use devm_add_action_or_reset()
+To: stable@vger.kernel.org
+Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251027192245.660757-2-sashal@kernel.org>
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 295b09128d12fb1a7a67f771cc0ae0df869eafaf ]
+
+Slightly simplify ->probe() and drop a few goto labels by using
+devm_add_action_or_reset() for clock and reset cleanup.
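+
+The general shape of the pattern (an illustrative sketch with made-up
+names, not the exact driver code): register a release action right
+after acquiring the resource, so the devres core undoes it on probe
+failure and on device removal, and the manual error/remove paths can
+go away:
+
+	static void example_clk_disable_unprepare(void *data)
+	{
+		clk_disable_unprepare(data);
+	}
+
+	/* in probe(): */
+	err = clk_prepare_enable(data->clk);
+	if (err)
+		return err;
+
+	/* if registration itself fails, the action runs immediately */
+	err = devm_add_action_or_reset(dev, example_clk_disable_unprepare,
+				       data->clk);
+	if (err)
+		return err;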
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20220509172129.37770-1-andriy.shevchenko@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: daeb4037adf7 ("serial: 8250_dw: handle reset control deassert error")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/8250/8250_dw.c | 63 ++++++++++++++++++--------------------
+ 1 file changed, 31 insertions(+), 32 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -384,6 +384,16 @@ static void dw8250_quirks(struct uart_po
+ }
+ }
+
++static void dw8250_clk_disable_unprepare(void *data)
++{
++ clk_disable_unprepare(data);
++}
++
++static void dw8250_reset_control_assert(void *data)
++{
++ reset_control_assert(data);
++}
++
+ static int dw8250_probe(struct platform_device *pdev)
+ {
+ struct uart_8250_port uart = {}, *up = &uart;
+@@ -482,35 +492,43 @@ static int dw8250_probe(struct platform_
+ if (err)
+ dev_warn(dev, "could not enable optional baudclk: %d\n", err);
+
++ err = devm_add_action_or_reset(dev, dw8250_clk_disable_unprepare, data->clk);
++ if (err)
++ return err;
++
+ if (data->clk)
+ p->uartclk = clk_get_rate(data->clk);
+
+ /* If no clock rate is defined, fail. */
+ if (!p->uartclk) {
+ dev_err(dev, "clock rate not defined\n");
+- err = -EINVAL;
+- goto err_clk;
++ return -EINVAL;
+ }
+
+ data->pclk = devm_clk_get_optional(dev, "apb_pclk");
+- if (IS_ERR(data->pclk)) {
+- err = PTR_ERR(data->pclk);
+- goto err_clk;
+- }
++ if (IS_ERR(data->pclk))
++ return PTR_ERR(data->pclk);
+
+ err = clk_prepare_enable(data->pclk);
+ if (err) {
+ dev_err(dev, "could not enable apb_pclk\n");
+- goto err_clk;
++ return err;
+ }
+
++ err = devm_add_action_or_reset(dev, dw8250_clk_disable_unprepare, data->pclk);
++ if (err)
++ return err;
++
+ data->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
+- if (IS_ERR(data->rst)) {
+- err = PTR_ERR(data->rst);
+- goto err_pclk;
+- }
++ if (IS_ERR(data->rst))
++ return PTR_ERR(data->rst);
++
+ reset_control_deassert(data->rst);
+
++ err = devm_add_action_or_reset(dev, dw8250_reset_control_assert, data->rst);
++ if (err)
++ return err;
++
+ dw8250_quirks(p, data);
+
+ /* If the Busy Functionality is not implemented, don't handle it */
+@@ -528,10 +546,8 @@ static int dw8250_probe(struct platform_
+ }
+
+ data->data.line = serial8250_register_8250_port(up);
+- if (data->data.line < 0) {
+- err = data->data.line;
+- goto err_reset;
+- }
++ if (data->data.line < 0)
++ return data->data.line;
+
+ platform_set_drvdata(pdev, data);
+
+@@ -539,17 +555,6 @@ static int dw8250_probe(struct platform_
+ pm_runtime_enable(dev);
+
+ return 0;
+-
+-err_reset:
+- reset_control_assert(data->rst);
+-
+-err_pclk:
+- clk_disable_unprepare(data->pclk);
+-
+-err_clk:
+- clk_disable_unprepare(data->clk);
+-
+- return err;
+ }
+
+ static int dw8250_remove(struct platform_device *pdev)
+@@ -561,12 +566,6 @@ static int dw8250_remove(struct platform
+
+ serial8250_unregister_port(data->data.line);
+
+- reset_control_assert(data->rst);
+-
+- clk_disable_unprepare(data->pclk);
+-
+- clk_disable_unprepare(data->clk);
+-
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
--- /dev/null
+From stable+bounces-191083-greg=kroah.com@vger.kernel.org Tue Oct 28 04:30:29 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Oct 2025 15:22:43 -0400
+Subject: serial: 8250_dw: Use devm_clk_get_optional() to get the input clock
+To: stable@vger.kernel.org
+Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251027192245.660757-1-sashal@kernel.org>
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit a8afc193558a42d5df724c84436ae3b2446d8a30 ]
+
+Simplify the code which fetches the input clock by using
+devm_clk_get_optional(). This comes with a small functional change:
+previously all errors except deferred probe were ignored; now all
+errors are treated as errors. If no input clock is present,
+devm_clk_get_optional() returns NULL instead of an error, which matches
+the behavior of the old code.
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://lore.kernel.org/r/20190925162617.30368-1-andriy.shevchenko@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: daeb4037adf7 ("serial: 8250_dw: handle reset control deassert error")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/8250/8250_dw.c | 75 ++++++++++++++++----------------------
+ 1 file changed, 32 insertions(+), 43 deletions(-)
+
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -283,9 +283,6 @@ static void dw8250_set_termios(struct ua
+ long rate;
+ int ret;
+
+- if (IS_ERR(d->clk))
+- goto out;
+-
+ clk_disable_unprepare(d->clk);
+ rate = clk_round_rate(d->clk, baud * 16);
+ if (rate < 0)
+@@ -296,8 +293,10 @@ static void dw8250_set_termios(struct ua
+ ret = clk_set_rate(d->clk, rate);
+ clk_prepare_enable(d->clk);
+
+- if (!ret)
+- p->uartclk = rate;
++ if (ret)
++ goto out;
++
++ p->uartclk = rate;
+
+ out:
+ p->status &= ~UPSTAT_AUTOCTS;
+@@ -473,19 +472,18 @@ static int dw8250_probe(struct platform_
+ device_property_read_u32(dev, "clock-frequency", &p->uartclk);
+
+ /* If there is separate baudclk, get the rate from it. */
+- data->clk = devm_clk_get(dev, "baudclk");
+- if (IS_ERR(data->clk) && PTR_ERR(data->clk) != -EPROBE_DEFER)
+- data->clk = devm_clk_get(dev, NULL);
+- if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
+- if (!IS_ERR_OR_NULL(data->clk)) {
+- err = clk_prepare_enable(data->clk);
+- if (err)
+- dev_warn(dev, "could not enable optional baudclk: %d\n",
+- err);
+- else
+- p->uartclk = clk_get_rate(data->clk);
+- }
++ data->clk = devm_clk_get_optional(dev, "baudclk");
++ if (data->clk == NULL)
++ data->clk = devm_clk_get_optional(dev, NULL);
++ if (IS_ERR(data->clk))
++ return PTR_ERR(data->clk);
++
++ err = clk_prepare_enable(data->clk);
++ if (err)
++ dev_warn(dev, "could not enable optional baudclk: %d\n", err);
++
++ if (data->clk)
++ p->uartclk = clk_get_rate(data->clk);
+
+ /* If no clock rate is defined, fail. */
+ if (!p->uartclk) {
+@@ -494,17 +492,16 @@ static int dw8250_probe(struct platform_
+ goto err_clk;
+ }
+
+- data->pclk = devm_clk_get(dev, "apb_pclk");
+- if (IS_ERR(data->pclk) && PTR_ERR(data->pclk) == -EPROBE_DEFER) {
+- err = -EPROBE_DEFER;
++ data->pclk = devm_clk_get_optional(dev, "apb_pclk");
++ if (IS_ERR(data->pclk)) {
++ err = PTR_ERR(data->pclk);
+ goto err_clk;
+ }
+- if (!IS_ERR(data->pclk)) {
+- err = clk_prepare_enable(data->pclk);
+- if (err) {
+- dev_err(dev, "could not enable apb_pclk\n");
+- goto err_clk;
+- }
++
++ err = clk_prepare_enable(data->pclk);
++ if (err) {
++ dev_err(dev, "could not enable apb_pclk\n");
++ goto err_clk;
+ }
+
+ data->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
+@@ -547,12 +544,10 @@ err_reset:
+ reset_control_assert(data->rst);
+
+ err_pclk:
+- if (!IS_ERR(data->pclk))
+- clk_disable_unprepare(data->pclk);
++ clk_disable_unprepare(data->pclk);
+
+ err_clk:
+- if (!IS_ERR(data->clk))
+- clk_disable_unprepare(data->clk);
++ clk_disable_unprepare(data->clk);
+
+ return err;
+ }
+@@ -568,11 +563,9 @@ static int dw8250_remove(struct platform
+
+ reset_control_assert(data->rst);
+
+- if (!IS_ERR(data->pclk))
+- clk_disable_unprepare(data->pclk);
++ clk_disable_unprepare(data->pclk);
+
+- if (!IS_ERR(data->clk))
+- clk_disable_unprepare(data->clk);
++ clk_disable_unprepare(data->clk);
+
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+@@ -605,11 +598,9 @@ static int dw8250_runtime_suspend(struct
+ {
+ struct dw8250_data *data = dev_get_drvdata(dev);
+
+- if (!IS_ERR(data->clk))
+- clk_disable_unprepare(data->clk);
++ clk_disable_unprepare(data->clk);
+
+- if (!IS_ERR(data->pclk))
+- clk_disable_unprepare(data->pclk);
++ clk_disable_unprepare(data->pclk);
+
+ return 0;
+ }
+@@ -618,11 +609,9 @@ static int dw8250_runtime_resume(struct
+ {
+ struct dw8250_data *data = dev_get_drvdata(dev);
+
+- if (!IS_ERR(data->pclk))
+- clk_prepare_enable(data->pclk);
++ clk_prepare_enable(data->pclk);
+
+- if (!IS_ERR(data->clk))
+- clk_prepare_enable(data->clk);
++ clk_prepare_enable(data->clk);
+
+ return 0;
+ }
usbnet-prevents-free-active-kevent.patch
drm-etnaviv-fix-flush-sequence-logic.patch
regmap-slimbus-fix-bus_context-pointer-in-regmap-init-calls.patch
+net-phy-dp83867-disable-eee-support-as-not-implemented.patch
+wifi-brcmfmac-fix-crash-while-sending-action-frames-in-standalone-ap-mode.patch
+x86-resctrl-fix-miscount-of-bandwidth-event-when-reactivating-previously-unavailable-rmid.patch
+net-ravb-enforce-descriptor-type-ordering.patch
+devcoredump-fix-circular-locking-dependency-with-devcd-mutex.patch
+can-gs_usb-increase-max-interface-to-u8_max.patch
+serial-8250_dw-use-devm_clk_get_optional-to-get-the-input-clock.patch
+serial-8250_dw-use-devm_add_action_or_reset.patch
+serial-8250_dw-handle-reset-control-deassert-error.patch
--- /dev/null
+From stable+bounces-192067-greg=kroah.com@vger.kernel.org Mon Nov 3 00:19:36 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Nov 2025 10:19:27 -0500
+Subject: wifi: brcmfmac: fix crash while sending Action Frames in standalone AP Mode
+To: stable@vger.kernel.org
+Cc: Gokul Sivakumar <gokulkumar.sivakumar@infineon.com>, Arend van Spriel <arend.vanspriel@broadcom.com>, Johannes Berg <johannes.berg@intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251102151927.3476662-1-sashal@kernel.org>
+
+From: Gokul Sivakumar <gokulkumar.sivakumar@infineon.com>
+
+[ Upstream commit 3776c685ebe5f43e9060af06872661de55e80b9a ]
+
+Currently, whenever there is a need to transmit an Action frame, the
+brcmfmac driver always uses the P2P vif to send the "actframe" IOVAR to
+firmware. The P2P interfaces are available when wpa_supplicant is
+managing the wlan interface.
+
+However, the P2P interfaces are not created/initialized when only
+hostapd is managing the wlan interface. So if hostapd receives an ANQP
+Query REQ Action frame, even from an un-associated STA, the brcmfmac
+driver tries to use an uninitialized P2P vif pointer for sending the
+IOVAR to firmware. This NULL pointer dereference crashes the driver.
+
+ [ 1417.074538] Unable to handle kernel NULL pointer dereference at virtual
+ address 0000000000000000
+ [...]
+ [ 1417.075188] Hardware name: Raspberry Pi 4 Model B Rev 1.5 (DT)
+ [...]
+ [ 1417.075653] Call trace:
+ [ 1417.075662] brcmf_p2p_send_action_frame+0x23c/0xc58 [brcmfmac]
+ [ 1417.075738] brcmf_cfg80211_mgmt_tx+0x304/0x5c0 [brcmfmac]
+ [ 1417.075810] cfg80211_mlme_mgmt_tx+0x1b0/0x428 [cfg80211]
+ [ 1417.076067] nl80211_tx_mgmt+0x238/0x388 [cfg80211]
+ [ 1417.076281] genl_family_rcv_msg_doit+0xe0/0x158
+ [ 1417.076302] genl_rcv_msg+0x220/0x2a0
+ [ 1417.076317] netlink_rcv_skb+0x68/0x140
+ [ 1417.076330] genl_rcv+0x40/0x60
+ [ 1417.076343] netlink_unicast+0x330/0x3b8
+ [ 1417.076357] netlink_sendmsg+0x19c/0x3f8
+ [ 1417.076370] __sock_sendmsg+0x64/0xc0
+ [ 1417.076391] ____sys_sendmsg+0x268/0x2a0
+ [ 1417.076408] ___sys_sendmsg+0xb8/0x118
+ [ 1417.076427] __sys_sendmsg+0x90/0xf8
+ [ 1417.076445] __arm64_sys_sendmsg+0x2c/0x40
+ [ 1417.076465] invoke_syscall+0x50/0x120
+ [ 1417.076486] el0_svc_common.constprop.0+0x48/0xf0
+ [ 1417.076506] do_el0_svc+0x24/0x38
+ [ 1417.076525] el0_svc+0x30/0x100
+ [ 1417.076548] el0t_64_sync_handler+0x100/0x130
+ [ 1417.076569] el0t_64_sync+0x190/0x198
+ [ 1417.076589] Code: f9401e80 aa1603e2 f9403be1 5280e483 (f9400000)
+
+Fix this by always using the vif corresponding to the wdev on which the
+Action frame transmission request was initiated by userspace. This way,
+even if the P2P vif is not available, the IOVAR is sent to firmware on
+the AP vif and the ANQP Query RESP Action frame is transmitted without
+crashing the driver.
+
+Move the init_completion() of "send_af_done" from brcmf_p2p_create_p2pdev()
+to brcmf_p2p_attach(), because the former function is not executed when
+only hostapd is managing the wlan interface, and it is not safe to do a
+reinit_completion() later in brcmf_p2p_tx_action_frame() without a prior
+init_completion().
+
+In the brcmf_p2p_tx_action_frame() function, the condition check for the
+P2P Presence Response frame is also not needed, since wpa_supplicant
+properly sends the P2P Presence Response frame on the P2P-GO vif instead
+of the P2P-Device vif.
+
+Cc: stable@vger.kernel.org
+Fixes: 18e2f61db3b7 ("brcmfmac: P2P action frame tx")
+Signed-off-by: Gokul Sivakumar <gokulkumar.sivakumar@infineon.com>
+Acked-by: Arend van Spriel <arend.vanspriel@broadcom.com>
+Link: https://patch.msgid.link/20251013102819.9727-1-gokulkumar.sivakumar@infineon.com
+[Cc stable]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+[ removed hunks for P2P presence response check and dwell_overflow logic ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 3 -
+ drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 21 +++++-------
+ drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h | 3 -
+ 3 files changed, 12 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -4963,8 +4963,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wip
+ brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n",
+ *cookie, le16_to_cpu(action_frame->len), freq);
+
+- ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg),
+- af_params);
++ ack = brcmf_p2p_send_action_frame(vif->ifp, af_params);
+
+ cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
+ GFP_KERNEL);
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+@@ -1477,6 +1477,7 @@ int brcmf_p2p_notify_action_tx_complete(
+ /**
+ * brcmf_p2p_tx_action_frame() - send action frame over fil.
+ *
++ * @ifp: interface to transmit on.
+ * @p2p: p2p info struct for vif.
+ * @af_params: action frame data/info.
+ *
+@@ -1486,11 +1487,11 @@ int brcmf_p2p_notify_action_tx_complete(
+ * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
+ * frame is transmitted.
+ */
+-static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
++static s32 brcmf_p2p_tx_action_frame(struct brcmf_if *ifp,
++ struct brcmf_p2p_info *p2p,
+ struct brcmf_fil_af_params_le *af_params)
+ {
+ struct brcmf_pub *drvr = p2p->cfg->pub;
+- struct brcmf_cfg80211_vif *vif;
+ s32 err = 0;
+ s32 timeout = 0;
+
+@@ -1500,8 +1501,7 @@ static s32 brcmf_p2p_tx_action_frame(str
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
+ clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+
+- vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+- err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
++ err = brcmf_fil_bsscfg_data_set(ifp, "actframe", af_params,
+ sizeof(*af_params));
+ if (err) {
+ bphy_err(drvr, " sending action frame has failed\n");
+@@ -1643,16 +1643,14 @@ static s32 brcmf_p2p_pub_af_tx(struct br
+ /**
+ * brcmf_p2p_send_action_frame() - send action frame .
+ *
+- * @cfg: driver private data for cfg80211 interface.
+- * @ndev: net device to transmit on.
++ * @ifp: interface to transmit on.
+ * @af_params: configuration data for action frame.
+ */
+-bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
+- struct net_device *ndev,
++bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp,
+ struct brcmf_fil_af_params_le *af_params)
+ {
++ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ struct brcmf_p2p_info *p2p = &cfg->p2p;
+- struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_fil_action_frame_le *action_frame;
+ struct brcmf_config_af_params config_af_params;
+ struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+@@ -1779,7 +1777,7 @@ bool brcmf_p2p_send_action_frame(struct
+ tx_retry = 0;
+ while (!p2p->block_gon_req_tx &&
+ (ack == false) && (tx_retry < P2P_AF_TX_MAX_RETRY)) {
+- ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
++ ack = !brcmf_p2p_tx_action_frame(ifp, p2p, af_params);
+ tx_retry++;
+ }
+ if (ack == false) {
+@@ -2137,7 +2135,6 @@ static struct wireless_dev *brcmf_p2p_cr
+
+ WARN_ON(p2p_ifp->bsscfgidx != bsscfgidx);
+
+- init_completion(&p2p->send_af_done);
+ INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
+ init_completion(&p2p->afx_hdl.act_frm_scan);
+ init_completion(&p2p->wait_next_af);
+@@ -2390,6 +2387,8 @@ s32 brcmf_p2p_attach(struct brcmf_cfg802
+ pri_ifp = brcmf_get_ifp(cfg->pub, 0);
+ p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
+
++ init_completion(&p2p->send_af_done);
++
+ if (p2pdev_forced) {
+ err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);
+ if (IS_ERR(err_ptr)) {
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+@@ -165,8 +165,7 @@ int brcmf_p2p_notify_action_frame_rx(str
+ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
+ const struct brcmf_event_msg *e,
+ void *data);
+-bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
+- struct net_device *ndev,
++bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp,
+ struct brcmf_fil_af_params_le *af_params);
+ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
+ struct brcmf_bss_info_le *bi);
--- /dev/null
+From stable+bounces-191546-greg=kroah.com@vger.kernel.org Wed Oct 29 04:59:24 2025
+From: Babu Moger <babu.moger@amd.com>
+Date: Tue, 28 Oct 2025 14:59:04 -0500
+Subject: x86/resctrl: Fix miscount of bandwidth event when reactivating previously unavailable RMID
+To: <stable@vger.kernel.org>
+Message-ID: <20251028195904.222814-1-babu.moger@amd.com>
+
+From: Babu Moger <babu.moger@amd.com>
+
+[ Upstream commit 15292f1b4c55a3a7c940dbcb6cb8793871ed3d92 ]
+
+Users can create as many monitoring groups as the number of RMIDs supported
+by the hardware. However, on AMD systems, only a limited number of RMIDs
+are guaranteed to be actively tracked by the hardware. RMIDs that exceed
+this limit are placed in an "Unavailable" state.
+
+When a bandwidth counter is read for such an RMID, the hardware sets
+MSR_IA32_QM_CTR.Unavailable (bit 62). When such an RMID starts being tracked
+again, the hardware counter is reset to zero. MSR_IA32_QM_CTR.Unavailable
+remains set on the first read after tracking restarts and is clear on all
+subsequent reads as long as the RMID is tracked.
+
+resctrl miscounts the bandwidth events after an RMID transitions from the
+"Unavailable" state back to being tracked. This happens because when the
+hardware starts counting again after resetting the counter to zero, resctrl
+in turn compares the new count against the counter value stored from the
+previous time the RMID was tracked.
+
+This results in resctrl computing an event value that is either undercounting
+(when new counter is more than stored counter) or a mistaken overflow (when
+new counter is less than stored counter).
+
+Reset the stored value (arch_mbm_state::prev_msr) of MSR_IA32_QM_CTR to
+zero whenever the RMID is in the "Unavailable" state to ensure accurate
+counting after the RMID resets to zero when it starts to be tracked again.
+
+Example scenario that results in mistaken overflow
+==================================================
+1. The resctrl filesystem is mounted, and a task is assigned to a
+ monitoring group.
+
+ $mount -t resctrl resctrl /sys/fs/resctrl
+ $mkdir /sys/fs/resctrl/mon_groups/test1/
+ $echo 1234 > /sys/fs/resctrl/mon_groups/test1/tasks
+
+ $cat /sys/fs/resctrl/mon_groups/test1/mon_data/mon_L3_*/mbm_total_bytes
+ 21323 <- Total bytes on domain 0
+ "Unavailable" <- Total bytes on domain 1
+
+ Task is running on domain 0. Counter on domain 1 is "Unavailable".
+
+2. The task runs on domain 0 for a while and then moves to domain 1. The
+ counter starts incrementing on domain 1.
+
+ $cat /sys/fs/resctrl/mon_groups/test1/mon_data/mon_L3_*/mbm_total_bytes
+ 7345357 <- Total bytes on domain 0
+ 4545 <- Total bytes on domain 1
+
+3. At some point, the RMID in domain 0 transitions to the "Unavailable"
+ state because the task is no longer executing in that domain.
+
+ $cat /sys/fs/resctrl/mon_groups/test1/mon_data/mon_L3_*/mbm_total_bytes
+ "Unavailable" <- Total bytes on domain 0
+ 434341 <- Total bytes on domain 1
+
+4. Since the task continues to migrate between domains, it may eventually
+ return to domain 0.
+
+ $cat /sys/fs/resctrl/mon_groups/test1/mon_data/mon_L3_*/mbm_total_bytes
+ 17592178699059 <- Overflow on domain 0
+ 3232332 <- Total bytes on domain 1
+
+In this case, the RMID on domain 0 transitions from "Unavailable" state to
+active state. The hardware sets MSR_IA32_QM_CTR.Unavailable (bit 62) when
+the counter is read and begins tracking the RMID counting from 0.
+
+Subsequent reads succeed but return a value smaller than the previously
+saved MSR value (7345357). Consequently, resctrl's overflow logic is
+triggered: it compares the previous value (7345357) with the new, smaller
+value and incorrectly interprets this as a counter overflow, adding a large
+delta.
+
+In reality, this is a false positive: the counter did not overflow but was
+simply reset when the RMID transitioned from "Unavailable" back to active
+state.
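+
+(As a worked check: the bogus value matches a wrap-around delta of a
+44-bit counter, assuming that is the MBM counter width on this system,
+i.e. (2^44 - 7345357) + new = (17592186044416 - 7345357) + new =
+17592178699059 + new, with the new reading still close to zero.)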
+
+Here is the text from APM [1] available from [2].
+
+"In PQOS Version 2.0 or higher, the MBM hardware will set the U bit on the
+first QM_CTR read when it begins tracking an RMID that it was not
+previously tracking. The U bit will be zero for all subsequent reads from
+that RMID while it is still tracked by the hardware. Therefore, a QM_CTR
+read with the U bit set when that RMID is in use by a processor can be
+considered 0 when calculating the difference with a subsequent read."
+
+[1] AMD64 Architecture Programmer's Manual Volume 2: System Programming
+ Publication # 24593 Revision 3.41 section 19.3.3 Monitoring L3 Memory
+ Bandwidth (MBM).
+
+ [ bp: Split commit message into smaller paragraph chunks for better
+ consumption. ]
+
+Fixes: 4d05bf71f157d ("x86/resctrl: Introduce AMD QOS feature")
+Signed-off-by: Babu Moger <babu.moger@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
+Tested-by: Reinette Chatre <reinette.chatre@intel.com>
+Cc: stable@vger.kernel.org # needs adjustments for <= v6.17
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=206537 # [2]
+(cherry picked from commit 15292f1b4c55a3a7c940dbcb6cb8793871ed3d92)
+[babu.moger@amd.com: Backport for v5.4 stable]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/resctrl/monitor.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/resctrl/monitor.c
++++ b/arch/x86/kernel/cpu/resctrl/monitor.c
+@@ -225,11 +225,19 @@ static u64 mbm_overflow_count(u64 prev_m
+
+ static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
+ {
+- struct mbm_state *m;
++ struct mbm_state *m = NULL;
+ u64 chunks, tval;
+
+ tval = __rmid_read(rmid, rr->evtid);
+ if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
++ if (tval & RMID_VAL_UNAVAIL) {
++ if (rr->evtid == QOS_L3_MBM_TOTAL_EVENT_ID)
++ m = &rr->d->mbm_total[rmid];
++ else if (rr->evtid == QOS_L3_MBM_LOCAL_EVENT_ID)
++ m = &rr->d->mbm_local[rmid];
++ if (m)
++ m->prev_msr = 0;
++ }
+ return tval;
+ }
+ switch (rr->evtid) {