6.12-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	Mon, 5 Jan 2026 14:19:19 +0000 (15:19 +0100)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	Mon, 5 Jan 2026 14:19:19 +0000 (15:19 +0100)
added patches:
arm-dts-microchip-sama7g5-fix-uart-fifo-size-to-32.patch
block-freeze-queue-when-updating-zone-resources.patch
btrfs-don-t-rewrite-ret-from-inode_permission.patch
drm-displayid-add-quirk-to-ignore-displayid-checksum-errors.patch
drm-panthor-flush-shmem-writes-before-mapping-buffers-cpu-uncached.patch
erofs-fix-unexpected-eio-under-memory-pressure.patch
f2fs-add-timeout-in-f2fs_enable_checkpoint.patch
f2fs-clear-sbi_por_doing-before-initing-inmem-curseg.patch
f2fs-drop-inode-from-the-donation-list-when-the-last-file-is-closed.patch
f2fs-dump-more-information-for-f2fs_-enable-disable-_checkpoint.patch
f2fs-fix-to-avoid-updating-compression-context-during-writeback.patch
f2fs-fix-to-detect-recoverable-inode-during-dryrun-of-find_fsync_dnodes.patch
f2fs-fix-to-propagate-error-from-f2fs_enable_checkpoint.patch
f2fs-use-global-inline_xattr_slab-instead-of-per-sb-slab-cache.patch
gfs2-fix-freeze-error-handling.patch
gpiolib-acpi-add-a-quirk-for-acer-nitro-v15.patch
gpiolib-acpi-add-acpi_gpio_need_run_edge_events_on_boot-getter.patch
gpiolib-acpi-add-quirk-for-asus-proart-px13.patch
gpiolib-acpi-add-quirk-for-dell-precision-7780.patch
gpiolib-acpi-handle-deferred-list-via-new-api.patch
gpiolib-acpi-move-quirks-to-a-separate-file.patch
gpiolib-acpi-switch-to-use-enum-in-acpi_gpio_in_ignore_list.patch
hrtimers-introduce-hrtimer_update_function.patch
hrtimers-make-hrtimer_update_function-less-expensive.patch
idpf-add-support-for-sw-triggered-interrupts.patch
idpf-add-support-for-tx-refillqs-in-flow-scheduling-mode.patch
idpf-improve-when-to-set-re-bit-logic.patch
idpf-remove-obsolete-stashing-code.patch
idpf-replace-flow-scheduling-buffer-ring-with-buffer-pool.patch
idpf-simplify-and-fix-splitq-tx-packet-rollback-error-path.patch
idpf-stop-tx-if-there-are-insufficient-buffer-resources.patch
idpf-trigger-sw-interrupt-when-exiting-wb_on_itr-mode.patch
ipv6-adopt-dst_dev-helper.patch
jbd2-fix-the-inconsistency-between-checksum-and-data-in-memory-for-journal-sb.patch
lib-crypto-riscv-chacha-avoid-s0-fp-register.patch
md-raid10-wait-barrier-before-returning-discard-request-with-req_nowait.patch
media-i2c-imx219-fix-1920x1080-mode-to-use-1-1-pixel-aspect-ratio.patch
mm-ksm-fix-exec-fork-inheritance-support-for-prctl.patch
mptcp-pm-ignore-unknown-endpoint-flags.patch
net-ipv6-ioam6-use-consistent-dst-names.patch
net-use-dst_dev_rcu-in-sk_setup_caps.patch
netfilter-nft_ct-add-seqadj-extension-for-natted-connections.patch
sched-eevdf-fix-min_vruntime-vs-avg_vruntime.patch
sched_ext-factor-out-local_dsq_post_enq-from-dispatch_enqueue.patch
sched_ext-fix-incorrect-sched_class-settings-for-per-cpu-migration-tasks.patch
sched_ext-fix-missing-post-enqueue-handling-in-move_local_task_to_local_dsq.patch
serial-core-fix-of-node-leak.patch
serial-core-fix-serial-device-initialization.patch
serial-core-restore-sysfs-fwnode-information.patch
serial-xilinx_uartps-fix-rs485-delay_rts_after_send.patch
serial-xilinx_uartps-use-helper-function-hrtimer_update_function.patch
svcrdma-bound-check-rq_pages-index-in-inline-path.patch
tpm2-sessions-fix-tpm2_read_public-range-checks.patch
tty-fix-tty_port_tty_-hangup-kernel-doc.patch
tty-introduce-and-use-tty_port_tty_vhangup-helper.patch
usbnet-fix-using-smp_processor_id-in-preemptible-code-warnings.patch
wifi-mt76-mt7925-add-handler-to-hif-suspend-resume-event.patch
wifi-mt76-mt7925-fix-clc-command-timeout-when-suspend-resume.patch
wifi-mt76-mt7925-fix-the-unfinished-command-of-regd_notifier-before-suspend.patch
x86-microcode-amd-select-which-microcode-patch-to-load.patch
xhci-dbgtty-fix-device-unregister-fixup.patch

62 files changed:
queue-6.12/arm-dts-microchip-sama7g5-fix-uart-fifo-size-to-32.patch [new file with mode: 0644]
queue-6.12/block-freeze-queue-when-updating-zone-resources.patch [new file with mode: 0644]
queue-6.12/btrfs-don-t-rewrite-ret-from-inode_permission.patch [new file with mode: 0644]
queue-6.12/drm-displayid-add-quirk-to-ignore-displayid-checksum-errors.patch [new file with mode: 0644]
queue-6.12/drm-panthor-flush-shmem-writes-before-mapping-buffers-cpu-uncached.patch [new file with mode: 0644]
queue-6.12/erofs-fix-unexpected-eio-under-memory-pressure.patch [new file with mode: 0644]
queue-6.12/f2fs-add-timeout-in-f2fs_enable_checkpoint.patch [new file with mode: 0644]
queue-6.12/f2fs-clear-sbi_por_doing-before-initing-inmem-curseg.patch [new file with mode: 0644]
queue-6.12/f2fs-drop-inode-from-the-donation-list-when-the-last-file-is-closed.patch [new file with mode: 0644]
queue-6.12/f2fs-dump-more-information-for-f2fs_-enable-disable-_checkpoint.patch [new file with mode: 0644]
queue-6.12/f2fs-fix-to-avoid-updating-compression-context-during-writeback.patch [new file with mode: 0644]
queue-6.12/f2fs-fix-to-detect-recoverable-inode-during-dryrun-of-find_fsync_dnodes.patch [new file with mode: 0644]
queue-6.12/f2fs-fix-to-propagate-error-from-f2fs_enable_checkpoint.patch [new file with mode: 0644]
queue-6.12/f2fs-use-global-inline_xattr_slab-instead-of-per-sb-slab-cache.patch [new file with mode: 0644]
queue-6.12/gfs2-fix-freeze-error-handling.patch [new file with mode: 0644]
queue-6.12/gpiolib-acpi-add-a-quirk-for-acer-nitro-v15.patch [new file with mode: 0644]
queue-6.12/gpiolib-acpi-add-acpi_gpio_need_run_edge_events_on_boot-getter.patch [new file with mode: 0644]
queue-6.12/gpiolib-acpi-add-quirk-for-asus-proart-px13.patch [new file with mode: 0644]
queue-6.12/gpiolib-acpi-add-quirk-for-dell-precision-7780.patch [new file with mode: 0644]
queue-6.12/gpiolib-acpi-handle-deferred-list-via-new-api.patch [new file with mode: 0644]
queue-6.12/gpiolib-acpi-move-quirks-to-a-separate-file.patch [new file with mode: 0644]
queue-6.12/gpiolib-acpi-switch-to-use-enum-in-acpi_gpio_in_ignore_list.patch [new file with mode: 0644]
queue-6.12/hrtimers-introduce-hrtimer_update_function.patch [new file with mode: 0644]
queue-6.12/hrtimers-make-hrtimer_update_function-less-expensive.patch [new file with mode: 0644]
queue-6.12/idpf-add-support-for-sw-triggered-interrupts.patch [new file with mode: 0644]
queue-6.12/idpf-add-support-for-tx-refillqs-in-flow-scheduling-mode.patch [new file with mode: 0644]
queue-6.12/idpf-improve-when-to-set-re-bit-logic.patch [new file with mode: 0644]
queue-6.12/idpf-remove-obsolete-stashing-code.patch [new file with mode: 0644]
queue-6.12/idpf-replace-flow-scheduling-buffer-ring-with-buffer-pool.patch [new file with mode: 0644]
queue-6.12/idpf-simplify-and-fix-splitq-tx-packet-rollback-error-path.patch [new file with mode: 0644]
queue-6.12/idpf-stop-tx-if-there-are-insufficient-buffer-resources.patch [new file with mode: 0644]
queue-6.12/idpf-trigger-sw-interrupt-when-exiting-wb_on_itr-mode.patch [new file with mode: 0644]
queue-6.12/ipv6-adopt-dst_dev-helper.patch [new file with mode: 0644]
queue-6.12/jbd2-fix-the-inconsistency-between-checksum-and-data-in-memory-for-journal-sb.patch [new file with mode: 0644]
queue-6.12/lib-crypto-riscv-chacha-avoid-s0-fp-register.patch [new file with mode: 0644]
queue-6.12/md-raid10-wait-barrier-before-returning-discard-request-with-req_nowait.patch [new file with mode: 0644]
queue-6.12/media-i2c-imx219-fix-1920x1080-mode-to-use-1-1-pixel-aspect-ratio.patch [new file with mode: 0644]
queue-6.12/mm-ksm-fix-exec-fork-inheritance-support-for-prctl.patch [new file with mode: 0644]
queue-6.12/mptcp-pm-ignore-unknown-endpoint-flags.patch [new file with mode: 0644]
queue-6.12/net-ipv6-ioam6-use-consistent-dst-names.patch [new file with mode: 0644]
queue-6.12/net-use-dst_dev_rcu-in-sk_setup_caps.patch [new file with mode: 0644]
queue-6.12/netfilter-nft_ct-add-seqadj-extension-for-natted-connections.patch [new file with mode: 0644]
queue-6.12/sched-eevdf-fix-min_vruntime-vs-avg_vruntime.patch [new file with mode: 0644]
queue-6.12/sched_ext-factor-out-local_dsq_post_enq-from-dispatch_enqueue.patch [new file with mode: 0644]
queue-6.12/sched_ext-fix-incorrect-sched_class-settings-for-per-cpu-migration-tasks.patch [new file with mode: 0644]
queue-6.12/sched_ext-fix-missing-post-enqueue-handling-in-move_local_task_to_local_dsq.patch [new file with mode: 0644]
queue-6.12/serial-core-fix-of-node-leak.patch [new file with mode: 0644]
queue-6.12/serial-core-fix-serial-device-initialization.patch [new file with mode: 0644]
queue-6.12/serial-core-restore-sysfs-fwnode-information.patch [new file with mode: 0644]
queue-6.12/serial-xilinx_uartps-fix-rs485-delay_rts_after_send.patch [new file with mode: 0644]
queue-6.12/serial-xilinx_uartps-use-helper-function-hrtimer_update_function.patch [new file with mode: 0644]
queue-6.12/series
queue-6.12/svcrdma-bound-check-rq_pages-index-in-inline-path.patch [new file with mode: 0644]
queue-6.12/tpm2-sessions-fix-tpm2_read_public-range-checks.patch [new file with mode: 0644]
queue-6.12/tty-fix-tty_port_tty_-hangup-kernel-doc.patch [new file with mode: 0644]
queue-6.12/tty-introduce-and-use-tty_port_tty_vhangup-helper.patch [new file with mode: 0644]
queue-6.12/usbnet-fix-using-smp_processor_id-in-preemptible-code-warnings.patch [new file with mode: 0644]
queue-6.12/wifi-mt76-mt7925-add-handler-to-hif-suspend-resume-event.patch [new file with mode: 0644]
queue-6.12/wifi-mt76-mt7925-fix-clc-command-timeout-when-suspend-resume.patch [new file with mode: 0644]
queue-6.12/wifi-mt76-mt7925-fix-the-unfinished-command-of-regd_notifier-before-suspend.patch [new file with mode: 0644]
queue-6.12/x86-microcode-amd-select-which-microcode-patch-to-load.patch [new file with mode: 0644]
queue-6.12/xhci-dbgtty-fix-device-unregister-fixup.patch [new file with mode: 0644]

diff --git a/queue-6.12/arm-dts-microchip-sama7g5-fix-uart-fifo-size-to-32.patch b/queue-6.12/arm-dts-microchip-sama7g5-fix-uart-fifo-size-to-32.patch
new file mode 100644 (file)
index 0000000..bdc755b
--- /dev/null
@@ -0,0 +1,46 @@
+From stable+bounces-204388-greg=kroah.com@vger.kernel.org Wed Dec 31 22:06:06 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Dec 2025 16:06:00 -0500
+Subject: ARM: dts: microchip: sama7g5: fix uart fifo size to 32
+To: stable@vger.kernel.org
+Cc: Nicolas Ferre <nicolas.ferre@microchip.com>, Claudiu Beznea <claudiu.beznea@tuxon.dev>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251231210600.3501075-1-sashal@kernel.org>
+
+From: Nicolas Ferre <nicolas.ferre@microchip.com>
+
+[ Upstream commit 5654889a94b0de5ad6ceae3793e7f5e0b61b50b6 ]
+
+On some flexcom nodes related to UART, the FIFO sizes were wrong: fix
+them to 32 data entries.
+
+Fixes: 7540629e2fc7 ("ARM: dts: at91: add sama7g5 SoC DT and sama7g5-ek")
+Cc: stable@vger.kernel.org # 5.15+
+Signed-off-by: Nicolas Ferre <nicolas.ferre@microchip.com>
+Link: https://lore.kernel.org/r/20251114103313.20220-2-nicolas.ferre@microchip.com
+Signed-off-by: Claudiu Beznea <claudiu.beznea@tuxon.dev>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/microchip/sama7g5.dtsi |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/boot/dts/microchip/sama7g5.dtsi
++++ b/arch/arm/boot/dts/microchip/sama7g5.dtsi
+@@ -811,7 +811,7 @@
+                               dma-names = "tx", "rx";
+                               atmel,use-dma-rx;
+                               atmel,use-dma-tx;
+-                              atmel,fifo-size = <16>;
++                              atmel,fifo-size = <32>;
+                               status = "disabled";
+                       };
+               };
+@@ -837,7 +837,7 @@
+                               dma-names = "tx", "rx";
+                               atmel,use-dma-rx;
+                               atmel,use-dma-tx;
+-                              atmel,fifo-size = <16>;
++                              atmel,fifo-size = <32>;
+                               status = "disabled";
+                       };
+               };
diff --git a/queue-6.12/block-freeze-queue-when-updating-zone-resources.patch b/queue-6.12/block-freeze-queue-when-updating-zone-resources.patch
new file mode 100644 (file)
index 0000000..f7b219c
--- /dev/null
@@ -0,0 +1,117 @@
+From stable+bounces-204401-greg=kroah.com@vger.kernel.org Thu Jan  1 00:40:18 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Dec 2025 18:40:07 -0500
+Subject: block: freeze queue when updating zone resources
+To: stable@vger.kernel.org
+Cc: Damien Le Moal <dlemoal@kernel.org>, Christoph Hellwig <hch@lst.de>, Johannes Thumshirn <johannes.thumshirn@wdc.com>, Chaitanya Kulkarni <kch@nvidia.com>, Hannes Reinecke <hare@suse.de>, "Martin K. Petersen" <martin.petersen@oracle.com>, Jens Axboe <axboe@kernel.dk>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251231234008.3701023-1-sashal@kernel.org>
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+[ Upstream commit bba4322e3f303b2d656e748be758320b567f046f ]
+
+Modify disk_update_zone_resources() to freeze the device queue before
+updating the number of zones, zone capacity and other zone related
+resources. The locking order resulting from the call to
+queue_limits_commit_update_frozen() is preserved, that is, the queue
+limits lock is first taken by calling queue_limits_start_update() before
+freezing the queue, and the queue is unfrozen after executing
+queue_limits_commit_update(), which replaces the call to
+queue_limits_commit_update_frozen().
+
+This change ensures that there are no in-flights I/Os when the zone
+resources are updated due to a zone revalidation. In case of error when
+the limits are applied, directly call disk_free_zone_resources() from
+disk_update_zone_resources() while the disk queue is still frozen to
+avoid needing to freeze & unfreeze the queue again in
+blk_revalidate_disk_zones(), thus simplifying that function code a
+little.
+
+Fixes: 0b83c86b444a ("block: Prevent potential deadlock in blk_revalidate_disk_zones()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+[ adapted blk_mq_freeze_queue/unfreeze_queue calls to single-argument void API ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-zoned.c |   39 +++++++++++++++++++++++----------------
+ 1 file changed, 23 insertions(+), 16 deletions(-)
+
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -1514,6 +1514,11 @@ static int disk_update_zone_resources(st
+       unsigned int nr_seq_zones, nr_conv_zones;
+       unsigned int pool_size;
+       struct queue_limits lim;
++      int ret = 0;
++
++      lim = queue_limits_start_update(q);
++
++      blk_mq_freeze_queue(q);
+       disk->nr_zones = args->nr_zones;
+       disk->zone_capacity = args->zone_capacity;
+@@ -1523,11 +1528,10 @@ static int disk_update_zone_resources(st
+       if (nr_conv_zones >= disk->nr_zones) {
+               pr_warn("%s: Invalid number of conventional zones %u / %u\n",
+                       disk->disk_name, nr_conv_zones, disk->nr_zones);
+-              return -ENODEV;
++              ret = -ENODEV;
++              goto unfreeze;
+       }
+-      lim = queue_limits_start_update(q);
+-
+       /*
+        * Some devices can advertize zone resource limits that are larger than
+        * the number of sequential zones of the zoned block device, e.g. a
+@@ -1564,7 +1568,15 @@ static int disk_update_zone_resources(st
+       }
+ commit:
+-      return queue_limits_commit_update_frozen(q, &lim);
++      ret = queue_limits_commit_update(q, &lim);
++
++unfreeze:
++      if (ret)
++              disk_free_zone_resources(disk);
++
++      blk_mq_unfreeze_queue(q);
++
++      return ret;
+ }
+ static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
+@@ -1785,19 +1797,14 @@ int blk_revalidate_disk_zones(struct gen
+               ret = -ENODEV;
+       }
+-      /*
+-       * Set the new disk zone parameters only once the queue is frozen and
+-       * all I/Os are completed.
+-       */
+       if (ret > 0)
+-              ret = disk_update_zone_resources(disk, &args);
+-      else
+-              pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
+-      if (ret) {
+-              blk_mq_freeze_queue(q);
+-              disk_free_zone_resources(disk);
+-              blk_mq_unfreeze_queue(q);
+-      }
++              return disk_update_zone_resources(disk, &args);
++
++      pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
++
++      blk_mq_freeze_queue(q);
++      disk_free_zone_resources(disk);
++      blk_mq_unfreeze_queue(q);
+       return ret;
+ }
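
The locking order described in the message above condenses to the following skeleton. This is a sketch, not compilable on its own: it reuses only the calls visible in the hunks and trims the error paths and zone-limit bookkeeping.

static int update_zone_resources_sketch(struct gendisk *disk,
					struct request_queue *q)
{
	struct queue_limits lim;
	int ret;

	lim = queue_limits_start_update(q);	/* 1. take the limits lock     */
	blk_mq_freeze_queue(q);			/* 2. then drain in-flight I/O */

	/* ... update disk->nr_zones, zone capacity, pool size ... */

	ret = queue_limits_commit_update(q, &lim);	/* 3. apply limits */
	if (ret)
		disk_free_zone_resources(disk);	/* queue is still frozen   */
	blk_mq_unfreeze_queue(q);		/* 4. resume I/O           */
	return ret;
}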
diff --git a/queue-6.12/btrfs-don-t-rewrite-ret-from-inode_permission.patch b/queue-6.12/btrfs-don-t-rewrite-ret-from-inode_permission.patch
new file mode 100644 (file)
index 0000000..1b0bc54
--- /dev/null
@@ -0,0 +1,51 @@
+From stable+bounces-204138-greg=kroah.com@vger.kernel.org Mon Dec 29 22:39:14 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Dec 2025 16:39:04 -0500
+Subject: btrfs: don't rewrite ret from inode_permission
+To: stable@vger.kernel.org
+Cc: Josef Bacik <josef@toxicpanda.com>, Johannes Thumshirn <johannes.thumshirn@wdc.com>, Daniel Vacek <neelx@suse.com>, David Sterba <dsterba@suse.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251229213904.1726555-1-sashal@kernel.org>
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit 0185c2292c600993199bc6b1f342ad47a9e8c678 ]
+
+In our user-safe ino resolve ioctl we'll just turn any ret from
+inode_permission() into -EACCES.  This is redundant, and could potentially be
+wrong if we had an ENOMEM in the security layer or some such other
+error, so simply return the actual return value.
+
+Note: The patch was taken from v5 of fscrypt patchset
+(https://lore.kernel.org/linux-btrfs/cover.1706116485.git.josef@toxicpanda.com/)
+which was handled over time by various people: Omar Sandoval, Sweet Tea
+Dorminy, Josef Bacik.
+
+Fixes: 23d0b79dfaed ("btrfs: Add unprivileged version of ino_lookup ioctl")
+CC: stable@vger.kernel.org # 5.4+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Daniel Vacek <neelx@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+[ add note ]
+Signed-off-by: David Sterba <dsterba@suse.com>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/ioctl.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2012,10 +2012,8 @@ static int btrfs_search_path_in_tree_use
+                       ret = inode_permission(idmap, temp_inode,
+                                              MAY_READ | MAY_EXEC);
+                       iput(temp_inode);
+-                      if (ret) {
+-                              ret = -EACCES;
++                      if (ret)
+                               goto out_put;
+-                      }
+                       if (key.offset == upper_limit)
+                               break;
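
The underlying pattern is generic: overwriting a callee's errno discards information the caller may need. A minimal standalone C sketch with hypothetical names (inode_permission_stub is a stand-in, not the kernel function):

#include <errno.h>
#include <stdio.h>

/* Stand-in for inode_permission(); may fail with different errnos. */
static int inode_permission_stub(int fail_with)
{
	return fail_with;
}

static int lookup_before_fix(void)
{
	int ret = inode_permission_stub(-ENOMEM);

	if (ret)
		ret = -EACCES;	/* the -ENOMEM is lost here */
	return ret;
}

static int lookup_after_fix(void)
{
	return inode_permission_stub(-ENOMEM);	/* propagate as-is */
}

int main(void)
{
	printf("before: %d, after: %d\n",
	       lookup_before_fix(), lookup_after_fix());
	return 0;
}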
diff --git a/queue-6.12/drm-displayid-add-quirk-to-ignore-displayid-checksum-errors.patch b/queue-6.12/drm-displayid-add-quirk-to-ignore-displayid-checksum-errors.patch
new file mode 100644 (file)
index 0000000..05566a2
--- /dev/null
@@ -0,0 +1,137 @@
+From stable+bounces-204374-greg=kroah.com@vger.kernel.org Wed Dec 31 17:29:40 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Dec 2025 11:29:26 -0500
+Subject: drm/displayid: add quirk to ignore DisplayID checksum errors
+To: stable@vger.kernel.org
+Cc: "Jani Nikula" <jani.nikula@intel.com>, "Tiago Martins Araújo" <tiago.martins.araujo@gmail.com>, "Alex Deucher" <alexander.deucher@amd.com>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251231162926.3267905-3-sashal@kernel.org>
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+[ Upstream commit 83cbb4d33dc22b0ca1a4e85c6e892c9b729e28d4 ]
+
+Add a mechanism for DisplayID specific quirks, and add the first quirk
+to ignore DisplayID section checksum errors.
+
+It would be quite inconvenient to pass existing EDID quirks from
+drm_edid.c for DisplayID parsing. Not all places doing DisplayID
+iteration have the quirks readily available, and we would have to pass
+them in all places. Simply add a separate array of DisplayID-specific EDID
+quirks. We do end up checking it every time we iterate DisplayID blocks,
+but hopefully the number of quirks remains small.
+
+There are a few laptop models with DisplayID checksum failures, leading
+to higher refresh rates only present in the DisplayID blocks being
+ignored. Add a quirk for the panel in the affected machines.
+
+Reported-by: Tiago Martins Araújo <tiago.martins.araujo@gmail.com>
+Closes: https://lore.kernel.org/r/CACRbrPGvLP5LANXuFi6z0S7XMbAG4X5y2YOLBDxfOVtfGGqiKQ@mail.gmail.com
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14703
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Tested-by: Tiago Martins Araújo <tiago.martins.araujo@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/c04d81ae648c5f21b3f5b7953f924718051f2798.1761681968.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_displayid.c          |   41 +++++++++++++++++++++++++++----
+ drivers/gpu/drm/drm_displayid_internal.h |    2 +
+ 2 files changed, 39 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/drm_displayid.c
++++ b/drivers/gpu/drm/drm_displayid.c
+@@ -9,6 +9,34 @@
+ #include "drm_crtc_internal.h"
+ #include "drm_displayid_internal.h"
++enum {
++      QUIRK_IGNORE_CHECKSUM,
++};
++
++struct displayid_quirk {
++      const struct drm_edid_ident ident;
++      u8 quirks;
++};
++
++static const struct displayid_quirk quirks[] = {
++      {
++              .ident = DRM_EDID_IDENT_INIT('C', 'S', 'O', 5142, "MNE007ZA1-5"),
++              .quirks = BIT(QUIRK_IGNORE_CHECKSUM),
++      },
++};
++
++static u8 get_quirks(const struct drm_edid *drm_edid)
++{
++      int i;
++
++      for (i = 0; i < ARRAY_SIZE(quirks); i++) {
++              if (drm_edid_match(drm_edid, &quirks[i].ident))
++                      return quirks[i].quirks;
++      }
++
++      return 0;
++}
++
+ static const struct displayid_header *
+ displayid_get_header(const u8 *displayid, int length, int index)
+ {
+@@ -23,7 +51,7 @@ displayid_get_header(const u8 *displayid
+ }
+ static const struct displayid_header *
+-validate_displayid(const u8 *displayid, int length, int idx)
++validate_displayid(const u8 *displayid, int length, int idx, bool ignore_checksum)
+ {
+       int i, dispid_length;
+       u8 csum = 0;
+@@ -41,8 +69,11 @@ validate_displayid(const u8 *displayid,
+       for (i = 0; i < dispid_length; i++)
+               csum += displayid[idx + i];
+       if (csum) {
+-              DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
+-              return ERR_PTR(-EINVAL);
++              DRM_NOTE("DisplayID checksum invalid, remainder is %d%s\n", csum,
++                       ignore_checksum ? " (ignoring)" : "");
++
++              if (!ignore_checksum)
++                      return ERR_PTR(-EINVAL);
+       }
+       return base;
+@@ -52,6 +83,7 @@ static const u8 *find_next_displayid_ext
+ {
+       const struct displayid_header *base;
+       const u8 *displayid;
++      bool ignore_checksum = iter->quirks & BIT(QUIRK_IGNORE_CHECKSUM);
+       displayid = drm_edid_find_extension(iter->drm_edid, DISPLAYID_EXT, &iter->ext_index);
+       if (!displayid)
+@@ -61,7 +93,7 @@ static const u8 *find_next_displayid_ext
+       iter->length = EDID_LENGTH - 1;
+       iter->idx = 1;
+-      base = validate_displayid(displayid, iter->length, iter->idx);
++      base = validate_displayid(displayid, iter->length, iter->idx, ignore_checksum);
+       if (IS_ERR(base))
+               return NULL;
+@@ -76,6 +108,7 @@ void displayid_iter_edid_begin(const str
+       memset(iter, 0, sizeof(*iter));
+       iter->drm_edid = drm_edid;
++      iter->quirks = get_quirks(drm_edid);
+ }
+ static const struct displayid_block *
+--- a/drivers/gpu/drm/drm_displayid_internal.h
++++ b/drivers/gpu/drm/drm_displayid_internal.h
+@@ -154,6 +154,8 @@ struct displayid_iter {
+       u8 version;
+       u8 primary_use;
++
++      u8 quirks;
+ };
+ void displayid_iter_edid_begin(const struct drm_edid *drm_edid,
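
For reference, the checksum rule that validate_displayid() enforces is a plain byte-sum over the section, which must come out to zero. A standalone sketch with made-up sample bytes (real sections come from the EDID extension block):

#include <stdint.h>
#include <stdio.h>

/* Byte-sum check: a valid section, checksum byte included, sums to 0. */
static int section_checksum_ok(const uint8_t *s, int len)
{
	uint8_t csum = 0;

	for (int i = 0; i < len; i++)
		csum += s[i];
	return csum == 0;	/* nonzero remainder => corrupt/buggy EDID */
}

int main(void)
{
	uint8_t section[4] = { 0x20, 0x10, 0x01,
			       (uint8_t)-(0x20 + 0x10 + 0x01) };

	printf("checksum ok: %d\n", section_checksum_ok(section, 4));
	return 0;
}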
diff --git a/queue-6.12/drm-panthor-flush-shmem-writes-before-mapping-buffers-cpu-uncached.patch b/queue-6.12/drm-panthor-flush-shmem-writes-before-mapping-buffers-cpu-uncached.patch
new file mode 100644 (file)
index 0000000..33d785f
--- /dev/null
@@ -0,0 +1,66 @@
+From stable+bounces-204508-greg=kroah.com@vger.kernel.org Fri Jan  2 21:37:55 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Fri,  2 Jan 2026 12:37:23 -0800
+Subject: drm/panthor: Flush shmem writes before mapping buffers CPU-uncached
+To: stable@vger.kernel.org
+Cc: Boris Brezillon <boris.brezillon@collabora.com>, Steven Price <steven.price@arm.com>, Liviu Dudau <liviu.dudau@arm.com>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260102203727.1455662-4-harshit.m.mogalapalli@oracle.com>
+
+From: Boris Brezillon <boris.brezillon@collabora.com>
+
+[ Upstream commit 576c930e5e7dcb937648490611a83f1bf0171048 ]
+
+The shmem layer zeroes out the new pages using cached mappings, and if
+we don't CPU-flush we might leave dirty cachelines behind, leading to
+potential data leaks and/or asynchronous buffer corruption when dirty
+cachelines are evicted.
+
+Fixes: 8a1cc07578bf ("drm/panthor: Add GEM logical block")
+Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
+Reviewed-by: Steven Price <steven.price@arm.com>
+Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
+Signed-off-by: Steven Price <steven.price@arm.com>
+Link: https://patch.msgid.link/20251107171214.1186299-1-boris.brezillon@collabora.com
+[Harshit: Resolve conflicts due to missing commit: fe69a3918084
+("drm/panthor: Fix UAF in panthor_gem_create_with_handle() debugfs
+code") in 6.12.y]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/panthor/panthor_gem.c |   18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/drivers/gpu/drm/panthor/panthor_gem.c
++++ b/drivers/gpu/drm/panthor/panthor_gem.c
+@@ -214,6 +214,23 @@ panthor_gem_create_with_handle(struct dr
+               bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
+       }
++      /* If this is a write-combine mapping, we query the sgt to force a CPU
++       * cache flush (dma_map_sgtable() is called when the sgt is created).
++       * This ensures the zero-ing is visible to any uncached mapping created
++       * by vmap/mmap.
++       * FIXME: Ideally this should be done when pages are allocated, not at
++       * BO creation time.
++       */
++      if (shmem->map_wc) {
++              struct sg_table *sgt;
++
++              sgt = drm_gem_shmem_get_pages_sgt(shmem);
++              if (IS_ERR(sgt)) {
++                      ret = PTR_ERR(sgt);
++                      goto out_put_gem;
++              }
++      }
++
+       /*
+        * Allocate an id of idr table where the obj is registered
+        * and handle has the id what user can see.
+@@ -222,6 +239,7 @@ panthor_gem_create_with_handle(struct dr
+       if (!ret)
+               *size = bo->base.base.size;
++out_put_gem:
+       /* drop reference from allocate - handle holds it now. */
+       drm_gem_object_put(&shmem->base);
diff --git a/queue-6.12/erofs-fix-unexpected-eio-under-memory-pressure.patch b/queue-6.12/erofs-fix-unexpected-eio-under-memory-pressure.patch
new file mode 100644 (file)
index 0000000..eaaed6d
--- /dev/null
@@ -0,0 +1,131 @@
+From stable+bounces-204131-greg=kroah.com@vger.kernel.org Mon Dec 29 21:22:14 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Dec 2025 15:21:42 -0500
+Subject: erofs: fix unexpected EIO under memory pressure
+To: stable@vger.kernel.org
+Cc: Junbeom Yeom <junbeom.yeom@samsung.com>, Jaewook Kim <jw5454.kim@samsung.com>, Sungjong Seo <sj1557.seo@samsung.com>, Gao Xiang <hsiangkao@linux.alibaba.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251229202143.1682446-1-sashal@kernel.org>
+
+From: Junbeom Yeom <junbeom.yeom@samsung.com>
+
+[ Upstream commit 4012d78562193ef5eb613bad4b0c0fa187637cfe ]
+
+erofs readahead can fail with ENOMEM under memory pressure because it
+tries alloc_page with GFP_NOWAIT | GFP_NORETRY, while a regular read
+uses GFP_KERNEL. And if readahead fails (with non-uptodate folios),
+the original request will then fall back to synchronous read, and
+`.read_folio()` should return appropriate errnos.
+
+However, in scenarios where readahead and read operations compete, the
+read operation can return an unintended EIO because of incorrect
+error propagation.
+
+To resolve this, this patch modifies the behavior so that, when the
+PCL is for read (which means pcl.besteffort is true), it attempts actual
+decompression instead of propagating the previous error, except an initial EIO.
+
+- Page size: 4K
+- The original size of FileA: 16K
+- Compress-ratio per PCL: 50% (Uncompressed 8K -> Compressed 4K)
+[page0, page1] [page2, page3]
+[PCL0]---------[PCL1]
+
+- functions declaration:
+  . pread(fd, buf, count, offset)
+  . readahead(fd, offset, count)
+- Thread A tries to read the last 4K
+- Thread B tries to do readahead 8K from 4K
+- RA, besteffort == false
+- R, besteffort == true
+
+        <process A>                   <process B>
+
+pread(FileA, buf, 4K, 12K)
+  do readahead(page3) // failed with ENOMEM
+  wait_lock(page3)
+    if (!uptodate(page3))
+      goto do_read
+                               readahead(FileA, 4K, 8K)
+                               // Here create PCL-chain like below:
+                               // [null, page1] [page2, null]
+                               //   [PCL0:RA]-----[PCL1:RA]
+...
+  do read(page3)        // found [PCL1:RA] and add page3 into it,
+                        // and then, change PCL1 from RA to R
+...
+                               // Now, PCL-chain is as below:
+                               // [null, page1] [page2, page3]
+                               //   [PCL0:RA]-----[PCL1:R]
+
+                                 // try to decompress PCL-chain...
+                                 z_erofs_decompress_queue
+                                   err = 0;
+
+                                   // failed with ENOMEM, so page 1
+                                   // only for RA will not be uptodated.
+                                   // it's okay.
+                                   err = decompress([PCL0:RA], err)
+
+                                   // However, ENOMEM propagated to next
+                                   // PCL, even though PCL is not only
+                                   // for RA but also for R. As a result,
+                                   // it just failed with ENOMEM without
+                                   // trying any decompression, so page2
+                                   // and page3 will not be uptodated.
+                ** BUG HERE ** --> err = decompress([PCL1:R], err)
+
+                                   return err as ENOMEM
+...
+    wait_lock(page3)
+      if (!uptodate(page3))
+        return EIO      <-- Return an unexpected EIO!
+...
+
+Fixes: 2349d2fa02db ("erofs: sunset unneeded NOFAILs")
+Cc: stable@vger.kernel.org
+Reviewed-by: Jaewook Kim <jw5454.kim@samsung.com>
+Reviewed-by: Sungjong Seo <sj1557.seo@samsung.com>
+Signed-off-by: Junbeom Yeom <junbeom.yeom@samsung.com>
+Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/erofs/zdata.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1244,14 +1244,14 @@ static int z_erofs_parse_in_bvecs(struct
+       return err;
+ }
+-static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err)
++static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, bool eio)
+ {
+       struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
+       struct z_erofs_pcluster *pcl = be->pcl;
+       unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+       const struct z_erofs_decompressor *decomp =
+                               z_erofs_decomp[pcl->algorithmformat];
+-      int i, j, jtop, err2;
++      int i, j, jtop, err2, err = eio ? -EIO : 0;
+       struct page *page;
+       bool overlapped;
+       bool try_free = true;
+@@ -1381,12 +1381,12 @@ static int z_erofs_decompress_queue(cons
+               .pcl = io->head,
+       };
+       struct z_erofs_pcluster *next;
+-      int err = io->eio ? -EIO : 0;
++      int err = 0;
+       for (; be.pcl != Z_EROFS_PCLUSTER_TAIL; be.pcl = next) {
+               DBG_BUGON(!be.pcl);
+               next = READ_ONCE(be.pcl->next);
+-              err = z_erofs_decompress_pcluster(&be, err) ?: err;
++              err = z_erofs_decompress_pcluster(&be, io->eio) ?: err;
+       }
+       return err;
+ }
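
A standalone model of the propagation fix; the struct and names are illustrative, not the kernel's. The point is that each pcluster derives its starting error from the queue-level eio flag alone, so a readahead-only -ENOMEM cannot poison a later pcluster that a reader is waiting on:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct pcl { int local_err; };	/* failure private to this pcluster */

static int decompress_one(const struct pcl *p, bool eio)
{
	if (eio)
		return -EIO;	/* real I/O failure: nothing to attempt */
	return p->local_err;	/* otherwise try this pcluster itself   */
}

int main(void)
{
	struct pcl chain[2] = { { -ENOMEM }, { 0 } };	/* RA-only, then R */
	bool io_eio = false;
	int err = 0;

	for (int i = 0; i < 2; i++) {
		int r = decompress_one(&chain[i], io_eio);

		err = r ? r : err;	/* mirrors "err = ... ?: err" */
		printf("pcluster %d -> %d\n", i, r);
	}
	/* pcluster 1 decompresses fine despite pcluster 0's -ENOMEM */
	return 0;
}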
diff --git a/queue-6.12/f2fs-add-timeout-in-f2fs_enable_checkpoint.patch b/queue-6.12/f2fs-add-timeout-in-f2fs_enable_checkpoint.patch
new file mode 100644 (file)
index 0000000..61975fc
--- /dev/null
@@ -0,0 +1,86 @@
+From stable+bounces-204246-greg=kroah.com@vger.kernel.org Tue Dec 30 17:23:05 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 11:22:52 -0500
+Subject: f2fs: add timeout in f2fs_enable_checkpoint()
+To: stable@vger.kernel.org
+Cc: Chao Yu <chao@kernel.org>, Jaegeuk Kim <jaegeuk@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230162254.2306864-2-sashal@kernel.org>
+
+From: Chao Yu <chao@kernel.org>
+
+[ Upstream commit 4bc347779698b5e67e1514bab105c2c083e55502 ]
+
+During f2fs_enable_checkpoint() in remount(), flushing a large amount
+of dirty pages to a slow device may take a long time and block write
+IO. Let's add a timeout mechanism to the dirty page flush to avoid
+blocking for a long time in f2fs_enable_checkpoint().
+
+Signed-off-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Stable-dep-of: be112e7449a6 ("f2fs: fix to propagate error from f2fs_enable_checkpoint()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/f2fs.h  |    2 ++
+ fs/f2fs/super.c |   21 +++++++++++++++------
+ 2 files changed, 17 insertions(+), 6 deletions(-)
+
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -249,6 +249,7 @@ enum {
+ #define DEF_CP_INTERVAL                       60      /* 60 secs */
+ #define DEF_IDLE_INTERVAL             5       /* 5 secs */
+ #define DEF_DISABLE_INTERVAL          5       /* 5 secs */
++#define DEF_ENABLE_INTERVAL           16      /* 16 secs */
+ #define DEF_DISABLE_QUICK_INTERVAL    1       /* 1 secs */
+ #define DEF_UMOUNT_DISCARD_TIMEOUT    5       /* 5 secs */
+@@ -1351,6 +1352,7 @@ enum {
+       DISCARD_TIME,
+       GC_TIME,
+       DISABLE_TIME,
++      ENABLE_TIME,
+       UMOUNT_DISCARD_TIMEOUT,
+       MAX_TIME,
+ };
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -2283,16 +2283,24 @@ restore_flag:
+ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
+ {
+-      int retry = DEFAULT_RETRY_IO_COUNT;
++      unsigned int nr_pages = get_pages(sbi, F2FS_DIRTY_DATA) / 16;
++
++      f2fs_update_time(sbi, ENABLE_TIME);
+       /* we should flush all the data to keep data consistency */
+-      do {
+-              sync_inodes_sb(sbi->sb);
++      while (get_pages(sbi, F2FS_DIRTY_DATA)) {
++              writeback_inodes_sb_nr(sbi->sb, nr_pages, WB_REASON_SYNC);
+               f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+-      } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);
+-      if (unlikely(retry < 0))
+-              f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
++              if (f2fs_time_over(sbi, ENABLE_TIME))
++                      break;
++      }
++
++      sync_inodes_sb(sbi->sb);
++
++      if (unlikely(get_pages(sbi, F2FS_DIRTY_DATA)))
++              f2fs_warn(sbi, "checkpoint=enable has some unwritten data: %lld",
++                                      get_pages(sbi, F2FS_DIRTY_DATA));
+       f2fs_down_write(&sbi->gc_lock);
+       f2fs_dirty_to_prefree(sbi);
+@@ -3868,6 +3876,7 @@ static void init_sb_info(struct f2fs_sb_
+       sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
+       sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
+       sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
++      sbi->interval_time[ENABLE_TIME] = DEF_ENABLE_INTERVAL;
+       sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
+                               DEF_UMOUNT_DISCARD_TIMEOUT;
+       clear_sbi_flag(sbi, SBI_NEED_FSCK);
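
The flush loop above can be modeled in a few lines of standalone C: write back slices of roughly 1/16th of the dirty pages until either nothing is left or a fixed time budget (16s here, mirroring DEF_ENABLE_INTERVAL) runs out. All names are illustrative:

#include <stdio.h>
#include <time.h>

static int dirty = 100;			/* pretend dirty-page counter */

static void flush_slice(int n)
{
	dirty -= (n < dirty) ? n : dirty;
}

int main(void)
{
	time_t deadline = time(NULL) + 16;	/* 16s budget */
	int slice = dirty / 16 ? dirty / 16 : 1;

	while (dirty) {
		flush_slice(slice);
		if (time(NULL) > deadline)
			break;		/* stop blocking the remount */
	}
	if (dirty)			/* the final full sync would run here */
		fprintf(stderr, "still %d dirty pages after budget\n", dirty);
	return 0;
}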
diff --git a/queue-6.12/f2fs-clear-sbi_por_doing-before-initing-inmem-curseg.patch b/queue-6.12/f2fs-clear-sbi_por_doing-before-initing-inmem-curseg.patch
new file mode 100644 (file)
index 0000000..d919ffd
--- /dev/null
@@ -0,0 +1,47 @@
+From stable+bounces-204245-greg=kroah.com@vger.kernel.org Tue Dec 30 17:23:00 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 11:22:51 -0500
+Subject: f2fs: clear SBI_POR_DOING before initing inmem curseg
+To: stable@vger.kernel.org
+Cc: Sheng Yong <shengyong@oppo.com>, Song Feng <songfeng@oppo.com>, Yongpeng Yang <yangyongpeng1@oppo.com>, Chao Yu <chao@kernel.org>, Jaegeuk Kim <jaegeuk@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230162254.2306864-1-sashal@kernel.org>
+
+From: Sheng Yong <shengyong@oppo.com>
+
+[ Upstream commit f88c7904b5c7e35ab8037e2a59e10d80adf6fd7e ]
+
+SBI_POR_DOING can be cleared after recovery is completed, so that
+changes made before recovery can be persistent, and subsequent
+errors can be recorded into cp/sb.
+
+Signed-off-by: Song Feng <songfeng@oppo.com>
+Signed-off-by: Yongpeng Yang <yangyongpeng1@oppo.com>
+Signed-off-by: Sheng Yong <shengyong@oppo.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Stable-dep-of: be112e7449a6 ("f2fs: fix to propagate error from f2fs_enable_checkpoint()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/super.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -4804,13 +4804,13 @@ reset_checkpoint:
+       if (err)
+               goto free_meta;
++      /* f2fs_recover_fsync_data() cleared this already */
++      clear_sbi_flag(sbi, SBI_POR_DOING);
++
+       err = f2fs_init_inmem_curseg(sbi);
+       if (err)
+               goto sync_free_meta;
+-      /* f2fs_recover_fsync_data() cleared this already */
+-      clear_sbi_flag(sbi, SBI_POR_DOING);
+-
+       if (test_opt(sbi, DISABLE_CHECKPOINT)) {
+               err = f2fs_disable_checkpoint(sbi);
+               if (err)
diff --git a/queue-6.12/f2fs-drop-inode-from-the-donation-list-when-the-last-file-is-closed.patch b/queue-6.12/f2fs-drop-inode-from-the-donation-list-when-the-last-file-is-closed.patch
new file mode 100644 (file)
index 0000000..1aae9c0
--- /dev/null
@@ -0,0 +1,90 @@
+From stable+bounces-204241-greg=kroah.com@vger.kernel.org Tue Dec 30 17:15:34 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 11:15:26 -0500
+Subject: f2fs: drop inode from the donation list when the last file is closed
+To: stable@vger.kernel.org
+Cc: Jaegeuk Kim <jaegeuk@kernel.org>, Chao Yu <chao@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230161527.2300909-1-sashal@kernel.org>
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+[ Upstream commit 078cad8212ce4f4ebbafcc0936475b8215e1ca2a ]
+
+Let's drop the inode from the donation list when there is no other
+open file.
+
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Stable-dep-of: 10b591e7fb7c ("f2fs: fix to avoid updating compression context during writeback")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/f2fs.h  |    2 ++
+ fs/f2fs/file.c  |    8 +++++++-
+ fs/f2fs/inode.c |    2 +-
+ fs/f2fs/super.c |    1 +
+ 4 files changed, 11 insertions(+), 2 deletions(-)
+
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -859,6 +859,7 @@ struct f2fs_inode_info {
+       /* linked in global inode list for cache donation */
+       struct list_head gdonate_list;
+       pgoff_t donate_start, donate_end; /* inclusive */
++      atomic_t open_count;            /* # of open files */
+       struct task_struct *atomic_write_task;  /* store atomic write task */
+       struct extent_tree *extent_tree[NR_EXTENT_CACHES];
+@@ -3600,6 +3601,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb
+ void f2fs_update_inode(struct inode *inode, struct page *node_page);
+ void f2fs_update_inode_page(struct inode *inode);
+ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
++void f2fs_remove_donate_inode(struct inode *inode);
+ void f2fs_evict_inode(struct inode *inode);
+ void f2fs_handle_failed_inode(struct inode *inode);
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -631,7 +631,10 @@ static int f2fs_file_open(struct inode *
+       if (err)
+               return err;
+-      return finish_preallocate_blocks(inode);
++      err = finish_preallocate_blocks(inode);
++      if (!err)
++              atomic_inc(&F2FS_I(inode)->open_count);
++      return err;
+ }
+ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+@@ -1992,6 +1995,9 @@ out:
+ static int f2fs_release_file(struct inode *inode, struct file *filp)
+ {
++      if (atomic_dec_and_test(&F2FS_I(inode)->open_count))
++              f2fs_remove_donate_inode(inode);
++
+       /*
+        * f2fs_release_file is called at every close calls. So we should
+        * not drop any inmemory pages by close called by other process.
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -807,7 +807,7 @@ int f2fs_write_inode(struct inode *inode
+       return 0;
+ }
+-static void f2fs_remove_donate_inode(struct inode *inode)
++void f2fs_remove_donate_inode(struct inode *inode)
+ {
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1425,6 +1425,7 @@ static struct inode *f2fs_alloc_inode(st
+       /* Initialize f2fs-specific inode info */
+       atomic_set(&fi->dirty_pages, 0);
+       atomic_set(&fi->i_compr_blocks, 0);
++      atomic_set(&fi->open_count, 0);
+       init_f2fs_rwsem(&fi->i_sem);
+       spin_lock_init(&fi->i_size_lock);
+       INIT_LIST_HEAD(&fi->dirty_list);
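
The open-count scheme maps onto a familiar last-close pattern; here is a standalone C11 sketch (atomic_fetch_sub returns the old value, so old == 1 means the count just hit zero, matching atomic_dec_and_test):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;

static void file_open(void)
{
	atomic_fetch_add(&open_count, 1);
}

static void file_release(void)
{
	if (atomic_fetch_sub(&open_count, 1) == 1)
		printf("last close: drop inode from donation list\n");
}

int main(void)
{
	file_open();
	file_open();
	file_release();	/* another fd still open: no cleanup */
	file_release();	/* count hits zero: cleanup runs     */
	return 0;
}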
diff --git a/queue-6.12/f2fs-dump-more-information-for-f2fs_-enable-disable-_checkpoint.patch b/queue-6.12/f2fs-dump-more-information-for-f2fs_-enable-disable-_checkpoint.patch
new file mode 100644 (file)
index 0000000..f56cfbe
--- /dev/null
@@ -0,0 +1,73 @@
+From stable+bounces-204247-greg=kroah.com@vger.kernel.org Tue Dec 30 17:23:10 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 11:22:53 -0500
+Subject: f2fs: dump more information for f2fs_{enable,disable}_checkpoint()
+To: stable@vger.kernel.org
+Cc: Chao Yu <chao@kernel.org>, Jaegeuk Kim <jaegeuk@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230162254.2306864-3-sashal@kernel.org>
+
+From: Chao Yu <chao@kernel.org>
+
+[ Upstream commit 80b6d1d2535a343e43d658777a46f1ebce8f3413 ]
+
+Changes as below:
+- print more logs for f2fs_{enable,disable}_checkpoint()
+- account and dump time stats for f2fs_enable_checkpoint()
+
+Signed-off-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Stable-dep-of: be112e7449a6 ("f2fs: fix to propagate error from f2fs_enable_checkpoint()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/super.c |   16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -2278,15 +2278,24 @@ out_unlock:
+ restore_flag:
+       sbi->gc_mode = gc_mode;
+       sbi->sb->s_flags = s_flags;     /* Restore SB_RDONLY status */
++      f2fs_info(sbi, "f2fs_disable_checkpoint() finish, err:%d", err);
+       return err;
+ }
+ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
+ {
+       unsigned int nr_pages = get_pages(sbi, F2FS_DIRTY_DATA) / 16;
++      long long start, writeback, end;
++
++      f2fs_info(sbi, "f2fs_enable_checkpoint() starts, meta: %lld, node: %lld, data: %lld",
++                                      get_pages(sbi, F2FS_DIRTY_META),
++                                      get_pages(sbi, F2FS_DIRTY_NODES),
++                                      get_pages(sbi, F2FS_DIRTY_DATA));
+       f2fs_update_time(sbi, ENABLE_TIME);
++      start = ktime_get();
++
+       /* we should flush all the data to keep data consistency */
+       while (get_pages(sbi, F2FS_DIRTY_DATA)) {
+               writeback_inodes_sb_nr(sbi->sb, nr_pages, WB_REASON_SYNC);
+@@ -2295,6 +2304,7 @@ static void f2fs_enable_checkpoint(struc
+               if (f2fs_time_over(sbi, ENABLE_TIME))
+                       break;
+       }
++      writeback = ktime_get();
+       sync_inodes_sb(sbi->sb);
+@@ -2313,6 +2323,12 @@ static void f2fs_enable_checkpoint(struc
+       /* Let's ensure there's no pending checkpoint anymore */
+       f2fs_flush_ckpt_thread(sbi);
++
++      end = ktime_get();
++
++      f2fs_info(sbi, "f2fs_enable_checkpoint() finishes, writeback:%llu, sync:%llu",
++                                      ktime_ms_delta(writeback, start),
++                                      ktime_ms_delta(end, writeback));
+ }
+ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
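
The two ktime_ms_delta() reports bracket the writeback and sync phases. A standalone sketch of the same phase-timing idiom using CLOCK_MONOTONIC (names are illustrative):

#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

int main(void)
{
	long long start = now_ms();
	/* ... bounded writeback loop would run here ... */
	long long writeback = now_ms();
	/* ... sync_inodes_sb()-style full sync would run here ... */
	long long end = now_ms();

	printf("writeback:%lld ms, sync:%lld ms\n",
	       writeback - start, end - writeback);
	return 0;
}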
diff --git a/queue-6.12/f2fs-fix-to-avoid-updating-compression-context-during-writeback.patch b/queue-6.12/f2fs-fix-to-avoid-updating-compression-context-during-writeback.patch
new file mode 100644 (file)
index 0000000..0b2fe87
--- /dev/null
@@ -0,0 +1,163 @@
+From stable+bounces-204242-greg=kroah.com@vger.kernel.org Tue Dec 30 17:15:37 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 11:15:27 -0500
+Subject: f2fs: fix to avoid updating compression context during writeback
+To: stable@vger.kernel.org
+Cc: Chao Yu <chao@kernel.org>, stable@kernel.org, "Bai, Shuangpeng" <sjb7183@psu.edu>, Jaegeuk Kim <jaegeuk@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230161527.2300909-2-sashal@kernel.org>
+
+From: Chao Yu <chao@kernel.org>
+
+[ Upstream commit 10b591e7fb7cdc8c1e53e9c000dc0ef7069aaa76 ]
+
+Bai, Shuangpeng <sjb7183@psu.edu> reported a bug as below:
+
+Oops: divide error: 0000 [#1] SMP KASAN PTI
+CPU: 0 UID: 0 PID: 11441 Comm: syz.0.46 Not tainted 6.17.0 #1 PREEMPT(full)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
+RIP: 0010:f2fs_all_cluster_page_ready+0x106/0x550 fs/f2fs/compress.c:857
+Call Trace:
+ <TASK>
+ f2fs_write_cache_pages fs/f2fs/data.c:3078 [inline]
+ __f2fs_write_data_pages fs/f2fs/data.c:3290 [inline]
+ f2fs_write_data_pages+0x1c19/0x3600 fs/f2fs/data.c:3317
+ do_writepages+0x38e/0x640 mm/page-writeback.c:2634
+ filemap_fdatawrite_wbc mm/filemap.c:386 [inline]
+ __filemap_fdatawrite_range mm/filemap.c:419 [inline]
+ file_write_and_wait_range+0x2ba/0x3e0 mm/filemap.c:794
+ f2fs_do_sync_file+0x6e6/0x1b00 fs/f2fs/file.c:294
+ generic_write_sync include/linux/fs.h:3043 [inline]
+ f2fs_file_write_iter+0x76e/0x2700 fs/f2fs/file.c:5259
+ new_sync_write fs/read_write.c:593 [inline]
+ vfs_write+0x7e9/0xe00 fs/read_write.c:686
+ ksys_write+0x19d/0x2d0 fs/read_write.c:738
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xf7/0x470 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+The bug was triggered w/ below race condition:
+
+fsync                          setattr                 ioctl
+- f2fs_do_sync_file
+ - file_write_and_wait_range
+  - f2fs_write_cache_pages
+  : inode is non-compressed
+  : cc.cluster_size =
+    F2FS_I(inode)->i_cluster_size = 0
+   - tag_pages_for_writeback
+                               - f2fs_setattr
+                                - truncate_setsize
+                                - f2fs_truncate
+                                                       - f2fs_fileattr_set
+                                                        - f2fs_setflags_common
+                                                         - set_compress_context
+                                                         : F2FS_I(inode)->i_cluster_size = 4
+                                                         : set_inode_flag(inode, FI_COMPRESSED_FILE)
+   - f2fs_compressed_file
+   : return true
+   - f2fs_all_cluster_page_ready
+   : "pgidx % cc->cluster_size" trigger dividing 0 issue
+
+Let's change as below to fix this issue:
+- introduce a new atomic type variable .writeback in structure f2fs_inode_info
+to track the number of threads calling f2fs_write_cache_pages().
+- use .i_sem lock to protect .writeback update.
+- check .writeback before update compression context in f2fs_setflags_common()
+to avoid race w/ ->writepages.
+
+Fixes: 4c8ff7095bef ("f2fs: support data compression")
+Cc: stable@kernel.org
+Reported-by: Bai, Shuangpeng <sjb7183@psu.edu>
+Tested-by: Bai, Shuangpeng <sjb7183@psu.edu>
+Closes: https://lore.kernel.org/lkml/44D8F7B3-68AD-425F-9915-65D27591F93F@psu.edu
+Signed-off-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/data.c  |   17 +++++++++++++++++
+ fs/f2fs/f2fs.h  |    3 ++-
+ fs/f2fs/file.c  |    5 +++--
+ fs/f2fs/super.c |    1 +
+ 4 files changed, 23 insertions(+), 3 deletions(-)
+
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -3272,6 +3272,19 @@ static inline bool __should_serialize_io
+       return false;
+ }
++static inline void account_writeback(struct inode *inode, bool inc)
++{
++      if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
++              return;
++
++      f2fs_down_read(&F2FS_I(inode)->i_sem);
++      if (inc)
++              atomic_inc(&F2FS_I(inode)->writeback);
++      else
++              atomic_dec(&F2FS_I(inode)->writeback);
++      f2fs_up_read(&F2FS_I(inode)->i_sem);
++}
++
+ static int __f2fs_write_data_pages(struct address_space *mapping,
+                                               struct writeback_control *wbc,
+                                               enum iostat_type io_type)
+@@ -3321,10 +3334,14 @@ static int __f2fs_write_data_pages(struc
+               locked = true;
+       }
++      account_writeback(inode, true);
++
+       blk_start_plug(&plug);
+       ret = f2fs_write_cache_pages(mapping, wbc, io_type);
+       blk_finish_plug(&plug);
++      account_writeback(inode, false);
++
+       if (locked)
+               mutex_unlock(&sbi->writepages);
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -887,6 +887,7 @@ struct f2fs_inode_info {
+       unsigned char i_compress_level;         /* compress level (lz4hc,zstd) */
+       unsigned char i_compress_flag;          /* compress flag */
+       unsigned int i_cluster_size;            /* cluster size */
++      atomic_t writeback;                     /* count # of writeback thread */
+       unsigned int atomic_write_cnt;
+       loff_t original_i_size;         /* original i_size before atomic write */
+@@ -4540,7 +4541,7 @@ static inline bool f2fs_disable_compress
+               f2fs_up_write(&fi->i_sem);
+               return true;
+       }
+-      if (f2fs_is_mmap_file(inode) ||
++      if (f2fs_is_mmap_file(inode) || atomic_read(&fi->writeback) ||
+               (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
+               f2fs_up_write(&fi->i_sem);
+               return false;
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -2071,8 +2071,9 @@ static int f2fs_setflags_common(struct i
+                       f2fs_down_write(&fi->i_sem);
+                       if (!f2fs_may_compress(inode) ||
+-                                      (S_ISREG(inode->i_mode) &&
+-                                      F2FS_HAS_BLOCKS(inode))) {
++                              atomic_read(&fi->writeback) ||
++                              (S_ISREG(inode->i_mode) &&
++                              F2FS_HAS_BLOCKS(inode))) {
+                               f2fs_up_write(&fi->i_sem);
+                               return -EINVAL;
+                       }
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1426,6 +1426,7 @@ static struct inode *f2fs_alloc_inode(st
+       atomic_set(&fi->dirty_pages, 0);
+       atomic_set(&fi->i_compr_blocks, 0);
+       atomic_set(&fi->open_count, 0);
++      atomic_set(&fi->writeback, 0);
+       init_f2fs_rwsem(&fi->i_sem);
+       spin_lock_init(&fi->i_size_lock);
+       INIT_LIST_HEAD(&fi->dirty_list);
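
A standalone model of the guard this patch builds: the writeback side bumps an atomic counter while holding the rwsem shared, and the flag-change side takes it exclusive and refuses to switch compression on while the counter is nonzero. Names are illustrative, not the kernel's:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_rwlock_t i_sem = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int writeback;

static void writeback_begin(void)
{
	pthread_rwlock_rdlock(&i_sem);	/* f2fs_down_read(&fi->i_sem)  */
	atomic_fetch_add(&writeback, 1);
	pthread_rwlock_unlock(&i_sem);
}

static int set_compress_context(void)
{
	int ret = 0;

	pthread_rwlock_wrlock(&i_sem);	/* f2fs_down_write(&fi->i_sem) */
	if (atomic_load(&writeback))
		ret = -1;	/* -EINVAL in the patch: writeback active */
	/* else: safe to set i_cluster_size and FI_COMPRESSED_FILE here */
	pthread_rwlock_unlock(&i_sem);
	return ret;
}

int main(void)
{
	writeback_begin();
	printf("set_compress_context -> %d\n", set_compress_context());
	return 0;
}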
diff --git a/queue-6.12/f2fs-fix-to-detect-recoverable-inode-during-dryrun-of-find_fsync_dnodes.patch b/queue-6.12/f2fs-fix-to-detect-recoverable-inode-during-dryrun-of-find_fsync_dnodes.patch
new file mode 100644 (file)
index 0000000..e0b87df
--- /dev/null
@@ -0,0 +1,109 @@
+From stable+bounces-204272-greg=kroah.com@vger.kernel.org Tue Dec 30 19:32:26 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 13:32:19 -0500
+Subject: f2fs: fix to detect recoverable inode during dryrun of find_fsync_dnodes()
+To: stable@vger.kernel.org
+Cc: Chao Yu <chao@kernel.org>, stable@kernel.org, Jaegeuk Kim <jaegeuk@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230183219.2392969-1-sashal@kernel.org>
+
+From: Chao Yu <chao@kernel.org>
+
+[ Upstream commit 68d05693f8c031257a0822464366e1c2a239a512 ]
+
+mkfs.f2fs -f /dev/vdd
+mount /dev/vdd /mnt/f2fs
+touch /mnt/f2fs/foo
+sync           # avoid CP_UMOUNT_FLAG in last f2fs_checkpoint.ckpt_flags
+touch /mnt/f2fs/bar
+f2fs_io fsync /mnt/f2fs/bar
+f2fs_io shutdown 2 /mnt/f2fs
+umount /mnt/f2fs
+blockdev --setro /dev/vdd
+mount /dev/vdd /mnt/f2fs
+mount: /mnt/f2fs: WARNING: source write-protected, mounted read-only.
+
+In the case where we create and fsync a new inode before a sudden
+power-cut, without the norecovery or disable_roll_forward mount option,
+the following mount will succeed w/o recovering the last fsynced inode.
+
+The problem here is that we only check the inode_list after
+find_fsync_dnodes() in f2fs_recover_fsync_data() to find out whether
+there is recoverable data in the image, but one case is missed: if the
+last fsynced inode does not exist in the last checkpoint, we will fail
+to get its inode because the NAT entry of its inode node is not in the
+last checkpoint, so the inode won't be linked into inode_list.
+
+Let's detect such a case in dryrun mode to fix this issue.
+
+After this change, mount will fail as expected below:
+mount: /mnt/f2fs: cannot mount /dev/vdd read-only.
+       dmesg(1) may have more information after failed mount system call.
+dmesg:
+F2FS-fs (vdd): Need to recover fsync data, but write access unavailable, please try mount w/ disable_roll_forward or norecovery
+
+Cc: stable@kernel.org
+Fixes: 6781eabba1bd ("f2fs: give -EINVAL for norecovery and rw mount")
+Signed-off-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+[ folio => page ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/recovery.c |   20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -398,7 +398,7 @@ static int sanity_check_node_chain(struc
+ }
+ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
+-                              bool check_only)
++                              bool check_only, bool *new_inode)
+ {
+       struct curseg_info *curseg;
+       struct page *page = NULL;
+@@ -445,16 +445,19 @@ static int find_fsync_dnodes(struct f2fs
+                               quota_inode = true;
+                       }
+-                      /*
+-                       * CP | dnode(F) | inode(DF)
+-                       * For this case, we should not give up now.
+-                       */
+                       entry = add_fsync_inode(sbi, head, ino_of_node(page),
+                                                               quota_inode);
+                       if (IS_ERR(entry)) {
+                               err = PTR_ERR(entry);
+-                              if (err == -ENOENT)
++                              /*
++                               * CP | dnode(F) | inode(DF)
++                               * For this case, we should not give up now.
++                               */
++                              if (err == -ENOENT) {
++                                      if (check_only)
++                                              *new_inode = true;
+                                       goto next;
++                              }
+                               f2fs_put_page(page, 1);
+                               break;
+                       }
+@@ -852,6 +855,7 @@ int f2fs_recover_fsync_data(struct f2fs_
+       int ret = 0;
+       unsigned long s_flags = sbi->sb->s_flags;
+       bool need_writecp = false;
++      bool new_inode = false;
+       if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE))
+               f2fs_info(sbi, "recover fsync data on readonly fs");
+@@ -864,8 +868,8 @@ int f2fs_recover_fsync_data(struct f2fs_
+       f2fs_down_write(&sbi->cp_global_sem);
+       /* step #1: find fsynced inode numbers */
+-      err = find_fsync_dnodes(sbi, &inode_list, check_only);
+-      if (err || list_empty(&inode_list))
++      err = find_fsync_dnodes(sbi, &inode_list, check_only, &new_inode);
++      if (err < 0 || (list_empty(&inode_list) && (!check_only || !new_inode)))
+               goto skip;
+       if (check_only) {
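
The heart of the change: during the check_only (dryrun) pass, an -ENOENT
from add_fsync_inode() is now recorded as evidence of a freshly fsynced
inode whose node has no NAT entry in the last checkpoint, rather than
being skipped silently. Below is a rough standalone C sketch of that
decision logic; scan_fsync_nodes() and friends are hypothetical stand-ins,
not the real f2fs symbols.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int lookup_inode(int ino)
{
        (void)ino;
        return -ENOENT; /* pretend the inode's NAT entry missed the last CP */
}

static int scan_fsync_nodes(bool check_only, bool *new_inode, int *nr_found)
{
        int err = lookup_inode(42);

        if (err == -ENOENT) {
                /* CP | dnode(F) | inode(DF): do not give up yet */
                if (check_only)
                        *new_inode = true;
                return 0;       /* skip this entry, keep scanning */
        }
        if (!err)
                (*nr_found)++;
        return err;
}

int main(void)
{
        bool check_only = true, new_inode = false;
        int nr_found = 0;
        int err = scan_fsync_nodes(check_only, &new_inode, &nr_found);

        /* Recoverable data exists if entries were linked, or if a brand
         * new fsynced inode was spotted during the dryrun. */
        bool recoverable = !err && (nr_found || (check_only && new_inode));

        printf("recoverable: %s\n", recoverable ? "yes" : "no");
        return 0;
}

With check_only set this prints "recoverable: yes", which is why the
read-only mount in the reproducer now fails instead of silently dropping
the fsynced file.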
diff --git a/queue-6.12/f2fs-fix-to-propagate-error-from-f2fs_enable_checkpoint.patch b/queue-6.12/f2fs-fix-to-propagate-error-from-f2fs_enable_checkpoint.patch
new file mode 100644 (file)
index 0000000..5134e77
--- /dev/null
@@ -0,0 +1,99 @@
+From stable+bounces-204248-greg=kroah.com@vger.kernel.org Tue Dec 30 17:23:12 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 11:22:54 -0500
+Subject: f2fs: fix to propagate error from f2fs_enable_checkpoint()
+To: stable@vger.kernel.org
+Cc: Chao Yu <chao@kernel.org>, stable@kernel.org, Jaegeuk Kim <jaegeuk@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230162254.2306864-4-sashal@kernel.org>
+
+From: Chao Yu <chao@kernel.org>
+
+[ Upstream commit be112e7449a6e1b54aa9feac618825d154b3a5c7 ]
+
+Propagate the error from f2fs_enable_checkpoint() so that userspace can
+detect such an error rather than suffering a silent failure.
+
+Fixes: 4354994f097d ("f2fs: checkpoint disabling")
+Cc: stable@kernel.org
+Signed-off-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/super.c |   26 ++++++++++++++++----------
+ 1 file changed, 16 insertions(+), 10 deletions(-)
+
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -2282,10 +2282,11 @@ restore_flag:
+       return err;
+ }
+-static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
++static int f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
+ {
+       unsigned int nr_pages = get_pages(sbi, F2FS_DIRTY_DATA) / 16;
+       long long start, writeback, end;
++      int ret;
+       f2fs_info(sbi, "f2fs_enable_checkpoint() starts, meta: %lld, node: %lld, data: %lld",
+                                       get_pages(sbi, F2FS_DIRTY_META),
+@@ -2319,7 +2320,9 @@ static void f2fs_enable_checkpoint(struc
+       set_sbi_flag(sbi, SBI_IS_DIRTY);
+       f2fs_up_write(&sbi->gc_lock);
+-      f2fs_sync_fs(sbi->sb, 1);
++      ret = f2fs_sync_fs(sbi->sb, 1);
++      if (ret)
++              f2fs_err(sbi, "%s sync_fs failed, ret: %d", __func__, ret);
+       /* Let's ensure there's no pending checkpoint anymore */
+       f2fs_flush_ckpt_thread(sbi);
+@@ -2329,6 +2332,7 @@ static void f2fs_enable_checkpoint(struc
+       f2fs_info(sbi, "f2fs_enable_checkpoint() finishes, writeback:%llu, sync:%llu",
+                                       ktime_ms_delta(writeback, start),
+                                       ktime_ms_delta(end, writeback));
++      return ret;
+ }
+ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+@@ -2543,7 +2547,9 @@ static int f2fs_remount(struct super_blo
+                               goto restore_discard;
+                       need_enable_checkpoint = true;
+               } else {
+-                      f2fs_enable_checkpoint(sbi);
++                      err = f2fs_enable_checkpoint(sbi);
++                      if (err)
++                              goto restore_discard;
+                       need_disable_checkpoint = true;
+               }
+       }
+@@ -2585,7 +2591,8 @@ skip:
+       return 0;
+ restore_checkpoint:
+       if (need_enable_checkpoint) {
+-              f2fs_enable_checkpoint(sbi);
++              if (f2fs_enable_checkpoint(sbi))
++                      f2fs_warn(sbi, "checkpoint has not been enabled");
+       } else if (need_disable_checkpoint) {
+               if (f2fs_disable_checkpoint(sbi))
+                       f2fs_warn(sbi, "checkpoint has not been disabled");
+@@ -4836,13 +4843,12 @@ reset_checkpoint:
+       if (err)
+               goto sync_free_meta;
+-      if (test_opt(sbi, DISABLE_CHECKPOINT)) {
++      if (test_opt(sbi, DISABLE_CHECKPOINT))
+               err = f2fs_disable_checkpoint(sbi);
+-              if (err)
+-                      goto sync_free_meta;
+-      } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
+-              f2fs_enable_checkpoint(sbi);
+-      }
++      else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))
++              err = f2fs_enable_checkpoint(sbi);
++      if (err)
++              goto sync_free_meta;
+       /*
+        * If filesystem is not mounted as read-only then
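
The conversion above is the classic void-to-int pattern: the helper now
returns the f2fs_sync_fs() result and every caller either unwinds or at
least warns. A minimal standalone C sketch of the same pattern, with
placeholder names rather than the actual f2fs functions:

#include <errno.h>
#include <stdio.h>

static int sync_fs(void)
{
        return -EIO;    /* simulate a failed sync */
}

static int enable_checkpoint(void)
{
        int ret = sync_fs();

        if (ret)
                fprintf(stderr, "enable_checkpoint: sync_fs failed, ret: %d\n", ret);
        return ret;     /* previously 'void': the error now propagates */
}

int main(void)
{
        int err = enable_checkpoint();

        if (err) {
                /* Callers can unwind instead of reporting success. */
                fprintf(stderr, "bailing out with %d\n", err);
                return 1;
        }
        return 0;
}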
diff --git a/queue-6.12/f2fs-use-global-inline_xattr_slab-instead-of-per-sb-slab-cache.patch b/queue-6.12/f2fs-use-global-inline_xattr_slab-instead-of-per-sb-slab-cache.patch
new file mode 100644 (file)
index 0000000..49b8d85
--- /dev/null
@@ -0,0 +1,239 @@
+From stable+bounces-204268-greg=kroah.com@vger.kernel.org Tue Dec 30 19:07:03 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 13:06:25 -0500
+Subject: f2fs: use global inline_xattr_slab instead of per-sb slab cache
+To: stable@vger.kernel.org
+Cc: Chao Yu <chao@kernel.org>, stable@kernel.org, Hong Yun <yhong@link.cuhk.edu.hk>, Jaegeuk Kim <jaegeuk@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230180625.2376707-1-sashal@kernel.org>
+
+From: Chao Yu <chao@kernel.org>
+
+[ Upstream commit 1f27ef42bb0b7c0740c5616ec577ec188b8a1d05 ]
+
+As Hong Yun reported on the mailing list:
+
+loop7: detected capacity change from 0 to 131072
+------------[ cut here ]------------
+kmem_cache of name 'f2fs_xattr_entry-7:7' already exists
+WARNING: CPU: 0 PID: 24426 at mm/slab_common.c:110 kmem_cache_sanity_check mm/slab_common.c:109 [inline]
+WARNING: CPU: 0 PID: 24426 at mm/slab_common.c:110 __kmem_cache_create_args+0xa6/0x320 mm/slab_common.c:307
+CPU: 0 UID: 0 PID: 24426 Comm: syz.7.1370 Not tainted 6.17.0-rc4 #1 PREEMPT(full)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
+RIP: 0010:kmem_cache_sanity_check mm/slab_common.c:109 [inline]
+RIP: 0010:__kmem_cache_create_args+0xa6/0x320 mm/slab_common.c:307
+Call Trace:
+ __kmem_cache_create include/linux/slab.h:353 [inline]
+ f2fs_kmem_cache_create fs/f2fs/f2fs.h:2943 [inline]
+ f2fs_init_xattr_caches+0xa5/0xe0 fs/f2fs/xattr.c:843
+ f2fs_fill_super+0x1645/0x2620 fs/f2fs/super.c:4918
+ get_tree_bdev_flags+0x1fb/0x260 fs/super.c:1692
+ vfs_get_tree+0x43/0x140 fs/super.c:1815
+ do_new_mount+0x201/0x550 fs/namespace.c:3808
+ do_mount fs/namespace.c:4136 [inline]
+ __do_sys_mount fs/namespace.c:4347 [inline]
+ __se_sys_mount+0x298/0x2f0 fs/namespace.c:4324
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0x8e/0x3a0 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+The bug can be reproduced w/ the below script:
+- mount /dev/vdb /mnt1
+- mount /dev/vdc /mnt2
+- umount /mnt1
+- mount /dev/vdb /mnt1
+
+The reason is that we created two slab caches, named f2fs_xattr_entry-7:3
+and f2fs_xattr_entry-7:7, which have the same slab size. In that case the
+slab system only creates one slab cache core structure, which carries the
+slab name "f2fs_xattr_entry-7:3", and the two slab caches share the same
+structure and cache address.
+
+So, if we destroy the f2fs_xattr_entry-7:3 cache via its cache address,
+that only decreases the reference count of the slab cache rather than
+releasing it entirely, since one more user still references the cache.
+
+Then, if we try to create a slab cache named "f2fs_xattr_entry-7:3" again,
+the slab system will find that a cache with the same name already exists
+and trigger the warning.
+
+Let's change to use a global inline_xattr_slab instead of a per-sb slab
+cache to fix this.
+
+Fixes: a999150f4fe3 ("f2fs: use kmem_cache pool during inline xattr lookups")
+Cc: stable@kernel.org
+Reported-by: Hong Yun <yhong@link.cuhk.edu.hk>
+Tested-by: Hong Yun <yhong@link.cuhk.edu.hk>
+Signed-off-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+[ folio => page ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/f2fs.h  |    3 ---
+ fs/f2fs/super.c |   17 ++++++++---------
+ fs/f2fs/xattr.c |   30 ++++++++++--------------------
+ fs/f2fs/xattr.h |   10 ++++++----
+ 4 files changed, 24 insertions(+), 36 deletions(-)
+
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -1804,9 +1804,6 @@ struct f2fs_sb_info {
+       spinlock_t error_lock;                  /* protect errors/stop_reason array */
+       bool error_dirty;                       /* errors of sb is dirty */
+-      struct kmem_cache *inline_xattr_slab;   /* inline xattr entry */
+-      unsigned int inline_xattr_slab_size;    /* default inline xattr slab size */
+-
+       /* For reclaimed segs statistics per each GC mode */
+       unsigned int gc_segment_mode;           /* GC state for reclaimed segments */
+       unsigned int gc_reclaimed_segs[MAX_GC_MODE];    /* Reclaimed segs for each mode */
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1695,7 +1695,6 @@ static void f2fs_put_super(struct super_
+       kfree(sbi->raw_super);
+       f2fs_destroy_page_array_cache(sbi);
+-      f2fs_destroy_xattr_caches(sbi);
+ #ifdef CONFIG_QUOTA
+       for (i = 0; i < MAXQUOTAS; i++)
+               kfree(F2FS_OPTION(sbi).s_qf_names[i]);
+@@ -4568,13 +4567,9 @@ try_onemore:
+       if (err)
+               goto free_iostat;
+-      /* init per sbi slab cache */
+-      err = f2fs_init_xattr_caches(sbi);
+-      if (err)
+-              goto free_percpu;
+       err = f2fs_init_page_array_cache(sbi);
+       if (err)
+-              goto free_xattr_cache;
++              goto free_percpu;
+       /* get an inode for meta space */
+       sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
+@@ -4906,8 +4901,6 @@ free_meta_inode:
+       sbi->meta_inode = NULL;
+ free_page_array_cache:
+       f2fs_destroy_page_array_cache(sbi);
+-free_xattr_cache:
+-      f2fs_destroy_xattr_caches(sbi);
+ free_percpu:
+       destroy_percpu_info(sbi);
+ free_iostat:
+@@ -5069,10 +5062,15 @@ static int __init init_f2fs_fs(void)
+       err = f2fs_create_casefold_cache();
+       if (err)
+               goto free_compress_cache;
+-      err = register_filesystem(&f2fs_fs_type);
++      err = f2fs_init_xattr_cache();
+       if (err)
+               goto free_casefold_cache;
++      err = register_filesystem(&f2fs_fs_type);
++      if (err)
++              goto free_xattr_cache;
+       return 0;
++free_xattr_cache:
++      f2fs_destroy_xattr_cache();
+ free_casefold_cache:
+       f2fs_destroy_casefold_cache();
+ free_compress_cache:
+@@ -5113,6 +5111,7 @@ fail:
+ static void __exit exit_f2fs_fs(void)
+ {
+       unregister_filesystem(&f2fs_fs_type);
++      f2fs_destroy_xattr_cache();
+       f2fs_destroy_casefold_cache();
+       f2fs_destroy_compress_cache();
+       f2fs_destroy_compress_mempool();
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -23,11 +23,12 @@
+ #include "xattr.h"
+ #include "segment.h"
++static struct kmem_cache *inline_xattr_slab;
+ static void *xattr_alloc(struct f2fs_sb_info *sbi, int size, bool *is_inline)
+ {
+-      if (likely(size == sbi->inline_xattr_slab_size)) {
++      if (likely(size == DEFAULT_XATTR_SLAB_SIZE)) {
+               *is_inline = true;
+-              return f2fs_kmem_cache_alloc(sbi->inline_xattr_slab,
++              return f2fs_kmem_cache_alloc(inline_xattr_slab,
+                                       GFP_F2FS_ZERO, false, sbi);
+       }
+       *is_inline = false;
+@@ -38,7 +39,7 @@ static void xattr_free(struct f2fs_sb_in
+                                                       bool is_inline)
+ {
+       if (is_inline)
+-              kmem_cache_free(sbi->inline_xattr_slab, xattr_addr);
++              kmem_cache_free(inline_xattr_slab, xattr_addr);
+       else
+               kfree(xattr_addr);
+ }
+@@ -830,25 +831,14 @@ int f2fs_setxattr(struct inode *inode, i
+       return err;
+ }
+-int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi)
++int __init f2fs_init_xattr_cache(void)
+ {
+-      dev_t dev = sbi->sb->s_bdev->bd_dev;
+-      char slab_name[32];
+-
+-      sprintf(slab_name, "f2fs_xattr_entry-%u:%u", MAJOR(dev), MINOR(dev));
+-
+-      sbi->inline_xattr_slab_size = F2FS_OPTION(sbi).inline_xattr_size *
+-                                      sizeof(__le32) + XATTR_PADDING_SIZE;
+-
+-      sbi->inline_xattr_slab = f2fs_kmem_cache_create(slab_name,
+-                                      sbi->inline_xattr_slab_size);
+-      if (!sbi->inline_xattr_slab)
+-              return -ENOMEM;
+-
+-      return 0;
++      inline_xattr_slab = f2fs_kmem_cache_create("f2fs_xattr_entry",
++                                      DEFAULT_XATTR_SLAB_SIZE);
++      return inline_xattr_slab ? 0 : -ENOMEM;
+ }
+-void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi)
++void f2fs_destroy_xattr_cache(void)
+ {
+-      kmem_cache_destroy(sbi->inline_xattr_slab);
++      kmem_cache_destroy(inline_xattr_slab);
+ }
+--- a/fs/f2fs/xattr.h
++++ b/fs/f2fs/xattr.h
+@@ -89,6 +89,8 @@ struct f2fs_xattr_entry {
+                       F2FS_TOTAL_EXTRA_ATTR_SIZE / sizeof(__le32) -   \
+                       DEF_INLINE_RESERVED_SIZE -                      \
+                       MIN_INLINE_DENTRY_SIZE / sizeof(__le32))
++#define DEFAULT_XATTR_SLAB_SIZE       (DEFAULT_INLINE_XATTR_ADDRS *           \
++                              sizeof(__le32) + XATTR_PADDING_SIZE)
+ /*
+  * On-disk structure of f2fs_xattr
+@@ -132,8 +134,8 @@ extern int f2fs_setxattr(struct inode *,
+ extern int f2fs_getxattr(struct inode *, int, const char *, void *,
+                                               size_t, struct page *);
+ extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
+-extern int f2fs_init_xattr_caches(struct f2fs_sb_info *);
+-extern void f2fs_destroy_xattr_caches(struct f2fs_sb_info *);
++extern int __init f2fs_init_xattr_cache(void);
++extern void f2fs_destroy_xattr_cache(void);
+ #else
+ #define f2fs_xattr_handlers   NULL
+@@ -150,8 +152,8 @@ static inline int f2fs_getxattr(struct i
+ {
+       return -EOPNOTSUPP;
+ }
+-static inline int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi) { return 0; }
+-static inline void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi) { }
++static inline int __init f2fs_init_xattr_cache(void) { return 0; }
++static inline void f2fs_destroy_xattr_cache(void) { }
+ #endif
+ #ifdef CONFIG_F2FS_FS_SECURITY
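
For readers unfamiliar with slab merging: the warning fires because
same-sized caches may share one core structure internally, so destroying
one alias only drops a reference while the surviving structure keeps the
first alias's name. The toy userspace model below (illustrative only, not
the real mm/slab code) reproduces the sequence the message describes:

#include <stdio.h>
#include <string.h>

struct cache { char name[32]; int size; int refs; };
static struct cache pool[8];

static struct cache *cache_create(const char *name, int size)
{
        /* Merge with any live cache of the same object size. */
        for (int i = 0; i < 8; i++) {
                struct cache *c = &pool[i];

                if (c->refs && c->size == size) {
                        if (strcmp(c->name, name) == 0) {
                                printf("WARN: kmem_cache of name '%s' already exists\n", name);
                                return NULL;
                        }
                        c->refs++;      /* alias onto the shared structure */
                        return c;
                }
        }
        for (int i = 0; i < 8; i++) {
                if (!pool[i].refs) {
                        snprintf(pool[i].name, sizeof(pool[i].name), "%s", name);
                        pool[i].size = size;
                        pool[i].refs = 1;
                        return &pool[i];
                }
        }
        return NULL;
}

static void cache_destroy(struct cache *c)
{
        if (c)
                c->refs--;      /* drops a ref; the core structure survives */
}

int main(void)
{
        struct cache *a = cache_create("f2fs_xattr_entry-7:3", 200);
        struct cache *b = cache_create("f2fs_xattr_entry-7:7", 200); /* merged */

        cache_destroy(a);       /* 'b' still pins the shared structure */
        cache_create("f2fs_xattr_entry-7:3", 200); /* hits the WARN */
        cache_destroy(b);
        return 0;
}

A single module-lifetime cache, as the patch introduces, sidesteps the
per-sb create/destroy cycle entirely.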
diff --git a/queue-6.12/gfs2-fix-freeze-error-handling.patch b/queue-6.12/gfs2-fix-freeze-error-handling.patch
new file mode 100644 (file)
index 0000000..ab354fd
--- /dev/null
@@ -0,0 +1,47 @@
+From stable+bounces-204144-greg=kroah.com@vger.kernel.org Mon Dec 29 23:33:40 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Dec 2025 17:33:32 -0500
+Subject: gfs2: fix freeze error handling
+To: stable@vger.kernel.org
+Cc: Alexey Velichayshiy <a.velichayshiy@ispras.ru>, Andreas Gruenbacher <agruenba@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251229223332.1744624-1-sashal@kernel.org>
+
+From: Alexey Velichayshiy <a.velichayshiy@ispras.ru>
+
+[ Upstream commit 4cfc7d5a4a01d2133b278cdbb1371fba1b419174 ]
+
+After commit b77b4a4815a9 ("gfs2: Rework freeze / thaw logic"),
+the freeze error handling is broken because gfs2_do_thaw()
+overwrites the 'error' variable, causing incorrect processing
+of the original freeze error.
+
+Fix this by calling gfs2_do_thaw() when gfs2_lock_fs_check_clean()
+fails but ignoring its return value to preserve the original
+freeze error for proper reporting.
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: b77b4a4815a9 ("gfs2: Rework freeze / thaw logic")
+Cc: stable@vger.kernel.org # v6.5+
+Signed-off-by: Alexey Velichayshiy <a.velichayshiy@ispras.ru>
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+[ gfs2_do_thaw() only takes 2 params ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/gfs2/super.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -759,9 +759,7 @@ static int gfs2_freeze_super(struct supe
+                       break;
+               }
+-              error = gfs2_do_thaw(sdp, who);
+-              if (error)
+-                      goto out;
++              (void)gfs2_do_thaw(sdp, who);
+               if (error == -EBUSY)
+                       fs_err(sdp, "waiting for recovery before freeze\n");
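
The fix amounts to keeping the first error across cleanup: the thaw still
runs, but its return value is deliberately discarded so the original
freeze failure drives the -EBUSY reporting. A minimal standalone C sketch
of that pattern, with placeholder names rather than the gfs2 symbols:

#include <errno.h>
#include <stdio.h>

static int lock_fs_check_clean(void)
{
        return -EBUSY;  /* pretend the freeze attempt fails */
}

static int do_thaw(void)
{
        return 0;       /* cleanup; its result must not clobber 'error' */
}

int main(void)
{
        int error = lock_fs_check_clean();

        if (error) {
                /* Cast away the thaw result so 'error' still holds
                 * the original freeze failure for reporting. */
                (void)do_thaw();

                if (error == -EBUSY)
                        fprintf(stderr, "waiting for recovery before freeze\n");
                return 1;
        }
        return 0;
}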
diff --git a/queue-6.12/gpiolib-acpi-add-a-quirk-for-acer-nitro-v15.patch b/queue-6.12/gpiolib-acpi-add-a-quirk-for-acer-nitro-v15.patch
new file mode 100644 (file)
index 0000000..f258efd
--- /dev/null
@@ -0,0 +1,55 @@
+From stable+bounces-204353-greg=kroah.com@vger.kernel.org Wed Dec 31 15:32:29 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Dec 2025 09:32:16 -0500
+Subject: gpiolib: acpi: Add a quirk for Acer Nitro V15
+To: stable@vger.kernel.org
+Cc: Mario Limonciello <mario.limonciello@amd.com>, Mika Westerberg <westeri@kernel.org>, Andy Shevchenko <andriy.shevchenko@linux.intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251231143218.3042757-5-sashal@kernel.org>
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit 9ab29ed505557bd106e292184fa4917955eb8e6e ]
+
+It is reported that on the Acer Nitro V15, suspend only works properly if
+the keyboard backlight is turned off. Looking into the issue, the Acer
+Nitro V15 has a GPIO (#8) specified in _AEI, but it has no matching notify
+device in _EVT. The values of GPIO #8 change as the keyboard backlight is
+turned on and off.
+
+This makes it seem that GPIO #8 is actually intended solely for the
+keyboard backlight. Turning off the interrupt for this GPIO fixes the
+issue. Add a quirk that does just that.
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4169
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Acked-by: Mika Westerberg <westeri@kernel.org>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Stable-dep-of: 2d967310c49e ("gpiolib: acpi: Add quirk for Dell Precision 7780")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpiolib-acpi-quirks.c |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/gpio/gpiolib-acpi-quirks.c
++++ b/drivers/gpio/gpiolib-acpi-quirks.c
+@@ -331,6 +331,19 @@ static const struct dmi_system_id gpioli
+                       .ignore_interrupt = "AMDI0030:00@11",
+               },
+       },
++      {
++              /*
++               * Wakeup only works when keyboard backlight is turned off
++               * https://gitlab.freedesktop.org/drm/amd/-/issues/4169
++               */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++                      DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 15"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_interrupt = "AMDI0030:00@8",
++              },
++      },
+       {} /* Terminating entry */
+ };
diff --git a/queue-6.12/gpiolib-acpi-add-acpi_gpio_need_run_edge_events_on_boot-getter.patch b/queue-6.12/gpiolib-acpi-add-acpi_gpio_need_run_edge_events_on_boot-getter.patch
new file mode 100644 (file)
index 0000000..f1c1061
--- /dev/null
@@ -0,0 +1,63 @@
+From stable+bounces-204351-greg=kroah.com@vger.kernel.org Wed Dec 31 15:32:31 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Dec 2025 09:32:14 -0500
+Subject: gpiolib: acpi: Add acpi_gpio_need_run_edge_events_on_boot() getter
+To: stable@vger.kernel.org
+Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>, Hans de Goede <hdegoede@redhat.com>, Mika Westerberg <mika.westerberg@linux.intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251231143218.3042757-3-sashal@kernel.org>
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 5666a8777add09d1167de308df2147983486a0af ]
+
+Add an acpi_gpio_need_run_edge_events_on_boot() getter, which moves
+towards isolating the GPIO ACPI and quirk APIs. It will help split
+them completely in the next changes.
+
+No functional changes.
+
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Stable-dep-of: 2d967310c49e ("gpiolib: acpi: Add quirk for Dell Precision 7780")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpiolib-acpi.c |    7 ++++++-
+ drivers/gpio/gpiolib-acpi.h |    2 ++
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -268,7 +268,7 @@ static void acpi_gpiochip_request_irq(st
+       event->irq_requested = true;
+       /* Make sure we trigger the initial state of edge-triggered IRQs */
+-      if (run_edge_events_on_boot &&
++      if (acpi_gpio_need_run_edge_events_on_boot() &&
+           (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
+               value = gpiod_get_raw_value_cansleep(event->desc);
+               if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+@@ -371,6 +371,11 @@ void acpi_gpio_remove_from_deferred_list
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+ }
++int acpi_gpio_need_run_edge_events_on_boot(void)
++{
++      return run_edge_events_on_boot;
++}
++
+ bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list, const char *controller_in,
+                             unsigned int pin_in)
+ {
+--- a/drivers/gpio/gpiolib-acpi.h
++++ b/drivers/gpio/gpiolib-acpi.h
+@@ -63,6 +63,8 @@ void acpi_gpio_process_deferred_list(str
+ bool acpi_gpio_add_to_deferred_list(struct list_head *list);
+ void acpi_gpio_remove_from_deferred_list(struct list_head *list);
++int acpi_gpio_need_run_edge_events_on_boot(void);
++
+ enum acpi_gpio_ignore_list {
+       ACPI_GPIO_IGNORE_WAKE,
+       ACPI_GPIO_IGNORE_INTERRUPT,
diff --git a/queue-6.12/gpiolib-acpi-add-quirk-for-asus-proart-px13.patch b/queue-6.12/gpiolib-acpi-add-quirk-for-asus-proart-px13.patch
new file mode 100644 (file)
index 0000000..9f26d5d
--- /dev/null
@@ -0,0 +1,55 @@
+From stable+bounces-204354-greg=kroah.com@vger.kernel.org Wed Dec 31 15:32:31 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Dec 2025 09:32:17 -0500
+Subject: gpiolib: acpi: Add quirk for ASUS ProArt PX13
+To: stable@vger.kernel.org
+Cc: "Mario Limonciello (AMD)" <superm1@kernel.org>, Amit Chaudhari <amitchaudhari@mac.com>, Mika Westerberg <mika.westerberg@linux.intel.com>, Linus Walleij <linus.walleij@linaro.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251231143218.3042757-6-sashal@kernel.org>
+
+From: "Mario Limonciello (AMD)" <superm1@kernel.org>
+
+[ Upstream commit 23800ad1265f10c2bc6f42154ce4d20e59f2900e ]
+
+The ASUS ProArt PX13 has a spurious wakeup event from the touchpad
+a few moments after entering hardware sleep.  This can be avoided
+by preventing the touchpad from being a wake source.
+
+Add it to the wakeup ignore list.
+
+Reported-by: Amit Chaudhari <amitchaudhari@mac.com>
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4482
+Tested-by: Amit Chaudhari <amitchaudhari@mac.com>
+Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Link: https://lore.kernel.org/20250814183430.3887973-1-superm1@kernel.org
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Stable-dep-of: 2d967310c49e ("gpiolib: acpi: Add quirk for Dell Precision 7780")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpiolib-acpi-quirks.c |   14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/drivers/gpio/gpiolib-acpi-quirks.c
++++ b/drivers/gpio/gpiolib-acpi-quirks.c
+@@ -344,6 +344,20 @@ static const struct dmi_system_id gpioli
+                       .ignore_interrupt = "AMDI0030:00@8",
+               },
+       },
++      {
++              /*
++               * Spurious wakeups from TP_ATTN# pin
++               * Found in BIOS 5.35
++               * https://gitlab.freedesktop.org/drm/amd/-/issues/4482
++               */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++                      DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt PX13"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_wake = "ASCP1A00:00@8",
++              },
++      },
+       {} /* Terminating entry */
+ };
diff --git a/queue-6.12/gpiolib-acpi-add-quirk-for-dell-precision-7780.patch b/queue-6.12/gpiolib-acpi-add-quirk-for-dell-precision-7780.patch
new file mode 100644 (file)
index 0000000..c215386
--- /dev/null
@@ -0,0 +1,60 @@
+From stable+bounces-204355-greg=kroah.com@vger.kernel.org Wed Dec 31 15:32:34 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Dec 2025 09:32:18 -0500
+Subject: gpiolib: acpi: Add quirk for Dell Precision 7780
+To: stable@vger.kernel.org
+Cc: Askar Safin <safinaskar@gmail.com>, Andy Shevchenko <andriy.shevchenko@linux.intel.com>, Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251231143218.3042757-7-sashal@kernel.org>
+
+From: Askar Safin <safinaskar@gmail.com>
+
+[ Upstream commit 2d967310c49ed93ac11cef408a55ddf15c3dd52e ]
+
+The Dell Precision 7780 often wakes up on its own from suspend. Sometimes
+the wakeup happens immediately (i.e. within 7 seconds), sometimes it
+happens after, say, 30 minutes.
+
+Fixes: 1796f808e4bb ("HID: i2c-hid: acpi: Stop setting wakeup_capable")
+Link: https://lore.kernel.org/linux-i2c/197ae95ffd8.dc819e60457077.7692120488609091556@zohomail.com/
+Cc: stable@vger.kernel.org
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Askar Safin <safinaskar@gmail.com>
+Link: https://lore.kernel.org/r/20251206180414.3183334-2-safinaskar@gmail.com
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpiolib-acpi-quirks.c |   22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+--- a/drivers/gpio/gpiolib-acpi-quirks.c
++++ b/drivers/gpio/gpiolib-acpi-quirks.c
+@@ -358,6 +358,28 @@ static const struct dmi_system_id gpioli
+                       .ignore_wake = "ASCP1A00:00@8",
+               },
+       },
++      {
++              /*
++               * Spurious wakeups, likely from touchpad controller
++               * Dell Precision 7780
++               * Found in BIOS 1.24.1
++               *
++               * Found in touchpad firmware, installed by Dell Touchpad Firmware Update Utility version 1160.4196.9, A01
++               * ( Dell-Touchpad-Firmware-Update-Utility_VYGNN_WIN64_1160.4196.9_A00.EXE ),
++               * released on 11 Jul 2024
++               *
++               * https://lore.kernel.org/linux-i2c/197ae95ffd8.dc819e60457077.7692120488609091556@zohomail.com/
++               */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++                      DMI_MATCH(DMI_PRODUCT_FAMILY, "Precision"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7780"),
++                      DMI_MATCH(DMI_BOARD_NAME, "0C6JVW"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_wake = "VEN_0488:00@355",
++              },
++      },
+       {} /* Terminating entry */
+ };
diff --git a/queue-6.12/gpiolib-acpi-handle-deferred-list-via-new-api.patch b/queue-6.12/gpiolib-acpi-handle-deferred-list-via-new-api.patch
new file mode 100644 (file)
index 0000000..323c7f3
--- /dev/null
@@ -0,0 +1,140 @@
+From stable+bounces-204350-greg=kroah.com@vger.kernel.org Wed Dec 31 15:32:29 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Dec 2025 09:32:13 -0500
+Subject: gpiolib: acpi: Handle deferred list via new API
+To: stable@vger.kernel.org
+Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>, Hans de Goede <hdegoede@redhat.com>, Mika Westerberg <mika.westerberg@linux.intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251231143218.3042757-2-sashal@kernel.org>
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit a594877663d1e3d5cf57ec8af739582fc5c47cec ]
+
+Introduce a new API and handle the deferred list via it, which moves
+towards isolating the GPIO ACPI and quirk APIs. It will help split
+them completely in the next changes.
+
+No functional changes.
+
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Stable-dep-of: 2d967310c49e ("gpiolib: acpi: Add quirk for Dell Precision 7780")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpiolib-acpi.c |   52 +++++++++++++++++++++++++++-----------------
+ drivers/gpio/gpiolib-acpi.h |    5 ++++
+ 2 files changed, 37 insertions(+), 20 deletions(-)
+
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -350,6 +350,27 @@ static struct gpio_desc *acpi_request_ow
+       return desc;
+ }
++bool acpi_gpio_add_to_deferred_list(struct list_head *list)
++{
++      bool defer;
++
++      mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
++      defer = !acpi_gpio_deferred_req_irqs_done;
++      if (defer)
++              list_add(list, &acpi_gpio_deferred_req_irqs_list);
++      mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
++
++      return defer;
++}
++
++void acpi_gpio_remove_from_deferred_list(struct list_head *list)
++{
++      mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
++      if (!list_empty(list))
++              list_del_init(list);
++      mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
++}
++
+ bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list, const char *controller_in,
+                             unsigned int pin_in)
+ {
+@@ -536,7 +557,6 @@ void acpi_gpiochip_request_interrupts(st
+       struct acpi_gpio_chip *acpi_gpio;
+       acpi_handle handle;
+       acpi_status status;
+-      bool defer;
+       if (!chip->parent || !chip->to_irq)
+               return;
+@@ -555,14 +575,7 @@ void acpi_gpiochip_request_interrupts(st
+       acpi_walk_resources(handle, METHOD_NAME__AEI,
+                           acpi_gpiochip_alloc_event, acpi_gpio);
+-      mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+-      defer = !acpi_gpio_deferred_req_irqs_done;
+-      if (defer)
+-              list_add(&acpi_gpio->deferred_req_irqs_list_entry,
+-                       &acpi_gpio_deferred_req_irqs_list);
+-      mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+-
+-      if (defer)
++      if (acpi_gpio_add_to_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry))
+               return;
+       acpi_gpiochip_request_irqs(acpi_gpio);
+@@ -594,10 +607,7 @@ void acpi_gpiochip_free_interrupts(struc
+       if (ACPI_FAILURE(status))
+               return;
+-      mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+-      if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
+-              list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
+-      mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
++      acpi_gpio_remove_from_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry);
+       list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
+               if (event->irq_requested) {
+@@ -615,6 +625,14 @@ void acpi_gpiochip_free_interrupts(struc
+ }
+ EXPORT_SYMBOL_GPL(acpi_gpiochip_free_interrupts);
++void __init acpi_gpio_process_deferred_list(struct list_head *list)
++{
++      struct acpi_gpio_chip *acpi_gpio, *tmp;
++
++      list_for_each_entry_safe(acpi_gpio, tmp, list, deferred_req_irqs_list_entry)
++              acpi_gpiochip_request_irqs(acpi_gpio);
++}
++
+ int acpi_dev_add_driver_gpios(struct acpi_device *adev,
+                             const struct acpi_gpio_mapping *gpios)
+ {
+@@ -1505,14 +1523,8 @@ int acpi_gpio_count(const struct fwnode_
+ /* Run deferred acpi_gpiochip_request_irqs() */
+ static int __init acpi_gpio_handle_deferred_request_irqs(void)
+ {
+-      struct acpi_gpio_chip *acpi_gpio, *tmp;
+-
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+-      list_for_each_entry_safe(acpi_gpio, tmp,
+-                               &acpi_gpio_deferred_req_irqs_list,
+-                               deferred_req_irqs_list_entry)
+-              acpi_gpiochip_request_irqs(acpi_gpio);
+-
++      acpi_gpio_process_deferred_list(&acpi_gpio_deferred_req_irqs_list);
+       acpi_gpio_deferred_req_irqs_done = true;
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+--- a/drivers/gpio/gpiolib-acpi.h
++++ b/drivers/gpio/gpiolib-acpi.h
+@@ -58,6 +58,11 @@ static inline int acpi_gpio_count(const
+ }
+ #endif
++void acpi_gpio_process_deferred_list(struct list_head *list);
++
++bool acpi_gpio_add_to_deferred_list(struct list_head *list);
++void acpi_gpio_remove_from_deferred_list(struct list_head *list);
++
+ enum acpi_gpio_ignore_list {
+       ACPI_GPIO_IGNORE_WAKE,
+       ACPI_GPIO_IGNORE_INTERRUPT,
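
The new helpers wrap a common boot-ordering pattern: IRQ requests that
arrive before late init are queued under a mutex, and once the deferred
list has been drained, later requests run immediately. A small userspace
C sketch of that pattern (illustrative names only; compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool init_done;
static int deferred[8], nr_deferred;

/* Returns true if the request was queued for later. */
static bool add_to_deferred_list(int chip)
{
        bool defer;

        pthread_mutex_lock(&lock);
        defer = !init_done;
        if (defer)
                deferred[nr_deferred++] = chip;
        pthread_mutex_unlock(&lock);
        return defer;
}

static void request_irqs(int chip)
{
        printf("requesting IRQs for chip %d\n", chip);
}

static void handle_deferred(void)
{
        pthread_mutex_lock(&lock);
        for (int i = 0; i < nr_deferred; i++)
                request_irqs(deferred[i]);
        nr_deferred = 0;
        init_done = true;       /* from now on, request immediately */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        if (!add_to_deferred_list(1))   /* too early: gets queued */
                request_irqs(1);
        handle_deferred();              /* late init drains the queue */
        if (!add_to_deferred_list(2))   /* now runs immediately */
                request_irqs(2);
        return 0;
}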
diff --git a/queue-6.12/gpiolib-acpi-move-quirks-to-a-separate-file.patch b/queue-6.12/gpiolib-acpi-move-quirks-to-a-separate-file.patch
new file mode 100644 (file)
index 0000000..84318c1
--- /dev/null
@@ -0,0 +1,3598 @@
+From stable+bounces-204352-greg=kroah.com@vger.kernel.org Wed Dec 31 15:32:26 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Dec 2025 09:32:15 -0500
+Subject: gpiolib: acpi: Move quirks to a separate file
+To: stable@vger.kernel.org
+Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>, Hans de Goede <hdegoede@redhat.com>, Mika Westerberg <mika.westerberg@linux.intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251231143218.3042757-4-sashal@kernel.org>
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 92dc572852ddcae687590cb159189004d58e382e ]
+
+The gpiolib-acpi.c file is huge enough even without the DMI quirks.
+Move them to a separate file for better maintenance.
+
+No functional change intended.
+
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Stable-dep-of: 2d967310c49e ("gpiolib: acpi: Add quirk for Dell Precision 7780")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/Makefile              |    1 
+ drivers/gpio/gpiolib-acpi-core.c   | 1419 +++++++++++++++++++++++++++++
+ drivers/gpio/gpiolib-acpi-quirks.c |  363 +++++++
+ drivers/gpio/gpiolib-acpi.c        | 1765 -------------------------------------
+ 4 files changed, 1783 insertions(+), 1765 deletions(-)
+ rename drivers/gpio/{gpiolib-acpi.c => gpiolib-acpi-core.c} (79%)
+ create mode 100644 drivers/gpio/gpiolib-acpi-quirks.c
+
+--- a/drivers/gpio/Makefile
++++ b/drivers/gpio/Makefile
+@@ -10,6 +10,7 @@ obj-$(CONFIG_OF_GPIO)                += gpiolib-of.o
+ obj-$(CONFIG_GPIO_CDEV)               += gpiolib-cdev.o
+ obj-$(CONFIG_GPIO_SYSFS)      += gpiolib-sysfs.o
+ obj-$(CONFIG_GPIO_ACPI)               += gpiolib-acpi.o
++gpiolib-acpi-y                        := gpiolib-acpi-core.o gpiolib-acpi-quirks.o
+ obj-$(CONFIG_GPIOLIB)         += gpiolib-swnode.o
+ # Device drivers. Generally keep list sorted alphabetically
+--- /dev/null
++++ b/drivers/gpio/gpiolib-acpi-core.c
+@@ -0,0 +1,1419 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * ACPI helpers for GPIO API
++ *
++ * Copyright (C) 2012, Intel Corporation
++ * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
++ *          Mika Westerberg <mika.westerberg@linux.intel.com>
++ */
++
++#include <linux/acpi.h>
++#include <linux/dmi.h>
++#include <linux/errno.h>
++#include <linux/export.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/mutex.h>
++#include <linux/pinctrl/pinctrl.h>
++
++#include <linux/gpio/consumer.h>
++#include <linux/gpio/driver.h>
++#include <linux/gpio/machine.h>
++
++#include "gpiolib.h"
++#include "gpiolib-acpi.h"
++
++/**
++ * struct acpi_gpio_event - ACPI GPIO event handler data
++ *
++ * @node:       list-entry of the events list of the struct acpi_gpio_chip
++ * @handle:     handle of ACPI method to execute when the IRQ triggers
++ * @handler:    handler function to pass to request_irq() when requesting the IRQ
++ * @pin:        GPIO pin number on the struct gpio_chip
++ * @irq:        Linux IRQ number for the event, for request_irq() / free_irq()
++ * @irqflags:   flags to pass to request_irq() when requesting the IRQ
++ * @irq_is_wake:  If the ACPI flags indicate the IRQ is a wakeup source
++ * @irq_requested:True if request_irq() has been done
++ * @desc:       struct gpio_desc for the GPIO pin for this event
++ */
++struct acpi_gpio_event {
++      struct list_head node;
++      acpi_handle handle;
++      irq_handler_t handler;
++      unsigned int pin;
++      unsigned int irq;
++      unsigned long irqflags;
++      bool irq_is_wake;
++      bool irq_requested;
++      struct gpio_desc *desc;
++};
++
++struct acpi_gpio_connection {
++      struct list_head node;
++      unsigned int pin;
++      struct gpio_desc *desc;
++};
++
++struct acpi_gpio_chip {
++      /*
++       * ACPICA requires that the first field of the context parameter
++       * passed to acpi_install_address_space_handler() is large enough
++       * to hold struct acpi_connection_info.
++       */
++      struct acpi_connection_info conn_info;
++      struct list_head conns;
++      struct mutex conn_lock;
++      struct gpio_chip *chip;
++      struct list_head events;
++      struct list_head deferred_req_irqs_list_entry;
++};
++
++/**
++ * struct acpi_gpio_info - ACPI GPIO specific information
++ * @adev: reference to ACPI device which consumes GPIO resource
++ * @flags: GPIO initialization flags
++ * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
++ * @pin_config: pin bias as provided by ACPI
++ * @polarity: interrupt polarity as provided by ACPI
++ * @triggering: triggering type as provided by ACPI
++ * @wake_capable: wake capability as provided by ACPI
++ * @debounce: debounce timeout as provided by ACPI
++ * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
++ */
++struct acpi_gpio_info {
++      struct acpi_device *adev;
++      enum gpiod_flags flags;
++      bool gpioint;
++      int pin_config;
++      int polarity;
++      int triggering;
++      bool wake_capable;
++      unsigned int debounce;
++      unsigned int quirks;
++};
++
++static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data)
++{
++      /* First check the actual GPIO device */
++      if (device_match_acpi_handle(&gc->gpiodev->dev, data))
++              return true;
++
++      /*
++       * When the ACPI device is artificially split to the banks of GPIOs,
++       * where each of them is represented by a separate GPIO device,
++       * the firmware node of the physical device may not be shared among
++       * the banks as they may require different values for the same property,
++       * e.g., number of GPIOs in a certain bank. In such case the ACPI handle
++       * of a GPIO device is NULL and can not be used. Hence we have to check
++       * the parent device to be sure that there is no match before bailing
++       * out.
++       */
++      if (gc->parent)
++              return device_match_acpi_handle(gc->parent, data);
++
++      return false;
++}
++
++/**
++ * acpi_get_gpiod() - Translate ACPI GPIO pin to GPIO descriptor usable with GPIO API
++ * @path:     ACPI GPIO controller full path name, (e.g. "\\_SB.GPO1")
++ * @pin:      ACPI GPIO pin number (0-based, controller-relative)
++ *
++ * Returns:
++ * GPIO descriptor to use with Linux generic GPIO API.
++ * If the GPIO cannot be translated or there is an error an ERR_PTR is
++ * returned.
++ *
++ * Specifically returns %-EPROBE_DEFER if the referenced GPIO
++ * controller does not have GPIO chip registered at the moment. This is to
++ * support probe deferral.
++ */
++static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin)
++{
++      acpi_handle handle;
++      acpi_status status;
++
++      status = acpi_get_handle(NULL, path, &handle);
++      if (ACPI_FAILURE(status))
++              return ERR_PTR(-ENODEV);
++
++      struct gpio_device *gdev __free(gpio_device_put) =
++                              gpio_device_find(handle, acpi_gpiochip_find);
++      if (!gdev)
++              return ERR_PTR(-EPROBE_DEFER);
++
++      /*
++       * FIXME: keep track of the reference to the GPIO device somehow
++       * instead of putting it here.
++       */
++      return gpio_device_get_desc(gdev, pin);
++}
++
++static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
++{
++      struct acpi_gpio_event *event = data;
++
++      acpi_evaluate_object(event->handle, NULL, NULL, NULL);
++
++      return IRQ_HANDLED;
++}
++
++static irqreturn_t acpi_gpio_irq_handler_evt(int irq, void *data)
++{
++      struct acpi_gpio_event *event = data;
++
++      acpi_execute_simple_method(event->handle, NULL, event->pin);
++
++      return IRQ_HANDLED;
++}
++
++static void acpi_gpio_chip_dh(acpi_handle handle, void *data)
++{
++      /* The address of this function is used as a key. */
++}
++
++bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
++                              struct acpi_resource_gpio **agpio)
++{
++      struct acpi_resource_gpio *gpio;
++
++      if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
++              return false;
++
++      gpio = &ares->data.gpio;
++      if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_INT)
++              return false;
++
++      *agpio = gpio;
++      return true;
++}
++EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
++
++/**
++ * acpi_gpio_get_io_resource - Fetch details of an ACPI resource if it is a GPIO
++ *                           I/O resource or return False if not.
++ * @ares:     Pointer to the ACPI resource to fetch
++ * @agpio:    Pointer to a &struct acpi_resource_gpio to store the output pointer
++ *
++ * Returns:
++ * %true if GpioIo resource is found, %false otherwise.
++ */
++bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
++                             struct acpi_resource_gpio **agpio)
++{
++      struct acpi_resource_gpio *gpio;
++
++      if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
++              return false;
++
++      gpio = &ares->data.gpio;
++      if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_IO)
++              return false;
++
++      *agpio = gpio;
++      return true;
++}
++EXPORT_SYMBOL_GPL(acpi_gpio_get_io_resource);
++
++static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
++                                    struct acpi_gpio_event *event)
++{
++      struct device *parent = acpi_gpio->chip->parent;
++      int ret, value;
++
++      ret = request_threaded_irq(event->irq, NULL, event->handler,
++                                 event->irqflags | IRQF_ONESHOT, "ACPI:Event", event);
++      if (ret) {
++              dev_err(parent, "Failed to setup interrupt handler for %d\n", event->irq);
++              return;
++      }
++
++      if (event->irq_is_wake)
++              enable_irq_wake(event->irq);
++
++      event->irq_requested = true;
++
++      /* Make sure we trigger the initial state of edge-triggered IRQs */
++      if (acpi_gpio_need_run_edge_events_on_boot() &&
++          (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
++              value = gpiod_get_raw_value_cansleep(event->desc);
++              if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
++                  ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
++                      event->handler(event->irq, event);
++      }
++}
++
++static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
++{
++      struct acpi_gpio_event *event;
++
++      list_for_each_entry(event, &acpi_gpio->events, node)
++              acpi_gpiochip_request_irq(acpi_gpio, event);
++}
++
++static enum gpiod_flags
++acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity)
++{
++      /* GpioInt() implies input configuration */
++      if (agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT)
++              return GPIOD_IN;
++
++      switch (agpio->io_restriction) {
++      case ACPI_IO_RESTRICT_INPUT:
++              return GPIOD_IN;
++      case ACPI_IO_RESTRICT_OUTPUT:
++              /*
++               * ACPI GPIO resources don't contain an initial value for the
++               * GPIO. Therefore we deduce that value from the pull field
++               * and the polarity instead. If the pin is pulled up we assume
++               * default to be high, if it is pulled down we assume default
++               * to be low, otherwise we leave pin untouched. For active low
++               * polarity values will be switched. See also
++               * Documentation/firmware-guide/acpi/gpio-properties.rst.
++               */
++              switch (agpio->pin_config) {
++              case ACPI_PIN_CONFIG_PULLUP:
++                      return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH;
++              case ACPI_PIN_CONFIG_PULLDOWN:
++                      return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
++              default:
++                      break;
++              }
++              break;
++      default:
++              break;
++      }
++
++      /*
++       * Assume that the BIOS has configured the direction and pull
++       * accordingly.
++       */
++      return GPIOD_ASIS;
++}
++
++static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
++                                              struct acpi_resource_gpio *agpio,
++                                              unsigned int index,
++                                              const char *label)
++{
++      int polarity = GPIO_ACTIVE_HIGH;
++      enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity);
++      unsigned int pin = agpio->pin_table[index];
++      struct gpio_desc *desc;
++      int ret;
++
++      desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags);
++      if (IS_ERR(desc))
++              return desc;
++
++      /* ACPI uses hundredths of milliseconds units */
++      ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
++      if (ret)
++              dev_warn(chip->parent,
++                       "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
++                       pin, ret);
++
++      return desc;
++}
++
++static bool acpi_gpio_irq_is_wake(struct device *parent,
++                                const struct acpi_resource_gpio *agpio)
++{
++      unsigned int pin = agpio->pin_table[0];
++
++      if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
++              return false;
++
++      if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_WAKE, dev_name(parent), pin)) {
++              dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
++              return false;
++      }
++
++      return true;
++}
++
++/* Always returns AE_OK so that we keep looping over the resources */
++static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
++                                           void *context)
++{
++      struct acpi_gpio_chip *acpi_gpio = context;
++      struct gpio_chip *chip = acpi_gpio->chip;
++      struct acpi_resource_gpio *agpio;
++      acpi_handle handle, evt_handle;
++      struct acpi_gpio_event *event;
++      irq_handler_t handler = NULL;
++      struct gpio_desc *desc;
++      unsigned int pin;
++      int ret, irq;
++
++      if (!acpi_gpio_get_irq_resource(ares, &agpio))
++              return AE_OK;
++
++      handle = ACPI_HANDLE(chip->parent);
++      pin = agpio->pin_table[0];
++
++      if (pin <= 255) {
++              char ev_name[8];
++              sprintf(ev_name, "_%c%02X",
++                      agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
++                      pin);
++              if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
++                      handler = acpi_gpio_irq_handler;
++      }
++      if (!handler) {
++              if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle)))
++                      handler = acpi_gpio_irq_handler_evt;
++      }
++      if (!handler)
++              return AE_OK;
++
++      if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_INTERRUPT, dev_name(chip->parent), pin)) {
++              dev_info(chip->parent, "Ignoring interrupt on pin %u\n", pin);
++              return AE_OK;
++      }
++
++      desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event");
++      if (IS_ERR(desc)) {
++              dev_err(chip->parent,
++                      "Failed to request GPIO for pin 0x%04X, err %ld\n",
++                      pin, PTR_ERR(desc));
++              return AE_OK;
++      }
++
++      ret = gpiochip_lock_as_irq(chip, pin);
++      if (ret) {
++              dev_err(chip->parent,
++                      "Failed to lock GPIO pin 0x%04X as interrupt, err %d\n",
++                      pin, ret);
++              goto fail_free_desc;
++      }
++
++      irq = gpiod_to_irq(desc);
++      if (irq < 0) {
++              dev_err(chip->parent,
++                      "Failed to translate GPIO pin 0x%04X to IRQ, err %d\n",
++                      pin, irq);
++              goto fail_unlock_irq;
++      }
++
++      event = kzalloc(sizeof(*event), GFP_KERNEL);
++      if (!event)
++              goto fail_unlock_irq;
++
++      event->irqflags = IRQF_ONESHOT;
++      if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
++              if (agpio->polarity == ACPI_ACTIVE_HIGH)
++                      event->irqflags |= IRQF_TRIGGER_HIGH;
++              else
++                      event->irqflags |= IRQF_TRIGGER_LOW;
++      } else {
++              switch (agpio->polarity) {
++              case ACPI_ACTIVE_HIGH:
++                      event->irqflags |= IRQF_TRIGGER_RISING;
++                      break;
++              case ACPI_ACTIVE_LOW:
++                      event->irqflags |= IRQF_TRIGGER_FALLING;
++                      break;
++              default:
++                      event->irqflags |= IRQF_TRIGGER_RISING |
++                                         IRQF_TRIGGER_FALLING;
++                      break;
++              }
++      }
++
++      event->handle = evt_handle;
++      event->handler = handler;
++      event->irq = irq;
++      event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio);
++      event->pin = pin;
++      event->desc = desc;
++
++      list_add_tail(&event->node, &acpi_gpio->events);
++
++      return AE_OK;
++
++fail_unlock_irq:
++      gpiochip_unlock_as_irq(chip, pin);
++fail_free_desc:
++      gpiochip_free_own_desc(desc);
++
++      return AE_OK;
++}
++
++/**
++ * acpi_gpiochip_request_interrupts() - Register isr for gpio chip ACPI events
++ * @chip:      GPIO chip
++ *
++ * ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are
++ * handled by ACPI event methods which need to be called from the GPIO
++ * chip's interrupt handler. acpi_gpiochip_request_interrupts() finds out which
++ * GPIO pins have ACPI event methods and assigns interrupt handlers that calls
++ * the ACPI event methods for those pins.
++ */
++void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
++{
++      struct acpi_gpio_chip *acpi_gpio;
++      acpi_handle handle;
++      acpi_status status;
++
++      if (!chip->parent || !chip->to_irq)
++              return;
++
++      handle = ACPI_HANDLE(chip->parent);
++      if (!handle)
++              return;
++
++      status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
++      if (ACPI_FAILURE(status))
++              return;
++
++      if (acpi_quirk_skip_gpio_event_handlers())
++              return;
++
++      acpi_walk_resources(handle, METHOD_NAME__AEI,
++                          acpi_gpiochip_alloc_event, acpi_gpio);
++
++      if (acpi_gpio_add_to_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry))
++              return;
++
++      acpi_gpiochip_request_irqs(acpi_gpio);
++}
++EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
++
++/**
++ * acpi_gpiochip_free_interrupts() - Free GPIO ACPI event interrupts.
++ * @chip:      GPIO chip
++ *
++ * Free interrupts associated with GPIO ACPI event method for the given
++ * GPIO chip.
++ */
++void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
++{
++      struct acpi_gpio_chip *acpi_gpio;
++      struct acpi_gpio_event *event, *ep;
++      acpi_handle handle;
++      acpi_status status;
++
++      if (!chip->parent || !chip->to_irq)
++              return;
++
++      handle = ACPI_HANDLE(chip->parent);
++      if (!handle)
++              return;
++
++      status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
++      if (ACPI_FAILURE(status))
++              return;
++
++      acpi_gpio_remove_from_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry);
++
++      list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
++              if (event->irq_requested) {
++                      if (event->irq_is_wake)
++                              disable_irq_wake(event->irq);
++
++                      free_irq(event->irq, event);
++              }
++
++              gpiochip_unlock_as_irq(chip, event->pin);
++              gpiochip_free_own_desc(event->desc);
++              list_del(&event->node);
++              kfree(event);
++      }
++}
++EXPORT_SYMBOL_GPL(acpi_gpiochip_free_interrupts);
++
++void __init acpi_gpio_process_deferred_list(struct list_head *list)
++{
++      struct acpi_gpio_chip *acpi_gpio, *tmp;
++
++      list_for_each_entry_safe(acpi_gpio, tmp, list, deferred_req_irqs_list_entry)
++              acpi_gpiochip_request_irqs(acpi_gpio);
++}
++
++int acpi_dev_add_driver_gpios(struct acpi_device *adev,
++                            const struct acpi_gpio_mapping *gpios)
++{
++      if (adev && gpios) {
++              adev->driver_gpios = gpios;
++              return 0;
++      }
++      return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(acpi_dev_add_driver_gpios);
++
++void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
++{
++      if (adev)
++              adev->driver_gpios = NULL;
++}
++EXPORT_SYMBOL_GPL(acpi_dev_remove_driver_gpios);
++
++static void acpi_dev_release_driver_gpios(void *adev)
++{
++      acpi_dev_remove_driver_gpios(adev);
++}
++
++int devm_acpi_dev_add_driver_gpios(struct device *dev,
++                                 const struct acpi_gpio_mapping *gpios)
++{
++      struct acpi_device *adev = ACPI_COMPANION(dev);
++      int ret;
++
++      ret = acpi_dev_add_driver_gpios(adev, gpios);
++      if (ret)
++              return ret;
++
++      return devm_add_action_or_reset(dev, acpi_dev_release_driver_gpios, adev);
++}
++EXPORT_SYMBOL_GPL(devm_acpi_dev_add_driver_gpios);
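++
++/*
++ * Illustrative use (hypothetical consumer driver): a driver can map a
++ * con_id to a GpioIo/GpioInt entry in _CRS when the DSDT lacks _DSD
++ * properties. The three acpi_gpio_params fields are the _CRS entry
++ * index, the line index within that entry, and the active-low flag:
++ *
++ *      static const struct acpi_gpio_params reset_gpio = { 1, 0, false };
++ *      static const struct acpi_gpio_mapping foo_acpi_gpios[] = {
++ *              { "reset-gpios", &reset_gpio, 1 },
++ *              { }
++ *      };
++ *
++ *      ret = devm_acpi_dev_add_driver_gpios(dev, foo_acpi_gpios);
++ *      if (ret)
++ *              return ret;
++ *      desc = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
++ */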
++
++static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
++                                    const char *name, int index,
++                                    struct fwnode_reference_args *args,
++                                    unsigned int *quirks)
++{
++      const struct acpi_gpio_mapping *gm;
++
++      if (!adev || !adev->driver_gpios)
++              return false;
++
++      for (gm = adev->driver_gpios; gm->name; gm++)
++              if (!strcmp(name, gm->name) && gm->data && index < gm->size) {
++                      const struct acpi_gpio_params *par = gm->data + index;
++
++                      args->fwnode = acpi_fwnode_handle(adev);
++                      args->args[0] = par->crs_entry_index;
++                      args->args[1] = par->line_index;
++                      args->args[2] = par->active_low;
++                      args->nargs = 3;
++
++                      *quirks = gm->quirks;
++                      return true;
++              }
++
++      return false;
++}
++
++static int
++__acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
++{
++      const enum gpiod_flags mask =
++              GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
++              GPIOD_FLAGS_BIT_DIR_VAL;
++      int ret = 0;
++
++      /*
++       * Check if the BIOS has IoRestriction with explicitly set direction
++       * and update @flags accordingly. Otherwise use whatever caller asked
++       * for.
++       */
++      if (update & GPIOD_FLAGS_BIT_DIR_SET) {
++              enum gpiod_flags diff = *flags ^ update;
++
++              /*
++               * Check if caller supplied incompatible GPIO initialization
++               * flags.
++               *
++               * Return %-EINVAL to notify that firmware has different
++               * settings and we are going to use them.
++               */
++              if (((*flags & GPIOD_FLAGS_BIT_DIR_SET) && (diff & GPIOD_FLAGS_BIT_DIR_OUT)) ||
++                  ((*flags & GPIOD_FLAGS_BIT_DIR_OUT) && (diff & GPIOD_FLAGS_BIT_DIR_VAL)))
++                      ret = -EINVAL;
++              *flags = (*flags & ~mask) | (update & mask);
++      }
++      return ret;
++}
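++
++/*
++ * Worked example: if the caller asks for GPIOD_IN but the firmware
++ * IoRestriction resolves to GPIOD_OUT_HIGH, then diff carries the
++ * GPIOD_FLAGS_BIT_DIR_OUT bit while *flags has GPIOD_FLAGS_BIT_DIR_SET,
++ * so the helper returns -EINVAL and rewrites *flags to GPIOD_OUT_HIGH.
++ * acpi_gpio_update_gpiod_flags() below then decides, depending on the
++ * ACPI_GPIO_QUIRK_NO_IO_RESTRICTION quirk, whether to keep the caller's
++ * flags or the firmware-adjusted ones.
++ */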
++
++static int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
++                                      struct acpi_gpio_info *info)
++{
++      struct device *dev = &info->adev->dev;
++      enum gpiod_flags old = *flags;
++      int ret;
++
++      ret = __acpi_gpio_update_gpiod_flags(&old, info->flags);
++      if (info->quirks & ACPI_GPIO_QUIRK_NO_IO_RESTRICTION) {
++              if (ret)
++                      dev_warn(dev, FW_BUG "GPIO not in correct mode, fixing\n");
++      } else {
++              if (ret)
++                      dev_dbg(dev, "Override GPIO initialization flags\n");
++              *flags = old;
++      }
++
++      return ret;
++}
++
++static int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
++                                             struct acpi_gpio_info *info)
++{
++      switch (info->pin_config) {
++      case ACPI_PIN_CONFIG_PULLUP:
++              *lookupflags |= GPIO_PULL_UP;
++              break;
++      case ACPI_PIN_CONFIG_PULLDOWN:
++              *lookupflags |= GPIO_PULL_DOWN;
++              break;
++      case ACPI_PIN_CONFIG_NOPULL:
++              *lookupflags |= GPIO_PULL_DISABLE;
++              break;
++      default:
++              break;
++      }
++
++      if (info->polarity == GPIO_ACTIVE_LOW)
++              *lookupflags |= GPIO_ACTIVE_LOW;
++
++      return 0;
++}
++
++struct acpi_gpio_lookup {
++      struct acpi_gpio_info info;
++      int index;
++      u16 pin_index;
++      bool active_low;
++      struct gpio_desc *desc;
++      int n;
++};
++
++static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
++{
++      struct acpi_gpio_lookup *lookup = data;
++
++      if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
++              return 1;
++
++      if (!lookup->desc) {
++              const struct acpi_resource_gpio *agpio = &ares->data.gpio;
++              bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
++              struct gpio_desc *desc;
++              u16 pin_index;
++
++              if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
++                      lookup->index++;
++
++              if (lookup->n++ != lookup->index)
++                      return 1;
++
++              pin_index = lookup->pin_index;
++              if (pin_index >= agpio->pin_table_length)
++                      return 1;
++
++              if (lookup->info.quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER)
++                      desc = gpio_to_desc(agpio->pin_table[pin_index]);
++              else
++                      desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
++                                            agpio->pin_table[pin_index]);
++              lookup->desc = desc;
++              lookup->info.pin_config = agpio->pin_config;
++              lookup->info.debounce = agpio->debounce_timeout;
++              lookup->info.gpioint = gpioint;
++              lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio);
++
++              /*
++               * Polarity and triggering are only specified for a GpioInt
++               * resource.
++               * Note: we expect here:
++               * - ACPI_ACTIVE_LOW == GPIO_ACTIVE_LOW
++               * - ACPI_ACTIVE_HIGH == GPIO_ACTIVE_HIGH
++               */
++              if (lookup->info.gpioint) {
++                      lookup->info.polarity = agpio->polarity;
++                      lookup->info.triggering = agpio->triggering;
++              } else {
++                      lookup->info.polarity = lookup->active_low;
++              }
++
++              lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio, lookup->info.polarity);
++      }
++
++      return 1;
++}
++
++static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
++                                   struct acpi_gpio_info *info)
++{
++      struct acpi_device *adev = lookup->info.adev;
++      struct list_head res_list;
++      int ret;
++
++      INIT_LIST_HEAD(&res_list);
++
++      ret = acpi_dev_get_resources(adev, &res_list,
++                                   acpi_populate_gpio_lookup,
++                                   lookup);
++      if (ret < 0)
++              return ret;
++
++      acpi_dev_free_resource_list(&res_list);
++
++      if (!lookup->desc)
++              return -ENOENT;
++
++      if (info)
++              *info = lookup->info;
++      return 0;
++}
++
++static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
++                                   const char *propname, int index,
++                                   struct acpi_gpio_lookup *lookup)
++{
++      struct fwnode_reference_args args;
++      unsigned int quirks = 0;
++      int ret;
++
++      memset(&args, 0, sizeof(args));
++      ret = __acpi_node_get_property_reference(fwnode, propname, index, 3,
++                                               &args);
++      if (ret) {
++              struct acpi_device *adev;
++
++              adev = to_acpi_device_node(fwnode);
++              if (!acpi_get_driver_gpio_data(adev, propname, index, &args, &quirks))
++                      return ret;
++      }
++      /*
++       * The property was found and resolved, so we need to look up the GPIO
++       * based on the returned args.
++       */
++      if (!to_acpi_device_node(args.fwnode))
++              return -EINVAL;
++      if (args.nargs != 3)
++              return -EPROTO;
++
++      lookup->index = args.args[0];
++      lookup->pin_index = args.args[1];
++      lookup->active_low = !!args.args[2];
++
++      lookup->info.adev = to_acpi_device_node(args.fwnode);
++      lookup->info.quirks = quirks;
++
++      return 0;
++}
++
++/**
++ * acpi_get_gpiod_by_index() - get a GPIO descriptor from device resources
++ * @adev: pointer to an ACPI device to get the GPIO from
++ * @propname: Property name of the GPIO (optional)
++ * @index: index of GpioIo/GpioInt resource (starting from %0)
++ * @info: info pointer to fill in (optional)
++ *
++ * The function goes through the ACPI resources for @adev and, based on
++ * @index, looks up a GpioIo/GpioInt resource, translates it to the Linux
++ * GPIO descriptor, and returns it. @index matches GpioIo/GpioInt resources
++ * only, so if there are a total of %3 GPIO resources, the index goes from
++ * %0 to %2.
++ *
++ * If @propname is specified, the GPIO is looked up using the device
++ * property. In that case @index is used to select the GPIO entry in the
++ * property value (in case there are multiple).
++ *
++ * Returns:
++ * GPIO descriptor to use with Linux generic GPIO API.
++ * If the GPIO cannot be translated or there is an error an ERR_PTR is
++ * returned.
++ *
++ * Note: if the GPIO resource has multiple entries in the pin list, this
++ * function only returns the first.
++ */
++static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
++                                               const char *propname,
++                                               int index,
++                                               struct acpi_gpio_info *info)
++{
++      struct acpi_gpio_lookup lookup;
++      int ret;
++
++      memset(&lookup, 0, sizeof(lookup));
++      lookup.index = index;
++
++      if (propname) {
++              dev_dbg(&adev->dev, "GPIO: looking up %s\n", propname);
++
++              ret = acpi_gpio_property_lookup(acpi_fwnode_handle(adev),
++                                              propname, index, &lookup);
++              if (ret)
++                      return ERR_PTR(ret);
++
++              dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %u %u\n",
++                      dev_name(&lookup.info.adev->dev), lookup.index,
++                      lookup.pin_index, lookup.active_low);
++      } else {
++              dev_dbg(&adev->dev, "GPIO: looking up %d in _CRS\n", index);
++              lookup.info.adev = adev;
++      }
++
++      ret = acpi_gpio_resource_lookup(&lookup, info);
++      return ret ? ERR_PTR(ret) : lookup.desc;
++}
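++
++/*
++ * For reference, a _DSD GPIO property resolved by the lookup above uses
++ * the documented three-cell format (_CRS entry index, line index within
++ * the entry, active-low flag); an illustrative ASL sketch:
++ *
++ *      Name (_DSD, Package () {
++ *              ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
++ *              Package () {
++ *                      Package () { "reset-gpios", Package () { ^FOO, 1, 0, 0 } },
++ *              }
++ *      })
++ *
++ * i.e. entry 1 of FOO's _CRS, line 0 of that GpioIo()/GpioInt()
++ * descriptor, active high.
++ */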
++
++/**
++ * acpi_get_gpiod_from_data() - get a GPIO descriptor from ACPI data node
++ * @fwnode: pointer to an ACPI firmware node to get the GPIO information from
++ * @propname: Property name of the GPIO
++ * @index: index of GpioIo/GpioInt resource (starting from %0)
++ * @info: info pointer to fill in (optional)
++ *
++ * This function uses the property-based GPIO lookup to get to the GPIO
++ * resource with the relevant information from a data-only ACPI firmware node
++ * and uses that to obtain the GPIO descriptor to return.
++ *
++ * Returns:
++ * GPIO descriptor to use with Linux generic GPIO API.
++ * If the GPIO cannot be translated or there is an error an ERR_PTR is
++ * returned.
++ */
++static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode,
++                                                const char *propname,
++                                                int index,
++                                                struct acpi_gpio_info *info)
++{
++      struct acpi_gpio_lookup lookup;
++      int ret;
++
++      if (!is_acpi_data_node(fwnode))
++              return ERR_PTR(-ENODEV);
++
++      if (!propname)
++              return ERR_PTR(-EINVAL);
++
++      memset(&lookup, 0, sizeof(lookup));
++      lookup.index = index;
++
++      ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup);
++      if (ret)
++              return ERR_PTR(ret);
++
++      ret = acpi_gpio_resource_lookup(&lookup, info);
++      return ret ? ERR_PTR(ret) : lookup.desc;
++}
++
++static bool acpi_can_fallback_to_crs(struct acpi_device *adev,
++                                   const char *con_id)
++{
++      /* If there is no ACPI device, there is no _CRS to fall back to */
++      if (!adev)
++              return false;
++
++      /* Never allow fallback if the device has properties */
++      if (acpi_dev_has_props(adev) || adev->driver_gpios)
++              return false;
++
++      return con_id == NULL;
++}
++
++static struct gpio_desc *
++__acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int idx,
++               bool can_fallback, struct acpi_gpio_info *info)
++{
++      struct acpi_device *adev = to_acpi_device_node(fwnode);
++      struct gpio_desc *desc;
++      char propname[32];
++
++      /* Try first from _DSD */
++      for_each_gpio_property_name(propname, con_id) {
++              if (adev)
++                      desc = acpi_get_gpiod_by_index(adev,
++                                                     propname, idx, info);
++              else
++                      desc = acpi_get_gpiod_from_data(fwnode,
++                                                      propname, idx, info);
++              if (PTR_ERR(desc) == -EPROBE_DEFER)
++                      return ERR_CAST(desc);
++
++              if (!IS_ERR(desc))
++                      return desc;
++      }
++
++      /* Then from plain _CRS GPIOs */
++      if (can_fallback)
++              return acpi_get_gpiod_by_index(adev, NULL, idx, info);
++
++      return ERR_PTR(-ENOENT);
++}
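++
++/*
++ * With the usual GPIO property suffixes, the loop above tries e.g.
++ * "reset-gpios" and then "reset-gpio" for con_id "reset", and the plain
++ * "gpios"/"gpio" names when con_id is NULL, before optionally falling
++ * back to the bare _CRS resources.
++ */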
++
++struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
++                               const char *con_id,
++                               unsigned int idx,
++                               enum gpiod_flags *dflags,
++                               unsigned long *lookupflags)
++{
++      struct acpi_device *adev = to_acpi_device_node(fwnode);
++      bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
++      struct acpi_gpio_info info;
++      struct gpio_desc *desc;
++
++      desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
++      if (IS_ERR(desc))
++              return desc;
++
++      if (info.gpioint &&
++          (*dflags == GPIOD_OUT_LOW || *dflags == GPIOD_OUT_HIGH)) {
++              dev_dbg(&adev->dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
++              return ERR_PTR(-ENOENT);
++      }
++
++      acpi_gpio_update_gpiod_flags(dflags, &info);
++      acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
++      return desc;
++}
++
++/**
++ * acpi_dev_gpio_irq_wake_get_by() - Find GpioInt and translate it to Linux IRQ number
++ * @adev: pointer to an ACPI device to get the IRQ from
++ * @con_id: optional name of GpioInt resource
++ * @index: index of GpioInt resource (starting from %0)
++ * @wake_capable: Set to true if the IRQ is wake capable
++ *
++ * If the device has one or more GpioInt resources, this function can be
++ * used to translate from the GPIO offset in the resource to the Linux IRQ
++ * number.
++ *
++ * The function is idempotent, though each time it runs it will configure the
++ * GPIO pin direction according to the flags in the GpioInt resource.
++ *
++ * The function takes an optional @con_id parameter. If @con_id is given,
++ * only GpioInt resources matching it in a device property are taken into
++ * account.
++ *
++ * The GPIO is considered wake capable if the GpioInt resource specifies
++ * SharedAndWake or ExclusiveAndWake.
++ *
++ * Returns:
++ * Linux IRQ number (> 0) on success, negative errno on failure.
++ */
++int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index,
++                                bool *wake_capable)
++{
++      struct fwnode_handle *fwnode = acpi_fwnode_handle(adev);
++      int idx, i;
++      unsigned int irq_flags;
++      int ret;
++
++      for (i = 0, idx = 0; idx <= index; i++) {
++              struct acpi_gpio_info info;
++              struct gpio_desc *desc;
++
++              /* Ignore -EPROBE_DEFER, it only matters if idx matches */
++              desc = __acpi_find_gpio(fwnode, con_id, i, true, &info);
++              if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
++                      return PTR_ERR(desc);
++
++              if (info.gpioint && idx++ == index) {
++                      unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
++                      enum gpiod_flags dflags = GPIOD_ASIS;
++                      char label[32];
++                      int irq;
++
++                      if (IS_ERR(desc))
++                              return PTR_ERR(desc);
++
++                      irq = gpiod_to_irq(desc);
++                      if (irq < 0)
++                              return irq;
++
++                      acpi_gpio_update_gpiod_flags(&dflags, &info);
++                      acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
++
++                      snprintf(label, sizeof(label), "%pfwP GpioInt(%d)", fwnode, index);
++                      ret = gpiod_set_consumer_name(desc, con_id ?: label);
++                      if (ret)
++                              return ret;
++
++                      ret = gpiod_configure_flags(desc, label, lflags, dflags);
++                      if (ret < 0)
++                              return ret;
++
++                      /* ACPI uses hundredths of milliseconds units */
++                      ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
++                      if (ret)
++                              return ret;
++
++                      irq_flags = acpi_dev_get_irq_type(info.triggering,
++                                                        info.polarity);
++
++                      /*
++                       * If the IRQ is not already in use, then set the type,
++                       * if specified and different from the current one.
++                       */
++                      if (can_request_irq(irq, irq_flags)) {
++                              if (irq_flags != IRQ_TYPE_NONE &&
++                                  irq_flags != irq_get_trigger_type(irq))
++                                      irq_set_irq_type(irq, irq_flags);
++                      } else {
++                              dev_dbg(&adev->dev, "IRQ %d already in use\n", irq);
++                      }
++
++                      /* avoid suspend issues with GPIOs when systems are using S3 */
++                      if (wake_capable && acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
++                              *wake_capable = info.wake_capable;
++
++                      return irq;
++              }
++
++      }
++      return -ENOENT;
++}
++EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_wake_get_by);
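++
++/*
++ * Typical consumer sketch (hypothetical driver): callers usually go
++ * through the inline wrappers in <linux/acpi.h>, e.g.:
++ *
++ *      irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0);
++ *      if (irq < 0)
++ *              return irq;
++ *
++ * which resolves to acpi_dev_gpio_irq_wake_get_by(adev, NULL, 0, NULL)
++ * and thus picks the first GpioInt resource without reporting wake
++ * capability.
++ */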
++
++static acpi_status
++acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
++                          u32 bits, u64 *value, void *handler_context,
++                          void *region_context)
++{
++      struct acpi_gpio_chip *achip = region_context;
++      struct gpio_chip *chip = achip->chip;
++      struct acpi_resource_gpio *agpio;
++      struct acpi_resource *ares;
++      u16 pin_index = address;
++      acpi_status status;
++      int length;
++      int i;
++
++      status = acpi_buffer_to_resource(achip->conn_info.connection,
++                                       achip->conn_info.length, &ares);
++      if (ACPI_FAILURE(status))
++              return status;
++
++      if (WARN_ON(ares->type != ACPI_RESOURCE_TYPE_GPIO)) {
++              ACPI_FREE(ares);
++              return AE_BAD_PARAMETER;
++      }
++
++      agpio = &ares->data.gpio;
++
++      if (WARN_ON(agpio->io_restriction == ACPI_IO_RESTRICT_INPUT &&
++          function == ACPI_WRITE)) {
++              ACPI_FREE(ares);
++              return AE_BAD_PARAMETER;
++      }
++
++      length = min_t(u16, agpio->pin_table_length, pin_index + bits);
++      for (i = pin_index; i < length; ++i) {
++              unsigned int pin = agpio->pin_table[i];
++              struct acpi_gpio_connection *conn;
++              struct gpio_desc *desc;
++              bool found;
++
++              mutex_lock(&achip->conn_lock);
++
++              found = false;
++              list_for_each_entry(conn, &achip->conns, node) {
++                      if (conn->pin == pin) {
++                              found = true;
++                              desc = conn->desc;
++                              break;
++                      }
++              }
++
++              /*
++               * The same GPIO can be shared between an operation region and
++               * an event, but only if the access here is ACPI_READ. In that
++               * case we "borrow" the event GPIO instead.
++               */
++              if (!found && agpio->shareable == ACPI_SHARED &&
++                   function == ACPI_READ) {
++                      struct acpi_gpio_event *event;
++
++                      list_for_each_entry(event, &achip->events, node) {
++                              if (event->pin == pin) {
++                                      desc = event->desc;
++                                      found = true;
++                                      break;
++                              }
++                      }
++              }
++
++              if (!found) {
++                      desc = acpi_request_own_gpiod(chip, agpio, i, "ACPI:OpRegion");
++                      if (IS_ERR(desc)) {
++                              mutex_unlock(&achip->conn_lock);
++                              status = AE_ERROR;
++                              goto out;
++                      }
++
++                      conn = kzalloc(sizeof(*conn), GFP_KERNEL);
++                      if (!conn) {
++                              gpiochip_free_own_desc(desc);
++                              mutex_unlock(&achip->conn_lock);
++                              status = AE_NO_MEMORY;
++                              goto out;
++                      }
++
++                      conn->pin = pin;
++                      conn->desc = desc;
++                      list_add_tail(&conn->node, &achip->conns);
++              }
++
++              mutex_unlock(&achip->conn_lock);
++
++              if (function == ACPI_WRITE)
++                      gpiod_set_raw_value_cansleep(desc, !!(*value & BIT(i)));
++              else
++                      *value |= (u64)gpiod_get_raw_value_cansleep(desc) << i;
++      }
++
++out:
++      ACPI_FREE(ares);
++      return status;
++}
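++
++/*
++ * The handler above backs GeneralPurposeIo operation regions declared in
++ * ASL roughly as follows (illustrative sketch only):
++ *
++ *      OperationRegion (GPOP, GeneralPurposeIo, 0, 1)
++ *      Field (GPOP, ByteAcc, NoLock, Preserve)
++ *      {
++ *              Connection (GpioIo (Exclusive, PullDefault, 0, 0,
++ *                                  IoRestrictionOutputOnly,
++ *                                  "\\_SB.GPO0") { 5 }),
++ *              LED1, 1,
++ *      }
++ *
++ * A Store to LED1 then reaches this handler as an ACPI_WRITE, with
++ * @address/@bits selecting entries in the connection's pin table.
++ */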
++
++static void acpi_gpiochip_request_regions(struct acpi_gpio_chip *achip)
++{
++      struct gpio_chip *chip = achip->chip;
++      acpi_handle handle = ACPI_HANDLE(chip->parent);
++      acpi_status status;
++
++      INIT_LIST_HEAD(&achip->conns);
++      mutex_init(&achip->conn_lock);
++      status = acpi_install_address_space_handler(handle, ACPI_ADR_SPACE_GPIO,
++                                                  acpi_gpio_adr_space_handler,
++                                                  NULL, achip);
++      if (ACPI_FAILURE(status))
++              dev_err(chip->parent,
++                      "Failed to install GPIO OpRegion handler\n");
++}
++
++static void acpi_gpiochip_free_regions(struct acpi_gpio_chip *achip)
++{
++      struct gpio_chip *chip = achip->chip;
++      acpi_handle handle = ACPI_HANDLE(chip->parent);
++      struct acpi_gpio_connection *conn, *tmp;
++      acpi_status status;
++
++      status = acpi_remove_address_space_handler(handle, ACPI_ADR_SPACE_GPIO,
++                                                 acpi_gpio_adr_space_handler);
++      if (ACPI_FAILURE(status)) {
++              dev_err(chip->parent,
++                      "Failed to remove GPIO OpRegion handler\n");
++              return;
++      }
++
++      list_for_each_entry_safe_reverse(conn, tmp, &achip->conns, node) {
++              gpiochip_free_own_desc(conn->desc);
++              list_del(&conn->node);
++              kfree(conn);
++      }
++}
++
++static struct gpio_desc *
++acpi_gpiochip_parse_own_gpio(struct acpi_gpio_chip *achip,
++                           struct fwnode_handle *fwnode,
++                           const char **name,
++                           unsigned long *lflags,
++                           enum gpiod_flags *dflags)
++{
++      struct gpio_chip *chip = achip->chip;
++      struct gpio_desc *desc;
++      u32 gpios[2];
++      int ret;
++
++      *lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
++      *dflags = GPIOD_ASIS;
++      *name = NULL;
++
++      ret = fwnode_property_read_u32_array(fwnode, "gpios", gpios,
++                                           ARRAY_SIZE(gpios));
++      if (ret < 0)
++              return ERR_PTR(ret);
++
++      desc = gpiochip_get_desc(chip, gpios[0]);
++      if (IS_ERR(desc))
++              return desc;
++
++      if (gpios[1])
++              *lflags |= GPIO_ACTIVE_LOW;
++
++      if (fwnode_property_present(fwnode, "input"))
++              *dflags |= GPIOD_IN;
++      else if (fwnode_property_present(fwnode, "output-low"))
++              *dflags |= GPIOD_OUT_LOW;
++      else if (fwnode_property_present(fwnode, "output-high"))
++              *dflags |= GPIOD_OUT_HIGH;
++      else
++              return ERR_PTR(-EINVAL);
++
++      fwnode_property_read_string(fwnode, "line-name", name);
++
++      return desc;
++}
++
++static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip)
++{
++      struct gpio_chip *chip = achip->chip;
++      struct fwnode_handle *fwnode;
++
++      device_for_each_child_node(chip->parent, fwnode) {
++              unsigned long lflags;
++              enum gpiod_flags dflags;
++              struct gpio_desc *desc;
++              const char *name;
++              int ret;
++
++              if (!fwnode_property_present(fwnode, "gpio-hog"))
++                      continue;
++
++              desc = acpi_gpiochip_parse_own_gpio(achip, fwnode, &name,
++                                                  &lflags, &dflags);
++              if (IS_ERR(desc))
++                      continue;
++
++              ret = gpiod_hog(desc, name, lflags, dflags);
++              if (ret) {
++                      dev_err(chip->parent, "Failed to hog GPIO\n");
++                      fwnode_handle_put(fwnode);
++                      return;
++              }
++      }
++}
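++
++/*
++ * The hog properties read above come from ACPI data nodes, roughly of
++ * this shape (sketch modelled on the gpio-properties documentation):
++ *
++ *      Name (_DSD, Package () {
++ *              ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
++ *              Package () { Package () { "hog-gpio8", "G8PU" } }
++ *      })
++ *      Name (G8PU, Package () {
++ *              ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
++ *              Package () {
++ *                      Package () { "gpio-hog", 1 },
++ *                      Package () { "gpios", Package () { 8, 0 } },
++ *                      Package () { "output-high", 1 },
++ *              }
++ *      })
++ */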
++
++void acpi_gpiochip_add(struct gpio_chip *chip)
++{
++      struct acpi_gpio_chip *acpi_gpio;
++      struct acpi_device *adev;
++      acpi_status status;
++
++      if (!chip || !chip->parent)
++              return;
++
++      adev = ACPI_COMPANION(chip->parent);
++      if (!adev)
++              return;
++
++      acpi_gpio = kzalloc(sizeof(*acpi_gpio), GFP_KERNEL);
++      if (!acpi_gpio) {
++              dev_err(chip->parent,
++                      "Failed to allocate memory for ACPI GPIO chip\n");
++              return;
++      }
++
++      acpi_gpio->chip = chip;
++      INIT_LIST_HEAD(&acpi_gpio->events);
++      INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
++
++      status = acpi_attach_data(adev->handle, acpi_gpio_chip_dh, acpi_gpio);
++      if (ACPI_FAILURE(status)) {
++              dev_err(chip->parent, "Failed to attach ACPI GPIO chip\n");
++              kfree(acpi_gpio);
++              return;
++      }
++
++      acpi_gpiochip_request_regions(acpi_gpio);
++      acpi_gpiochip_scan_gpios(acpi_gpio);
++      acpi_dev_clear_dependencies(adev);
++}
++
++void acpi_gpiochip_remove(struct gpio_chip *chip)
++{
++      struct acpi_gpio_chip *acpi_gpio;
++      acpi_handle handle;
++      acpi_status status;
++
++      if (!chip || !chip->parent)
++              return;
++
++      handle = ACPI_HANDLE(chip->parent);
++      if (!handle)
++              return;
++
++      status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
++      if (ACPI_FAILURE(status)) {
++              dev_warn(chip->parent, "Failed to retrieve ACPI GPIO chip\n");
++              return;
++      }
++
++      acpi_gpiochip_free_regions(acpi_gpio);
++
++      acpi_detach_data(handle, acpi_gpio_chip_dh);
++      kfree(acpi_gpio);
++}
++
++static int acpi_gpio_package_count(const union acpi_object *obj)
++{
++      const union acpi_object *element = obj->package.elements;
++      const union acpi_object *end = element + obj->package.count;
++      unsigned int count = 0;
++
++      while (element < end) {
++              switch (element->type) {
++              case ACPI_TYPE_LOCAL_REFERENCE:
++                      element += 3;
++                      fallthrough;
++              case ACPI_TYPE_INTEGER:
++                      element++;
++                      count++;
++                      break;
++
++              default:
++                      return -EPROTO;
++              }
++      }
++
++      return count;
++}
++
++static int acpi_find_gpio_count(struct acpi_resource *ares, void *data)
++{
++      unsigned int *count = data;
++
++      if (ares->type == ACPI_RESOURCE_TYPE_GPIO)
++              *count += ares->data.gpio.pin_table_length;
++
++      return 1;
++}
++
++/**
++ * acpi_gpio_count - count the GPIOs associated with a firmware node / function
++ * @fwnode:   firmware node of the GPIO consumer
++ * @con_id:   function within the GPIO consumer
++ *
++ * Returns:
++ * The number of GPIOs associated with a firmware node / function, or
++ * %-ENOENT if no GPIO has been assigned to the requested function.
++ */
++int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
++{
++      struct acpi_device *adev = to_acpi_device_node(fwnode);
++      const union acpi_object *obj;
++      const struct acpi_gpio_mapping *gm;
++      int count = -ENOENT;
++      int ret;
++      char propname[32];
++
++      /* Try first from _DSD */
++      for_each_gpio_property_name(propname, con_id) {
++              ret = acpi_dev_get_property(adev, propname, ACPI_TYPE_ANY, &obj);
++              if (ret == 0) {
++                      if (obj->type == ACPI_TYPE_LOCAL_REFERENCE)
++                              count = 1;
++                      else if (obj->type == ACPI_TYPE_PACKAGE)
++                              count = acpi_gpio_package_count(obj);
++              } else if (adev->driver_gpios) {
++                      for (gm = adev->driver_gpios; gm->name; gm++)
++                              if (strcmp(propname, gm->name) == 0) {
++                                      count = gm->size;
++                                      break;
++                              }
++              }
++              if (count > 0)
++                      break;
++      }
++
++      /* Then from plain _CRS GPIOs */
++      if (count < 0) {
++              struct list_head resource_list;
++              unsigned int crs_count = 0;
++
++              if (!acpi_can_fallback_to_crs(adev, con_id))
++                      return count;
++
++              INIT_LIST_HEAD(&resource_list);
++              acpi_dev_get_resources(adev, &resource_list,
++                                     acpi_find_gpio_count, &crs_count);
++              acpi_dev_free_resource_list(&resource_list);
++              if (crs_count > 0)
++                      count = crs_count;
++      }
++      return count ? count : -ENOENT;
++}
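++
++/*
++ * Counting example: a _DSD entry such as
++ *
++ *      Package () { "led-gpios", Package () { ^FOO, 0, 0, 0, ^FOO, 1, 0, 0 } }
++ *
++ * is seen by acpi_gpio_package_count() as two local references of four
++ * elements each (reference, _CRS index, line index, polarity), so
++ * acpi_gpio_count() returns 2 for con_id "led".
++ */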
+--- /dev/null
++++ b/drivers/gpio/gpiolib-acpi-quirks.c
+@@ -0,0 +1,363 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * ACPI quirks for GPIO ACPI helpers
++ *
++ * Author: Hans de Goede <hdegoede@redhat.com>
++ */
++
++#include <linux/dmi.h>
++#include <linux/kstrtox.h>
++#include <linux/list.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/printk.h>
++#include <linux/string.h>
++#include <linux/types.h>
++
++#include "gpiolib-acpi.h"
++
++static int run_edge_events_on_boot = -1;
++module_param(run_edge_events_on_boot, int, 0444);
++MODULE_PARM_DESC(run_edge_events_on_boot,
++               "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
++
++static char *ignore_wake;
++module_param(ignore_wake, charp, 0444);
++MODULE_PARM_DESC(ignore_wake,
++               "controller@pin combos on which to ignore the ACPI wake flag "
++               "ignore_wake=controller@pin[,controller@pin[,...]]");
++
++static char *ignore_interrupt;
++module_param(ignore_interrupt, charp, 0444);
++MODULE_PARM_DESC(ignore_interrupt,
++               "controller@pin combos on which to ignore interrupt "
++               "ignore_interrupt=controller@pin[,controller@pin[,...]]");
++
++/*
++ * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init
++ * (so builtin drivers), we register the ACPI GpioInt IRQ handlers from a
++ * late_initcall_sync() handler, so that other builtin drivers can register their
++ * OpRegions before the event handlers can run. This list contains GPIO chips
++ * for which the acpi_gpiochip_request_irqs() call has been deferred.
++ */
++static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
++static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
++static bool acpi_gpio_deferred_req_irqs_done;
++
++bool acpi_gpio_add_to_deferred_list(struct list_head *list)
++{
++      bool defer;
++
++      mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
++      defer = !acpi_gpio_deferred_req_irqs_done;
++      if (defer)
++              list_add(list, &acpi_gpio_deferred_req_irqs_list);
++      mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
++
++      return defer;
++}
++
++void acpi_gpio_remove_from_deferred_list(struct list_head *list)
++{
++      mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
++      if (!list_empty(list))
++              list_del_init(list);
++      mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
++}
++
++int acpi_gpio_need_run_edge_events_on_boot(void)
++{
++      return run_edge_events_on_boot;
++}
++
++bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list,
++                            const char *controller_in, unsigned int pin_in)
++{
++      const char *ignore_list, *controller, *pin_str;
++      unsigned int pin;
++      char *endp;
++      int len;
++
++      switch (list) {
++      case ACPI_GPIO_IGNORE_WAKE:
++              ignore_list = ignore_wake;
++              break;
++      case ACPI_GPIO_IGNORE_INTERRUPT:
++              ignore_list = ignore_interrupt;
++              break;
++      default:
++              return false;
++      }
++
++      controller = ignore_list;
++      while (controller) {
++              pin_str = strchr(controller, '@');
++              if (!pin_str)
++                      goto err;
++
++              len = pin_str - controller;
++              if (len == strlen(controller_in) &&
++                  strncmp(controller, controller_in, len) == 0) {
++                      pin = simple_strtoul(pin_str + 1, &endp, 10);
++                      if (*endp != 0 && *endp != ',')
++                              goto err;
++
++                      if (pin == pin_in)
++                              return true;
++              }
++
++              controller = strchr(controller, ',');
++              if (controller)
++                      controller++;
++      }
++
++      return false;
++err:
++      pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list);
++      return false;
++}
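++
++/*
++ * The list format matches the module parameters above, e.g. on the
++ * kernel command line:
++ *
++ *      gpiolib_acpi.ignore_wake="INT33FF:01@0,INT33FC:02@28"
++ *
++ * Each entry names a GPIO controller ACPI device instance and the pin
++ * whose wake flag (or, for ignore_interrupt, whose _AEI handler) should
++ * be ignored.
++ */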
++
++/* Run deferred acpi_gpiochip_request_irqs() */
++static int __init acpi_gpio_handle_deferred_request_irqs(void)
++{
++      mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
++      acpi_gpio_process_deferred_list(&acpi_gpio_deferred_req_irqs_list);
++      acpi_gpio_deferred_req_irqs_done = true;
++      mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
++
++      return 0;
++}
++/* We must use _sync so that this runs after the first deferred_probe run */
++late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
++
++struct acpi_gpiolib_dmi_quirk {
++      bool no_edge_events_on_boot;
++      char *ignore_wake;
++      char *ignore_interrupt;
++};
++
++static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
++      {
++              /*
++               * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
++               * a non-existent micro-USB-B connector, which puts the HDMI
++               * DDC pins in GPIO mode, breaking HDMI support.
++               */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .no_edge_events_on_boot = true,
++              },
++      },
++      {
++              /*
++               * The Terra Pad 1061 has a micro-USB-B id-pin handler which,
++               * instead of controlling the actual micro-USB-B port, turns off
++               * the 5V boost for its USB-A connector. The actual micro-USB-B
++               * connector is wired for charging only.
++               */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .no_edge_events_on_boot = true,
++              },
++      },
++      {
++              /*
++               * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC, uses an
++               * external embedded-controller connected via I2C + an ACPI GPIO
++               * event handler on INT33FC:02 pin 12, causing spurious wakeups.
++               */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_wake = "INT33FC:02@12",
++              },
++      },
++      {
++              /*
++               * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
++               * external embedded-controller connected via I2C + an ACPI GPIO
++               * event handler on INT33FF:01 pin 0, causing spurious wakeups.
++               * When suspending by closing the LID, the power to the USB
++               * keyboard is turned off, causing INT0002 ACPI events to
++               * trigger once the XHCI controller notices the keyboard is
++               * gone. So INT0002 events cause spurious wakeups too. Ignoring
++               * EC wakes breaks wakeup when opening the lid; the user then
++               * needs to press the power button to wake the system. The
++               * alternative is suspend simply not working, which is worse.
++               */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_wake = "INT33FF:01@0,INT0002:00@2",
++              },
++      },
++      {
++              /*
++               * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
++               * external embedded-controller connected via I2C + an ACPI GPIO
++               * event handler on INT33FC:02 pin 28, causing spurious wakeups.
++               */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
++                      DMI_MATCH(DMI_BOARD_NAME, "815D"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_wake = "INT33FC:02@28",
++              },
++      },
++      {
++              /*
++               * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an
++               * external embedded-controller connected via I2C + an ACPI GPIO
++               * event handler on INT33FF:01 pin 0, causing spurious wakeups.
++               */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "HP"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
++                      DMI_MATCH(DMI_BOARD_NAME, "813E"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_wake = "INT33FF:01@0",
++              },
++      },
++      {
++              /*
++               * Interrupt storm caused from edge triggered floating pin
++               * Found in BIOS UX325UAZ.300
++               * https://bugzilla.kernel.org/show_bug.cgi?id=216208
++               */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_interrupt = "AMDI0030:00@18",
++              },
++      },
++      {
++              /*
++               * Spurious wakeups from TP_ATTN# pin
++               * Found in BIOS 1.7.8
++               * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
++               */
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_wake = "ELAN0415:00@9",
++              },
++      },
++      {
++              /*
++               * Spurious wakeups from TP_ATTN# pin
++               * Found in BIOS 1.7.8
++               * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
++               */
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_wake = "ELAN0415:00@9",
++              },
++      },
++      {
++              /*
++               * Spurious wakeups from TP_ATTN# pin
++               * Found in BIOS 1.7.7
++               */
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_wake = "SYNA1202:00@16",
++              },
++      },
++      {
++              /*
++               * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
++               * a "dolby" button. At the ACPI level an _AEI event-handler
++               * is connected which sets an ACPI variable to 1 on both
++               * edges. This variable can be polled + cleared to 0 using
++               * WMI. But since the variable is set on both edges the WMI
++               * interface is pretty useless even when polling.
++               * So instead the x86-android-tablets code instantiates
++               * a gpio-keys platform device for it.
++               * Ignore the _AEI handler for the pin, so that the pin is not marked busy.
++               */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_interrupt = "INT33FC:00@3",
++              },
++      },
++      {
++              /*
++               * Spurious wakeups from TP_ATTN# pin
++               * Found in BIOS 0.35
++               * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
++               */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_wake = "PNP0C50:00@8",
++              },
++      },
++      {
++              /*
++               * Spurious wakeups from GPIO 11
++               * Found in BIOS 1.04
++               * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
++               */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++                      DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
++              },
++              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++                      .ignore_interrupt = "AMDI0030:00@11",
++              },
++      },
++      {} /* Terminating entry */
++};
++
++static int __init acpi_gpio_setup_params(void)
++{
++      const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
++      const struct dmi_system_id *id;
++
++      id = dmi_first_match(gpiolib_acpi_quirks);
++      if (id)
++              quirk = id->driver_data;
++
++      if (run_edge_events_on_boot < 0) {
++              if (quirk && quirk->no_edge_events_on_boot)
++                      run_edge_events_on_boot = 0;
++              else
++                      run_edge_events_on_boot = 1;
++      }
++
++      if (ignore_wake == NULL && quirk && quirk->ignore_wake)
++              ignore_wake = quirk->ignore_wake;
++
++      if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt)
++              ignore_interrupt = quirk->ignore_interrupt;
++
++      return 0;
++}
++
++/* Directly after dmi_setup(), which runs as a core_initcall() */
++postcore_initcall(acpi_gpio_setup_params);
+--- a/drivers/gpio/gpiolib-acpi.c
++++ /dev/null
+@@ -1,1765 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * ACPI helpers for GPIO API
+- *
+- * Copyright (C) 2012, Intel Corporation
+- * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
+- *          Mika Westerberg <mika.westerberg@linux.intel.com>
+- */
+-
+-#include <linux/acpi.h>
+-#include <linux/dmi.h>
+-#include <linux/errno.h>
+-#include <linux/export.h>
+-#include <linux/interrupt.h>
+-#include <linux/irq.h>
+-#include <linux/mutex.h>
+-#include <linux/pinctrl/pinctrl.h>
+-
+-#include <linux/gpio/consumer.h>
+-#include <linux/gpio/driver.h>
+-#include <linux/gpio/machine.h>
+-
+-#include "gpiolib.h"
+-#include "gpiolib-acpi.h"
+-
+-static int run_edge_events_on_boot = -1;
+-module_param(run_edge_events_on_boot, int, 0444);
+-MODULE_PARM_DESC(run_edge_events_on_boot,
+-               "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
+-
+-static char *ignore_wake;
+-module_param(ignore_wake, charp, 0444);
+-MODULE_PARM_DESC(ignore_wake,
+-               "controller@pin combos on which to ignore the ACPI wake flag "
+-               "ignore_wake=controller@pin[,controller@pin[,...]]");
+-
+-static char *ignore_interrupt;
+-module_param(ignore_interrupt, charp, 0444);
+-MODULE_PARM_DESC(ignore_interrupt,
+-               "controller@pin combos on which to ignore interrupt "
+-               "ignore_interrupt=controller@pin[,controller@pin[,...]]");
+-
+-struct acpi_gpiolib_dmi_quirk {
+-      bool no_edge_events_on_boot;
+-      char *ignore_wake;
+-      char *ignore_interrupt;
+-};
+-
+-/**
+- * struct acpi_gpio_event - ACPI GPIO event handler data
+- *
+- * @node:       list-entry of the events list of the struct acpi_gpio_chip
+- * @handle:     handle of ACPI method to execute when the IRQ triggers
+- * @handler:    handler function to pass to request_irq() when requesting the IRQ
+- * @pin:        GPIO pin number on the struct gpio_chip
+- * @irq:        Linux IRQ number for the event, for request_irq() / free_irq()
+- * @irqflags:   flags to pass to request_irq() when requesting the IRQ
+- * @irq_is_wake:  If the ACPI flags indicate the IRQ is a wakeup source
+- * @irq_requested:True if request_irq() has been done
+- * @desc:       struct gpio_desc for the GPIO pin for this event
+- */
+-struct acpi_gpio_event {
+-      struct list_head node;
+-      acpi_handle handle;
+-      irq_handler_t handler;
+-      unsigned int pin;
+-      unsigned int irq;
+-      unsigned long irqflags;
+-      bool irq_is_wake;
+-      bool irq_requested;
+-      struct gpio_desc *desc;
+-};
+-
+-struct acpi_gpio_connection {
+-      struct list_head node;
+-      unsigned int pin;
+-      struct gpio_desc *desc;
+-};
+-
+-struct acpi_gpio_chip {
+-      /*
+-       * ACPICA requires that the first field of the context parameter
+-       * passed to acpi_install_address_space_handler() is large enough
+-       * to hold struct acpi_connection_info.
+-       */
+-      struct acpi_connection_info conn_info;
+-      struct list_head conns;
+-      struct mutex conn_lock;
+-      struct gpio_chip *chip;
+-      struct list_head events;
+-      struct list_head deferred_req_irqs_list_entry;
+-};
+-
+-/**
+- * struct acpi_gpio_info - ACPI GPIO specific information
+- * @adev: reference to ACPI device which consumes GPIO resource
+- * @flags: GPIO initialization flags
+- * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
+- * @pin_config: pin bias as provided by ACPI
+- * @polarity: interrupt polarity as provided by ACPI
+- * @triggering: triggering type as provided by ACPI
+- * @wake_capable: wake capability as provided by ACPI
+- * @debounce: debounce timeout as provided by ACPI
+- * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
+- */
+-struct acpi_gpio_info {
+-      struct acpi_device *adev;
+-      enum gpiod_flags flags;
+-      bool gpioint;
+-      int pin_config;
+-      int polarity;
+-      int triggering;
+-      bool wake_capable;
+-      unsigned int debounce;
+-      unsigned int quirks;
+-};
+-
+-/*
+- * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init
+- * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
+- * late_initcall_sync() handler, so that other builtin drivers can register their
+- * OpRegions before the event handlers can run. This list contains GPIO chips
+- * for which the acpi_gpiochip_request_irqs() call has been deferred.
+- */
+-static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+-static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+-static bool acpi_gpio_deferred_req_irqs_done;
+-
+-static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data)
+-{
+-      /* First check the actual GPIO device */
+-      if (device_match_acpi_handle(&gc->gpiodev->dev, data))
+-              return true;
+-
+-      /*
+-       * When the ACPI device is artificially split to the banks of GPIOs,
+-       * where each of them is represented by a separate GPIO device,
+-       * the firmware node of the physical device may not be shared among
+-       * the banks as they may require different values for the same property,
+-       * e.g., number of GPIOs in a certain bank. In such case the ACPI handle
+-       * of a GPIO device is NULL and can not be used. Hence we have to check
+-       * the parent device to be sure that there is no match before bailing
+-       * out.
+-       */
+-      if (gc->parent)
+-              return device_match_acpi_handle(gc->parent, data);
+-
+-      return false;
+-}
+-
+-/**
+- * acpi_get_gpiod() - Translate ACPI GPIO pin to GPIO descriptor usable with GPIO API
+- * @path:     ACPI GPIO controller full path name, (e.g. "\\_SB.GPO1")
+- * @pin:      ACPI GPIO pin number (0-based, controller-relative)
+- *
+- * Returns:
+- * GPIO descriptor to use with Linux generic GPIO API.
+- * If the GPIO cannot be translated or there is an error an ERR_PTR is
+- * returned.
+- *
+- * Specifically returns %-EPROBE_DEFER if the referenced GPIO
+- * controller does not have GPIO chip registered at the moment. This is to
+- * support probe deferral.
+- */
+-static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin)
+-{
+-      acpi_handle handle;
+-      acpi_status status;
+-
+-      status = acpi_get_handle(NULL, path, &handle);
+-      if (ACPI_FAILURE(status))
+-              return ERR_PTR(-ENODEV);
+-
+-      struct gpio_device *gdev __free(gpio_device_put) =
+-                              gpio_device_find(handle, acpi_gpiochip_find);
+-      if (!gdev)
+-              return ERR_PTR(-EPROBE_DEFER);
+-
+-      /*
+-       * FIXME: keep track of the reference to the GPIO device somehow
+-       * instead of putting it here.
+-       */
+-      return gpio_device_get_desc(gdev, pin);
+-}
+-
+-static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
+-{
+-      struct acpi_gpio_event *event = data;
+-
+-      acpi_evaluate_object(event->handle, NULL, NULL, NULL);
+-
+-      return IRQ_HANDLED;
+-}
+-
+-static irqreturn_t acpi_gpio_irq_handler_evt(int irq, void *data)
+-{
+-      struct acpi_gpio_event *event = data;
+-
+-      acpi_execute_simple_method(event->handle, NULL, event->pin);
+-
+-      return IRQ_HANDLED;
+-}
+-
+-static void acpi_gpio_chip_dh(acpi_handle handle, void *data)
+-{
+-      /* The address of this function is used as a key. */
+-}
+-
+-bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
+-                              struct acpi_resource_gpio **agpio)
+-{
+-      struct acpi_resource_gpio *gpio;
+-
+-      if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
+-              return false;
+-
+-      gpio = &ares->data.gpio;
+-      if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_INT)
+-              return false;
+-
+-      *agpio = gpio;
+-      return true;
+-}
+-EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
+-
+-/**
+- * acpi_gpio_get_io_resource - Fetch details of an ACPI resource if it is a GPIO
+- *                           I/O resource or return False if not.
+- * @ares:     Pointer to the ACPI resource to fetch
+- * @agpio:    Pointer to a &struct acpi_resource_gpio to store the output pointer
+- *
+- * Returns:
+- * %true if GpioIo resource is found, %false otherwise.
+- */
+-bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+-                             struct acpi_resource_gpio **agpio)
+-{
+-      struct acpi_resource_gpio *gpio;
+-
+-      if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
+-              return false;
+-
+-      gpio = &ares->data.gpio;
+-      if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_IO)
+-              return false;
+-
+-      *agpio = gpio;
+-      return true;
+-}
+-EXPORT_SYMBOL_GPL(acpi_gpio_get_io_resource);
+-
+-static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
+-                                    struct acpi_gpio_event *event)
+-{
+-      struct device *parent = acpi_gpio->chip->parent;
+-      int ret, value;
+-
+-      ret = request_threaded_irq(event->irq, NULL, event->handler,
+-                                 event->irqflags | IRQF_ONESHOT, "ACPI:Event", event);
+-      if (ret) {
+-              dev_err(parent, "Failed to setup interrupt handler for %d\n", event->irq);
+-              return;
+-      }
+-
+-      if (event->irq_is_wake)
+-              enable_irq_wake(event->irq);
+-
+-      event->irq_requested = true;
+-
+-      /* Make sure we trigger the initial state of edge-triggered IRQs */
+-      if (acpi_gpio_need_run_edge_events_on_boot() &&
+-          (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
+-              value = gpiod_get_raw_value_cansleep(event->desc);
+-              if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+-                  ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+-                      event->handler(event->irq, event);
+-      }
+-}
+-
+-static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
+-{
+-      struct acpi_gpio_event *event;
+-
+-      list_for_each_entry(event, &acpi_gpio->events, node)
+-              acpi_gpiochip_request_irq(acpi_gpio, event);
+-}
+-
+-static enum gpiod_flags
+-acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity)
+-{
+-      /* GpioInt() implies input configuration */
+-      if (agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT)
+-              return GPIOD_IN;
+-
+-      switch (agpio->io_restriction) {
+-      case ACPI_IO_RESTRICT_INPUT:
+-              return GPIOD_IN;
+-      case ACPI_IO_RESTRICT_OUTPUT:
+-              /*
+-               * ACPI GPIO resources don't contain an initial value for the
+-               * GPIO. Therefore we deduce that value from the pull field
+-               * and the polarity instead. If the pin is pulled up we assume
+-               * default to be high, if it is pulled down we assume default
+-               * to be low, otherwise we leave pin untouched. For active low
+-               * polarity values will be switched. See also
+-               * Documentation/firmware-guide/acpi/gpio-properties.rst.
+-               */
+-              switch (agpio->pin_config) {
+-              case ACPI_PIN_CONFIG_PULLUP:
+-                      return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH;
+-              case ACPI_PIN_CONFIG_PULLDOWN:
+-                      return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+-              default:
+-                      break;
+-              }
+-              break;
+-      default:
+-              break;
+-      }
+-
+-      /*
+-       * Assume that the BIOS has configured the direction and pull
+-       * accordingly.
+-       */
+-      return GPIOD_ASIS;
+-}
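+-
+-/*
+- * Example (hypothetical resource): a GpioIo() with IoRestrictionOutputOnly
+- * and PullUp, looked up with GPIO_ACTIVE_LOW polarity, yields GPIOD_OUT_LOW:
+- * the pull-up keeps the line physically high, which is the logically
+- * inactive level for an active-low consumer.
+- */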
+-
+-static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
+-                                              struct acpi_resource_gpio *agpio,
+-                                              unsigned int index,
+-                                              const char *label)
+-{
+-      int polarity = GPIO_ACTIVE_HIGH;
+-      enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity);
+-      unsigned int pin = agpio->pin_table[index];
+-      struct gpio_desc *desc;
+-      int ret;
+-
+-      desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags);
+-      if (IS_ERR(desc))
+-              return desc;
+-
+-      /* ACPI uses hundredths of milliseconds units */
+-      ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
+-      if (ret)
+-              dev_warn(chip->parent,
+-                       "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
+-                       pin, ret);
+-
+-      return desc;
+-}
+-
+-bool acpi_gpio_add_to_deferred_list(struct list_head *list)
+-{
+-      bool defer;
+-
+-      mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+-      defer = !acpi_gpio_deferred_req_irqs_done;
+-      if (defer)
+-              list_add(list, &acpi_gpio_deferred_req_irqs_list);
+-      mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+-
+-      return defer;
+-}
+-
+-void acpi_gpio_remove_from_deferred_list(struct list_head *list)
+-{
+-      mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+-      if (!list_empty(list))
+-              list_del_init(list);
+-      mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+-}
+-
+-int acpi_gpio_need_run_edge_events_on_boot(void)
+-{
+-      return run_edge_events_on_boot;
+-}
+-
+-bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list, const char *controller_in,
+-                            unsigned int pin_in)
+-{
+-      const char *ignore_list, *controller, *pin_str;
+-      unsigned int pin;
+-      char *endp;
+-      int len;
+-
+-      switch (list) {
+-      case ACPI_GPIO_IGNORE_WAKE:
+-              ignore_list = ignore_wake;
+-              break;
+-      case ACPI_GPIO_IGNORE_INTERRUPT:
+-              ignore_list = ignore_interrupt;
+-              break;
+-      default:
+-              return false;
+-      }
+-
+-      controller = ignore_list;
+-      while (controller) {
+-              pin_str = strchr(controller, '@');
+-              if (!pin_str)
+-                      goto err;
+-
+-              len = pin_str - controller;
+-              if (len == strlen(controller_in) &&
+-                  strncmp(controller, controller_in, len) == 0) {
+-                      pin = simple_strtoul(pin_str + 1, &endp, 10);
+-                      if (*endp != 0 && *endp != ',')
+-                              goto err;
+-
+-                      if (pin == pin_in)
+-                              return true;
+-              }
+-
+-              controller = strchr(controller, ',');
+-              if (controller)
+-                      controller++;
+-      }
+-
+-      return false;
+-err:
+-      pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list);
+-      return false;
+-}
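+-
+-/*
+- * The ignore lists are comma-separated "controller@pin" tokens, as used by
+- * the quirk table and module parameters below, e.g. (values borrowed from
+- * the HP X2 10 quirk): gpiolib_acpi.ignore_wake="INT33FF:01@0,INT0002:00@2"
+- */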
+-
+-static bool acpi_gpio_irq_is_wake(struct device *parent,
+-                                const struct acpi_resource_gpio *agpio)
+-{
+-      unsigned int pin = agpio->pin_table[0];
+-
+-      if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
+-              return false;
+-
+-      if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_WAKE, dev_name(parent), pin)) {
+-              dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
+-              return false;
+-      }
+-
+-      return true;
+-}
+-
+-/* Always returns AE_OK so that we keep looping over the resources */
+-static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
+-                                           void *context)
+-{
+-      struct acpi_gpio_chip *acpi_gpio = context;
+-      struct gpio_chip *chip = acpi_gpio->chip;
+-      struct acpi_resource_gpio *agpio;
+-      acpi_handle handle, evt_handle;
+-      struct acpi_gpio_event *event;
+-      irq_handler_t handler = NULL;
+-      struct gpio_desc *desc;
+-      unsigned int pin;
+-      int ret, irq;
+-
+-      if (!acpi_gpio_get_irq_resource(ares, &agpio))
+-              return AE_OK;
+-
+-      handle = ACPI_HANDLE(chip->parent);
+-      pin = agpio->pin_table[0];
+-
+-      if (pin <= 255) {
+-              char ev_name[8];
+-              sprintf(ev_name, "_%c%02X",
+-                      agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
+-                      pin);
+-              if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
+-                      handler = acpi_gpio_irq_handler;
+-      }
+-      if (!handler) {
+-              if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle)))
+-                      handler = acpi_gpio_irq_handler_evt;
+-      }
+-      if (!handler)
+-              return AE_OK;
+-
+-      if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_INTERRUPT, dev_name(chip->parent), pin)) {
+-              dev_info(chip->parent, "Ignoring interrupt on pin %u\n", pin);
+-              return AE_OK;
+-      }
+-
+-      desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event");
+-      if (IS_ERR(desc)) {
+-              dev_err(chip->parent,
+-                      "Failed to request GPIO for pin 0x%04X, err %ld\n",
+-                      pin, PTR_ERR(desc));
+-              return AE_OK;
+-      }
+-
+-      ret = gpiochip_lock_as_irq(chip, pin);
+-      if (ret) {
+-              dev_err(chip->parent,
+-                      "Failed to lock GPIO pin 0x%04X as interrupt, err %d\n",
+-                      pin, ret);
+-              goto fail_free_desc;
+-      }
+-
+-      irq = gpiod_to_irq(desc);
+-      if (irq < 0) {
+-              dev_err(chip->parent,
+-                      "Failed to translate GPIO pin 0x%04X to IRQ, err %d\n",
+-                      pin, irq);
+-              goto fail_unlock_irq;
+-      }
+-
+-      event = kzalloc(sizeof(*event), GFP_KERNEL);
+-      if (!event)
+-              goto fail_unlock_irq;
+-
+-      event->irqflags = IRQF_ONESHOT;
+-      if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
+-              if (agpio->polarity == ACPI_ACTIVE_HIGH)
+-                      event->irqflags |= IRQF_TRIGGER_HIGH;
+-              else
+-                      event->irqflags |= IRQF_TRIGGER_LOW;
+-      } else {
+-              switch (agpio->polarity) {
+-              case ACPI_ACTIVE_HIGH:
+-                      event->irqflags |= IRQF_TRIGGER_RISING;
+-                      break;
+-              case ACPI_ACTIVE_LOW:
+-                      event->irqflags |= IRQF_TRIGGER_FALLING;
+-                      break;
+-              default:
+-                      event->irqflags |= IRQF_TRIGGER_RISING |
+-                                         IRQF_TRIGGER_FALLING;
+-                      break;
+-              }
+-      }
+-
+-      event->handle = evt_handle;
+-      event->handler = handler;
+-      event->irq = irq;
+-      event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio);
+-      event->pin = pin;
+-      event->desc = desc;
+-
+-      list_add_tail(&event->node, &acpi_gpio->events);
+-
+-      return AE_OK;
+-
+-fail_unlock_irq:
+-      gpiochip_unlock_as_irq(chip, pin);
+-fail_free_desc:
+-      gpiochip_free_own_desc(desc);
+-
+-      return AE_OK;
+-}
+-
+-/**
+- * acpi_gpiochip_request_interrupts() - Register isr for gpio chip ACPI events
+- * @chip:      GPIO chip
+- *
+- * ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are
+- * handled by ACPI event methods which need to be called from the GPIO
+- * chip's interrupt handler. acpi_gpiochip_request_interrupts() finds out which
+- * GPIO pins have ACPI event methods and assigns interrupt handlers that call
+- * the ACPI event methods for those pins.
+- */
+-void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
+-{
+-      struct acpi_gpio_chip *acpi_gpio;
+-      acpi_handle handle;
+-      acpi_status status;
+-
+-      if (!chip->parent || !chip->to_irq)
+-              return;
+-
+-      handle = ACPI_HANDLE(chip->parent);
+-      if (!handle)
+-              return;
+-
+-      status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
+-      if (ACPI_FAILURE(status))
+-              return;
+-
+-      if (acpi_quirk_skip_gpio_event_handlers())
+-              return;
+-
+-      acpi_walk_resources(handle, METHOD_NAME__AEI,
+-                          acpi_gpiochip_alloc_event, acpi_gpio);
+-
+-      if (acpi_gpio_add_to_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry))
+-              return;
+-
+-      acpi_gpiochip_request_irqs(acpi_gpio);
+-}
+-EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
+-
+-/**
+- * acpi_gpiochip_free_interrupts() - Free GPIO ACPI event interrupts.
+- * @chip:      GPIO chip
+- *
+- * Free interrupts associated with GPIO ACPI event method for the given
+- * GPIO chip.
+- */
+-void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
+-{
+-      struct acpi_gpio_chip *acpi_gpio;
+-      struct acpi_gpio_event *event, *ep;
+-      acpi_handle handle;
+-      acpi_status status;
+-
+-      if (!chip->parent || !chip->to_irq)
+-              return;
+-
+-      handle = ACPI_HANDLE(chip->parent);
+-      if (!handle)
+-              return;
+-
+-      status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
+-      if (ACPI_FAILURE(status))
+-              return;
+-
+-      acpi_gpio_remove_from_deferred_list(&acpi_gpio->deferred_req_irqs_list_entry);
+-
+-      list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
+-              if (event->irq_requested) {
+-                      if (event->irq_is_wake)
+-                              disable_irq_wake(event->irq);
+-
+-                      free_irq(event->irq, event);
+-              }
+-
+-              gpiochip_unlock_as_irq(chip, event->pin);
+-              gpiochip_free_own_desc(event->desc);
+-              list_del(&event->node);
+-              kfree(event);
+-      }
+-}
+-EXPORT_SYMBOL_GPL(acpi_gpiochip_free_interrupts);
+-
+-void __init acpi_gpio_process_deferred_list(struct list_head *list)
+-{
+-      struct acpi_gpio_chip *acpi_gpio, *tmp;
+-
+-      list_for_each_entry_safe(acpi_gpio, tmp, list, deferred_req_irqs_list_entry)
+-              acpi_gpiochip_request_irqs(acpi_gpio);
+-}
+-
+-int acpi_dev_add_driver_gpios(struct acpi_device *adev,
+-                            const struct acpi_gpio_mapping *gpios)
+-{
+-      if (adev && gpios) {
+-              adev->driver_gpios = gpios;
+-              return 0;
+-      }
+-      return -EINVAL;
+-}
+-EXPORT_SYMBOL_GPL(acpi_dev_add_driver_gpios);
+-
+-void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
+-{
+-      if (adev)
+-              adev->driver_gpios = NULL;
+-}
+-EXPORT_SYMBOL_GPL(acpi_dev_remove_driver_gpios);
+-
+-static void acpi_dev_release_driver_gpios(void *adev)
+-{
+-      acpi_dev_remove_driver_gpios(adev);
+-}
+-
+-int devm_acpi_dev_add_driver_gpios(struct device *dev,
+-                                 const struct acpi_gpio_mapping *gpios)
+-{
+-      struct acpi_device *adev = ACPI_COMPANION(dev);
+-      int ret;
+-
+-      ret = acpi_dev_add_driver_gpios(adev, gpios);
+-      if (ret)
+-              return ret;
+-
+-      return devm_add_action_or_reset(dev, acpi_dev_release_driver_gpios, adev);
+-}
+-EXPORT_SYMBOL_GPL(devm_acpi_dev_add_driver_gpios);
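+-
+-/*
+- * Consumer-side sketch (hypothetical driver code): map a named GPIO to a
+- * _CRS entry for the lifetime of the device. The three acpi_gpio_params
+- * fields are crs_entry_index, line_index and active_low, matching how
+- * acpi_get_driver_gpio_data() consumes them below:
+- *
+- *    static const struct acpi_gpio_params reset_gpio = { 1, 0, false };
+- *    static const struct acpi_gpio_mapping my_acpi_gpios[] = {
+- *            { "reset-gpios", &reset_gpio, 1 },
+- *            { }
+- *    };
+- *
+- *    ret = devm_acpi_dev_add_driver_gpios(dev, my_acpi_gpios);
+- */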
+-
+-static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
+-                                    const char *name, int index,
+-                                    struct fwnode_reference_args *args,
+-                                    unsigned int *quirks)
+-{
+-      const struct acpi_gpio_mapping *gm;
+-
+-      if (!adev || !adev->driver_gpios)
+-              return false;
+-
+-      for (gm = adev->driver_gpios; gm->name; gm++)
+-              if (!strcmp(name, gm->name) && gm->data && index < gm->size) {
+-                      const struct acpi_gpio_params *par = gm->data + index;
+-
+-                      args->fwnode = acpi_fwnode_handle(adev);
+-                      args->args[0] = par->crs_entry_index;
+-                      args->args[1] = par->line_index;
+-                      args->args[2] = par->active_low;
+-                      args->nargs = 3;
+-
+-                      *quirks = gm->quirks;
+-                      return true;
+-              }
+-
+-      return false;
+-}
+-
+-static int
+-__acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
+-{
+-      const enum gpiod_flags mask =
+-              GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
+-              GPIOD_FLAGS_BIT_DIR_VAL;
+-      int ret = 0;
+-
+-      /*
+-       * Check if the BIOS has IoRestriction with explicitly set direction
+-       * and update @flags accordingly. Otherwise use whatever caller asked
+-       * for.
+-       */
+-      if (update & GPIOD_FLAGS_BIT_DIR_SET) {
+-              enum gpiod_flags diff = *flags ^ update;
+-
+-              /*
+-               * Check if caller supplied incompatible GPIO initialization
+-               * flags.
+-               *
+-               * Return %-EINVAL to notify that firmware has different
+-               * settings and we are going to use them.
+-               */
+-              if (((*flags & GPIOD_FLAGS_BIT_DIR_SET) && (diff & GPIOD_FLAGS_BIT_DIR_OUT)) ||
+-                  ((*flags & GPIOD_FLAGS_BIT_DIR_OUT) && (diff & GPIOD_FLAGS_BIT_DIR_VAL)))
+-                      ret = -EINVAL;
+-              *flags = (*flags & ~mask) | (update & mask);
+-      }
+-      return ret;
+-}
+-
+-static int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
+-                                      struct acpi_gpio_info *info)
+-{
+-      struct device *dev = &info->adev->dev;
+-      enum gpiod_flags old = *flags;
+-      int ret;
+-
+-      ret = __acpi_gpio_update_gpiod_flags(&old, info->flags);
+-      if (info->quirks & ACPI_GPIO_QUIRK_NO_IO_RESTRICTION) {
+-              if (ret)
+-                      dev_warn(dev, FW_BUG "GPIO not in correct mode, fixing\n");
+-      } else {
+-              if (ret)
+-                      dev_dbg(dev, "Override GPIO initialization flags\n");
+-              *flags = old;
+-      }
+-
+-      return ret;
+-}
+-
+-static int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+-                                             struct acpi_gpio_info *info)
+-{
+-      switch (info->pin_config) {
+-      case ACPI_PIN_CONFIG_PULLUP:
+-              *lookupflags |= GPIO_PULL_UP;
+-              break;
+-      case ACPI_PIN_CONFIG_PULLDOWN:
+-              *lookupflags |= GPIO_PULL_DOWN;
+-              break;
+-      case ACPI_PIN_CONFIG_NOPULL:
+-              *lookupflags |= GPIO_PULL_DISABLE;
+-              break;
+-      default:
+-              break;
+-      }
+-
+-      if (info->polarity == GPIO_ACTIVE_LOW)
+-              *lookupflags |= GPIO_ACTIVE_LOW;
+-
+-      return 0;
+-}
+-
+-struct acpi_gpio_lookup {
+-      struct acpi_gpio_info info;
+-      int index;
+-      u16 pin_index;
+-      bool active_low;
+-      struct gpio_desc *desc;
+-      int n;
+-};
+-
+-static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
+-{
+-      struct acpi_gpio_lookup *lookup = data;
+-
+-      if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
+-              return 1;
+-
+-      if (!lookup->desc) {
+-              const struct acpi_resource_gpio *agpio = &ares->data.gpio;
+-              bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
+-              struct gpio_desc *desc;
+-              u16 pin_index;
+-
+-              if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
+-                      lookup->index++;
+-
+-              if (lookup->n++ != lookup->index)
+-                      return 1;
+-
+-              pin_index = lookup->pin_index;
+-              if (pin_index >= agpio->pin_table_length)
+-                      return 1;
+-
+-              if (lookup->info.quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER)
+-                      desc = gpio_to_desc(agpio->pin_table[pin_index]);
+-              else
+-                      desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
+-                                            agpio->pin_table[pin_index]);
+-              lookup->desc = desc;
+-              lookup->info.pin_config = agpio->pin_config;
+-              lookup->info.debounce = agpio->debounce_timeout;
+-              lookup->info.gpioint = gpioint;
+-              lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio);
+-
+-              /*
+-               * Polarity and triggering are only specified for GpioInt
+-               * resources.
+-               * Note: we expect here:
+-               * - ACPI_ACTIVE_LOW == GPIO_ACTIVE_LOW
+-               * - ACPI_ACTIVE_HIGH == GPIO_ACTIVE_HIGH
+-               */
+-              if (lookup->info.gpioint) {
+-                      lookup->info.polarity = agpio->polarity;
+-                      lookup->info.triggering = agpio->triggering;
+-              } else {
+-                      lookup->info.polarity = lookup->active_low;
+-              }
+-
+-              lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio, lookup->info.polarity);
+-      }
+-
+-      return 1;
+-}
+-
+-static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
+-                                   struct acpi_gpio_info *info)
+-{
+-      struct acpi_device *adev = lookup->info.adev;
+-      struct list_head res_list;
+-      int ret;
+-
+-      INIT_LIST_HEAD(&res_list);
+-
+-      ret = acpi_dev_get_resources(adev, &res_list,
+-                                   acpi_populate_gpio_lookup,
+-                                   lookup);
+-      if (ret < 0)
+-              return ret;
+-
+-      acpi_dev_free_resource_list(&res_list);
+-
+-      if (!lookup->desc)
+-              return -ENOENT;
+-
+-      if (info)
+-              *info = lookup->info;
+-      return 0;
+-}
+-
+-static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
+-                                   const char *propname, int index,
+-                                   struct acpi_gpio_lookup *lookup)
+-{
+-      struct fwnode_reference_args args;
+-      unsigned int quirks = 0;
+-      int ret;
+-
+-      memset(&args, 0, sizeof(args));
+-      ret = __acpi_node_get_property_reference(fwnode, propname, index, 3,
+-                                               &args);
+-      if (ret) {
+-              struct acpi_device *adev;
+-
+-              adev = to_acpi_device_node(fwnode);
+-              if (!acpi_get_driver_gpio_data(adev, propname, index, &args, &quirks))
+-                      return ret;
+-      }
+-      /*
+-       * The property was found and resolved, so we need to look up the GPIO
+-       * based on the returned args.
+-       */
+-      if (!to_acpi_device_node(args.fwnode))
+-              return -EINVAL;
+-      if (args.nargs != 3)
+-              return -EPROTO;
+-
+-      lookup->index = args.args[0];
+-      lookup->pin_index = args.args[1];
+-      lookup->active_low = !!args.args[2];
+-
+-      lookup->info.adev = to_acpi_device_node(args.fwnode);
+-      lookup->info.quirks = quirks;
+-
+-      return 0;
+-}
+-
+-/**
+- * acpi_get_gpiod_by_index() - get a GPIO descriptor from device resources
+- * @adev: pointer to a ACPI device to get GPIO from
+- * @propname: Property name of the GPIO (optional)
+- * @index: index of GpioIo/GpioInt resource (starting from %0)
+- * @info: info pointer to fill in (optional)
+- *
+- * The function goes through the ACPI resources for @adev and, based on
+- * @index, looks up a GpioIo/GpioInt resource, translates it to the Linux
+- * GPIO descriptor, and returns it. @index matches GpioIo/GpioInt resources
+- * only, so if there are a total of %3 GPIO resources, the index goes from
+- * %0 to %2.
+- *
+- * If @propname is specified, the GPIO is looked up using the device
+- * property. In that case @index is used to select the GPIO entry in the
+- * property value (in case there are multiple).
+- *
+- * Returns:
+- * GPIO descriptor to use with Linux generic GPIO API.
+- * If the GPIO cannot be translated or there is an error an ERR_PTR is
+- * returned.
+- *
+- * Note: if the GPIO resource has multiple entries in the pin list, this
+- * function only returns the first.
+- */
+-static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
+-                                               const char *propname,
+-                                               int index,
+-                                               struct acpi_gpio_info *info)
+-{
+-      struct acpi_gpio_lookup lookup;
+-      int ret;
+-
+-      memset(&lookup, 0, sizeof(lookup));
+-      lookup.index = index;
+-
+-      if (propname) {
+-              dev_dbg(&adev->dev, "GPIO: looking up %s\n", propname);
+-
+-              ret = acpi_gpio_property_lookup(acpi_fwnode_handle(adev),
+-                                              propname, index, &lookup);
+-              if (ret)
+-                      return ERR_PTR(ret);
+-
+-              dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %u %u\n",
+-                      dev_name(&lookup.info.adev->dev), lookup.index,
+-                      lookup.pin_index, lookup.active_low);
+-      } else {
+-              dev_dbg(&adev->dev, "GPIO: looking up %d in _CRS\n", index);
+-              lookup.info.adev = adev;
+-      }
+-
+-      ret = acpi_gpio_resource_lookup(&lookup, info);
+-      return ret ? ERR_PTR(ret) : lookup.desc;
+-}
+-
+-/**
+- * acpi_get_gpiod_from_data() - get a GPIO descriptor from ACPI data node
+- * @fwnode: pointer to an ACPI firmware node to get the GPIO information from
+- * @propname: Property name of the GPIO
+- * @index: index of GpioIo/GpioInt resource (starting from %0)
+- * @info: info pointer to fill in (optional)
+- *
+- * This function uses the property-based GPIO lookup to get to the GPIO
+- * resource with the relevant information from a data-only ACPI firmware node
+- * and uses that to obtain the GPIO descriptor to return.
+- *
+- * Returns:
+- * GPIO descriptor to use with Linux generic GPIO API.
+- * If the GPIO cannot be translated or there is an error an ERR_PTR is
+- * returned.
+- */
+-static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode,
+-                                                const char *propname,
+-                                                int index,
+-                                                struct acpi_gpio_info *info)
+-{
+-      struct acpi_gpio_lookup lookup;
+-      int ret;
+-
+-      if (!is_acpi_data_node(fwnode))
+-              return ERR_PTR(-ENODEV);
+-
+-      if (!propname)
+-              return ERR_PTR(-EINVAL);
+-
+-      memset(&lookup, 0, sizeof(lookup));
+-      lookup.index = index;
+-
+-      ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup);
+-      if (ret)
+-              return ERR_PTR(ret);
+-
+-      ret = acpi_gpio_resource_lookup(&lookup, info);
+-      return ret ? ERR_PTR(ret) : lookup.desc;
+-}
+-
+-static bool acpi_can_fallback_to_crs(struct acpi_device *adev,
+-                                   const char *con_id)
+-{
+-      /* If there is no ACPI device, there is no _CRS to fall back to */
+-      if (!adev)
+-              return false;
+-
+-      /* Never allow fallback if the device has properties */
+-      if (acpi_dev_has_props(adev) || adev->driver_gpios)
+-              return false;
+-
+-      return con_id == NULL;
+-}
+-
+-static struct gpio_desc *
+-__acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int idx,
+-               bool can_fallback, struct acpi_gpio_info *info)
+-{
+-      struct acpi_device *adev = to_acpi_device_node(fwnode);
+-      struct gpio_desc *desc;
+-      char propname[32];
+-
+-      /* Try first from _DSD */
+-      for_each_gpio_property_name(propname, con_id) {
+-              if (adev)
+-                      desc = acpi_get_gpiod_by_index(adev,
+-                                                     propname, idx, info);
+-              else
+-                      desc = acpi_get_gpiod_from_data(fwnode,
+-                                                      propname, idx, info);
+-              if (PTR_ERR(desc) == -EPROBE_DEFER)
+-                      return ERR_CAST(desc);
+-
+-              if (!IS_ERR(desc))
+-                      return desc;
+-      }
+-
+-      /* Then from plain _CRS GPIOs */
+-      if (can_fallback)
+-              return acpi_get_gpiod_by_index(adev, NULL, idx, info);
+-
+-      return ERR_PTR(-ENOENT);
+-}
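+-
+-/*
+- * Lookup order illustration (hypothetical con_id "reset"): the _DSD pass
+- * tries the property names "reset-gpios" and then "reset-gpio" (plain
+- * "gpios"/"gpio" when con_id is NULL) before optionally falling back to
+- * raw _CRS GpioIo/GpioInt entries.
+- */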
+-
+-struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
+-                               const char *con_id,
+-                               unsigned int idx,
+-                               enum gpiod_flags *dflags,
+-                               unsigned long *lookupflags)
+-{
+-      struct acpi_device *adev = to_acpi_device_node(fwnode);
+-      bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
+-      struct acpi_gpio_info info;
+-      struct gpio_desc *desc;
+-
+-      desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
+-      if (IS_ERR(desc))
+-              return desc;
+-
+-      if (info.gpioint &&
+-          (*dflags == GPIOD_OUT_LOW || *dflags == GPIOD_OUT_HIGH)) {
+-              dev_dbg(&adev->dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
+-              return ERR_PTR(-ENOENT);
+-      }
+-
+-      acpi_gpio_update_gpiod_flags(dflags, &info);
+-      acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
+-      return desc;
+-}
+-
+-/**
+- * acpi_dev_gpio_irq_wake_get_by() - Find GpioInt and translate it to Linux IRQ number
+- * @adev: pointer to a ACPI device to get IRQ from
+- * @con_id: optional name of GpioInt resource
+- * @index: index of GpioInt resource (starting from %0)
+- * @wake_capable: Set to true if the IRQ is wake capable
+- *
+- * If the device has one or more GpioInt resources, this function can be
+- * used to translate from the GPIO offset in the resource to the Linux IRQ
+- * number.
+- *
+- * The function is idempotent, though each time it runs it will configure the
+- * GPIO pin direction according to the flags in the GpioInt resource.
+- *
+- * The function takes an optional @con_id parameter. If @con_id is given,
+- * only GpioInt resources referenced through a matching named property are
+- * taken into account.
+- *
+- * The GPIO is considered wake capable if the GpioInt resource specifies
+- * SharedAndWake or ExclusiveAndWake.
+- *
+- * Returns:
+- * Linux IRQ number (> 0) on success, negative errno on failure.
+- */
+-int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index,
+-                                bool *wake_capable)
+-{
+-      struct fwnode_handle *fwnode = acpi_fwnode_handle(adev);
+-      int idx, i;
+-      unsigned int irq_flags;
+-      int ret;
+-
+-      for (i = 0, idx = 0; idx <= index; i++) {
+-              struct acpi_gpio_info info;
+-              struct gpio_desc *desc;
+-
+-              /* Ignore -EPROBE_DEFER, it only matters if idx matches */
+-              desc = __acpi_find_gpio(fwnode, con_id, i, true, &info);
+-              if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
+-                      return PTR_ERR(desc);
+-
+-              if (info.gpioint && idx++ == index) {
+-                      unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+-                      enum gpiod_flags dflags = GPIOD_ASIS;
+-                      char label[32];
+-                      int irq;
+-
+-                      if (IS_ERR(desc))
+-                              return PTR_ERR(desc);
+-
+-                      irq = gpiod_to_irq(desc);
+-                      if (irq < 0)
+-                              return irq;
+-
+-                      acpi_gpio_update_gpiod_flags(&dflags, &info);
+-                      acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
+-
+-                      snprintf(label, sizeof(label), "%pfwP GpioInt(%d)", fwnode, index);
+-                      ret = gpiod_set_consumer_name(desc, con_id ?: label);
+-                      if (ret)
+-                              return ret;
+-
+-                      ret = gpiod_configure_flags(desc, label, lflags, dflags);
+-                      if (ret < 0)
+-                              return ret;
+-
+-                      /* ACPI uses hundredths of milliseconds units */
+-                      ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
+-                      if (ret)
+-                              return ret;
+-
+-                      irq_flags = acpi_dev_get_irq_type(info.triggering,
+-                                                        info.polarity);
+-
+-                      /*
+-                       * If the IRQ is not already in use then set type
+-                       * if specified and different than the current one.
+-                       */
+-                      if (can_request_irq(irq, irq_flags)) {
+-                              if (irq_flags != IRQ_TYPE_NONE &&
+-                                  irq_flags != irq_get_trigger_type(irq))
+-                                      irq_set_irq_type(irq, irq_flags);
+-                      } else {
+-                              dev_dbg(&adev->dev, "IRQ %d already in use\n", irq);
+-                      }
+-
+-                      /* avoid suspend issues with GPIOs when systems are using S3 */
+-                      if (wake_capable && acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
+-                              *wake_capable = info.wake_capable;
+-
+-                      return irq;
+-              }
+-
+-      }
+-      return -ENOENT;
+-}
+-EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_wake_get_by);
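+-
+-/*
+- * Consumer-side sketch (hypothetical): translate the first GpioInt of a
+- * device to a Linux IRQ and learn whether it may wake the system:
+- *
+- *    bool wake_capable;
+- *    int irq = acpi_dev_gpio_irq_wake_get_by(adev, NULL, 0, &wake_capable);
+- *
+- *    if (irq < 0)
+- *            return irq;
+- */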
+-
+-static acpi_status
+-acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
+-                          u32 bits, u64 *value, void *handler_context,
+-                          void *region_context)
+-{
+-      struct acpi_gpio_chip *achip = region_context;
+-      struct gpio_chip *chip = achip->chip;
+-      struct acpi_resource_gpio *agpio;
+-      struct acpi_resource *ares;
+-      u16 pin_index = address;
+-      acpi_status status;
+-      int length;
+-      int i;
+-
+-      status = acpi_buffer_to_resource(achip->conn_info.connection,
+-                                       achip->conn_info.length, &ares);
+-      if (ACPI_FAILURE(status))
+-              return status;
+-
+-      if (WARN_ON(ares->type != ACPI_RESOURCE_TYPE_GPIO)) {
+-              ACPI_FREE(ares);
+-              return AE_BAD_PARAMETER;
+-      }
+-
+-      agpio = &ares->data.gpio;
+-
+-      if (WARN_ON(agpio->io_restriction == ACPI_IO_RESTRICT_INPUT &&
+-          function == ACPI_WRITE)) {
+-              ACPI_FREE(ares);
+-              return AE_BAD_PARAMETER;
+-      }
+-
+-      length = min_t(u16, agpio->pin_table_length, pin_index + bits);
+-      for (i = pin_index; i < length; ++i) {
+-              unsigned int pin = agpio->pin_table[i];
+-              struct acpi_gpio_connection *conn;
+-              struct gpio_desc *desc;
+-              bool found;
+-
+-              mutex_lock(&achip->conn_lock);
+-
+-              found = false;
+-              list_for_each_entry(conn, &achip->conns, node) {
+-                      if (conn->pin == pin) {
+-                              found = true;
+-                              desc = conn->desc;
+-                              break;
+-                      }
+-              }
+-
+-              /*
+-               * The same GPIO can be shared between operation region and
+-               * event but only if the access here is ACPI_READ. In that
+-               * case we "borrow" the event GPIO instead.
+-               */
+-              if (!found && agpio->shareable == ACPI_SHARED &&
+-                   function == ACPI_READ) {
+-                      struct acpi_gpio_event *event;
+-
+-                      list_for_each_entry(event, &achip->events, node) {
+-                              if (event->pin == pin) {
+-                                      desc = event->desc;
+-                                      found = true;
+-                                      break;
+-                              }
+-                      }
+-              }
+-
+-              if (!found) {
+-                      desc = acpi_request_own_gpiod(chip, agpio, i, "ACPI:OpRegion");
+-                      if (IS_ERR(desc)) {
+-                              mutex_unlock(&achip->conn_lock);
+-                              status = AE_ERROR;
+-                              goto out;
+-                      }
+-
+-                      conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+-                      if (!conn) {
+-                              gpiochip_free_own_desc(desc);
+-                              mutex_unlock(&achip->conn_lock);
+-                              status = AE_NO_MEMORY;
+-                              goto out;
+-                      }
+-
+-                      conn->pin = pin;
+-                      conn->desc = desc;
+-                      list_add_tail(&conn->node, &achip->conns);
+-              }
+-
+-              mutex_unlock(&achip->conn_lock);
+-
+-              if (function == ACPI_WRITE)
+-                      gpiod_set_raw_value_cansleep(desc, !!(*value & BIT(i)));
+-              else
+-                      *value |= (u64)gpiod_get_raw_value_cansleep(desc) << i;
+-      }
+-
+-out:
+-      ACPI_FREE(ares);
+-      return status;
+-}
+-
+-static void acpi_gpiochip_request_regions(struct acpi_gpio_chip *achip)
+-{
+-      struct gpio_chip *chip = achip->chip;
+-      acpi_handle handle = ACPI_HANDLE(chip->parent);
+-      acpi_status status;
+-
+-      INIT_LIST_HEAD(&achip->conns);
+-      mutex_init(&achip->conn_lock);
+-      status = acpi_install_address_space_handler(handle, ACPI_ADR_SPACE_GPIO,
+-                                                  acpi_gpio_adr_space_handler,
+-                                                  NULL, achip);
+-      if (ACPI_FAILURE(status))
+-              dev_err(chip->parent,
+-                      "Failed to install GPIO OpRegion handler\n");
+-}
+-
+-static void acpi_gpiochip_free_regions(struct acpi_gpio_chip *achip)
+-{
+-      struct gpio_chip *chip = achip->chip;
+-      acpi_handle handle = ACPI_HANDLE(chip->parent);
+-      struct acpi_gpio_connection *conn, *tmp;
+-      acpi_status status;
+-
+-      status = acpi_remove_address_space_handler(handle, ACPI_ADR_SPACE_GPIO,
+-                                                 acpi_gpio_adr_space_handler);
+-      if (ACPI_FAILURE(status)) {
+-              dev_err(chip->parent,
+-                      "Failed to remove GPIO OpRegion handler\n");
+-              return;
+-      }
+-
+-      list_for_each_entry_safe_reverse(conn, tmp, &achip->conns, node) {
+-              gpiochip_free_own_desc(conn->desc);
+-              list_del(&conn->node);
+-              kfree(conn);
+-      }
+-}
+-
+-static struct gpio_desc *
+-acpi_gpiochip_parse_own_gpio(struct acpi_gpio_chip *achip,
+-                           struct fwnode_handle *fwnode,
+-                           const char **name,
+-                           unsigned long *lflags,
+-                           enum gpiod_flags *dflags)
+-{
+-      struct gpio_chip *chip = achip->chip;
+-      struct gpio_desc *desc;
+-      u32 gpios[2];
+-      int ret;
+-
+-      *lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+-      *dflags = GPIOD_ASIS;
+-      *name = NULL;
+-
+-      ret = fwnode_property_read_u32_array(fwnode, "gpios", gpios,
+-                                           ARRAY_SIZE(gpios));
+-      if (ret < 0)
+-              return ERR_PTR(ret);
+-
+-      desc = gpiochip_get_desc(chip, gpios[0]);
+-      if (IS_ERR(desc))
+-              return desc;
+-
+-      if (gpios[1])
+-              *lflags |= GPIO_ACTIVE_LOW;
+-
+-      if (fwnode_property_present(fwnode, "input"))
+-              *dflags |= GPIOD_IN;
+-      else if (fwnode_property_present(fwnode, "output-low"))
+-              *dflags |= GPIOD_OUT_LOW;
+-      else if (fwnode_property_present(fwnode, "output-high"))
+-              *dflags |= GPIOD_OUT_HIGH;
+-      else
+-              return ERR_PTR(-EINVAL);
+-
+-      fwnode_property_read_string(fwnode, "line-name", name);
+-
+-      return desc;
+-}
+-
+-static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip)
+-{
+-      struct gpio_chip *chip = achip->chip;
+-      struct fwnode_handle *fwnode;
+-
+-      device_for_each_child_node(chip->parent, fwnode) {
+-              unsigned long lflags;
+-              enum gpiod_flags dflags;
+-              struct gpio_desc *desc;
+-              const char *name;
+-              int ret;
+-
+-              if (!fwnode_property_present(fwnode, "gpio-hog"))
+-                      continue;
+-
+-              desc = acpi_gpiochip_parse_own_gpio(achip, fwnode, &name,
+-                                                  &lflags, &dflags);
+-              if (IS_ERR(desc))
+-                      continue;
+-
+-              ret = gpiod_hog(desc, name, lflags, dflags);
+-              if (ret) {
+-                      dev_err(chip->parent, "Failed to hog GPIO\n");
+-                      fwnode_handle_put(fwnode);
+-                      return;
+-              }
+-      }
+-}
+-
+-void acpi_gpiochip_add(struct gpio_chip *chip)
+-{
+-      struct acpi_gpio_chip *acpi_gpio;
+-      struct acpi_device *adev;
+-      acpi_status status;
+-
+-      if (!chip || !chip->parent)
+-              return;
+-
+-      adev = ACPI_COMPANION(chip->parent);
+-      if (!adev)
+-              return;
+-
+-      acpi_gpio = kzalloc(sizeof(*acpi_gpio), GFP_KERNEL);
+-      if (!acpi_gpio) {
+-              dev_err(chip->parent,
+-                      "Failed to allocate memory for ACPI GPIO chip\n");
+-              return;
+-      }
+-
+-      acpi_gpio->chip = chip;
+-      INIT_LIST_HEAD(&acpi_gpio->events);
+-      INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
+-
+-      status = acpi_attach_data(adev->handle, acpi_gpio_chip_dh, acpi_gpio);
+-      if (ACPI_FAILURE(status)) {
+-              dev_err(chip->parent, "Failed to attach ACPI GPIO chip\n");
+-              kfree(acpi_gpio);
+-              return;
+-      }
+-
+-      acpi_gpiochip_request_regions(acpi_gpio);
+-      acpi_gpiochip_scan_gpios(acpi_gpio);
+-      acpi_dev_clear_dependencies(adev);
+-}
+-
+-void acpi_gpiochip_remove(struct gpio_chip *chip)
+-{
+-      struct acpi_gpio_chip *acpi_gpio;
+-      acpi_handle handle;
+-      acpi_status status;
+-
+-      if (!chip || !chip->parent)
+-              return;
+-
+-      handle = ACPI_HANDLE(chip->parent);
+-      if (!handle)
+-              return;
+-
+-      status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
+-      if (ACPI_FAILURE(status)) {
+-              dev_warn(chip->parent, "Failed to retrieve ACPI GPIO chip\n");
+-              return;
+-      }
+-
+-      acpi_gpiochip_free_regions(acpi_gpio);
+-
+-      acpi_detach_data(handle, acpi_gpio_chip_dh);
+-      kfree(acpi_gpio);
+-}
+-
+-static int acpi_gpio_package_count(const union acpi_object *obj)
+-{
+-      const union acpi_object *element = obj->package.elements;
+-      const union acpi_object *end = element + obj->package.count;
+-      unsigned int count = 0;
+-
+-      while (element < end) {
+-              switch (element->type) {
+-              case ACPI_TYPE_LOCAL_REFERENCE:
+-                      element += 3;
+-                      fallthrough;
+-              case ACPI_TYPE_INTEGER:
+-                      element++;
+-                      count++;
+-                      break;
+-
+-              default:
+-                      return -EPROTO;
+-              }
+-      }
+-
+-      return count;
+-}
+-
+-static int acpi_find_gpio_count(struct acpi_resource *ares, void *data)
+-{
+-      unsigned int *count = data;
+-
+-      if (ares->type == ACPI_RESOURCE_TYPE_GPIO)
+-              *count += ares->data.gpio.pin_table_length;
+-
+-      return 1;
+-}
+-
+-/**
+- * acpi_gpio_count - count the GPIOs associated with a firmware node / function
+- * @fwnode:   firmware node of the GPIO consumer
+- * @con_id:   function within the GPIO consumer
+- *
+- * Returns:
+- * The number of GPIOs associated with a firmware node / function, or %-ENOENT
+- * if no GPIO has been assigned to the requested function.
+- */
+-int acpi_gpio_count(const struct fwnode_handle *fwnode, const char *con_id)
+-{
+-      struct acpi_device *adev = to_acpi_device_node(fwnode);
+-      const union acpi_object *obj;
+-      const struct acpi_gpio_mapping *gm;
+-      int count = -ENOENT;
+-      int ret;
+-      char propname[32];
+-
+-      /* Try first from _DSD */
+-      for_each_gpio_property_name(propname, con_id) {
+-              ret = acpi_dev_get_property(adev, propname, ACPI_TYPE_ANY, &obj);
+-              if (ret == 0) {
+-                      if (obj->type == ACPI_TYPE_LOCAL_REFERENCE)
+-                              count = 1;
+-                      else if (obj->type == ACPI_TYPE_PACKAGE)
+-                              count = acpi_gpio_package_count(obj);
+-              } else if (adev->driver_gpios) {
+-                      for (gm = adev->driver_gpios; gm->name; gm++)
+-                              if (strcmp(propname, gm->name) == 0) {
+-                                      count = gm->size;
+-                                      break;
+-                              }
+-              }
+-              if (count > 0)
+-                      break;
+-      }
+-
+-      /* Then from plain _CRS GPIOs */
+-      if (count < 0) {
+-              struct list_head resource_list;
+-              unsigned int crs_count = 0;
+-
+-              if (!acpi_can_fallback_to_crs(adev, con_id))
+-                      return count;
+-
+-              INIT_LIST_HEAD(&resource_list);
+-              acpi_dev_get_resources(adev, &resource_list,
+-                                     acpi_find_gpio_count, &crs_count);
+-              acpi_dev_free_resource_list(&resource_list);
+-              if (crs_count > 0)
+-                      count = crs_count;
+-      }
+-      return count ? count : -ENOENT;
+-}
+-
+-/* Run deferred acpi_gpiochip_request_irqs() */
+-static int __init acpi_gpio_handle_deferred_request_irqs(void)
+-{
+-      mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+-      acpi_gpio_process_deferred_list(&acpi_gpio_deferred_req_irqs_list);
+-      acpi_gpio_deferred_req_irqs_done = true;
+-      mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+-
+-      return 0;
+-}
+-/* We must use _sync so that this runs after the first deferred_probe run */
+-late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
+-
+-static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+-      {
+-              /*
+-               * The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
+-               * a non-existent micro-USB-B connector, which puts the HDMI
+-               * DDC pins in GPIO mode, breaking HDMI support.
+-               */
+-              .matches = {
+-                      DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
+-                      DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
+-              },
+-              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+-                      .no_edge_events_on_boot = true,
+-              },
+-      },
+-      {
+-              /*
+-               * The Terra Pad 1061 has a micro-USB-B id-pin handler which,
+-               * instead of controlling the actual micro-USB-B port, turns
+-               * off the 5V boost for its USB-A connector. The actual
+-               * micro-USB-B connector is wired for charging only.
+-               */
+-              .matches = {
+-                      DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
+-                      DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
+-              },
+-              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+-                      .no_edge_events_on_boot = true,
+-              },
+-      },
+-      {
+-              /*
+-               * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC, uses an
+-               * external embedded-controller connected via I2C + an ACPI GPIO
+-               * event handler on INT33FC:02 pin 12, causing spurious wakeups.
+-               */
+-              .matches = {
+-                      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-                      DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
+-              },
+-              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+-                      .ignore_wake = "INT33FC:02@12",
+-              },
+-      },
+-      {
+-              /*
+-               * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
+-               * external embedded-controller connected via I2C + an ACPI GPIO
+-               * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+-               * When suspending by closing the lid, the power to the USB
+-               * keyboard is turned off, causing INT0002 ACPI events to
+-               * trigger once the XHCI controller notices the keyboard is
+-               * gone. So INT0002 events cause spurious wakeups too. Ignoring
+-               * EC wakes breaks wakeup when opening the lid; the user then
+-               * needs to press the power button to wake up the system. The
+-               * alternative is suspend simply not working, which is worse.
+-               */
+-              .matches = {
+-                      DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+-                      DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
+-              },
+-              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+-                      .ignore_wake = "INT33FF:01@0,INT0002:00@2",
+-              },
+-      },
+-      {
+-              /*
+-               * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
+-               * external embedded-controller connected via I2C + an ACPI GPIO
+-               * event handler on INT33FC:02 pin 28, causing spurious wakeups.
+-               */
+-              .matches = {
+-                      DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+-                      DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+-                      DMI_MATCH(DMI_BOARD_NAME, "815D"),
+-              },
+-              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+-                      .ignore_wake = "INT33FC:02@28",
+-              },
+-      },
+-      {
+-              /*
+-               * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an
+-               * external embedded-controller connected via I2C + an ACPI GPIO
+-               * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+-               */
+-              .matches = {
+-                      DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+-                      DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+-                      DMI_MATCH(DMI_BOARD_NAME, "813E"),
+-              },
+-              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+-                      .ignore_wake = "INT33FF:01@0",
+-              },
+-      },
+-      {
+-              /*
+-               * Interrupt storm caused by an edge-triggered floating pin
+-               * Found in BIOS UX325UAZ.300
+-               * https://bugzilla.kernel.org/show_bug.cgi?id=216208
+-               */
+-              .matches = {
+-                      DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-                      DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"),
+-              },
+-              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+-                      .ignore_interrupt = "AMDI0030:00@18",
+-              },
+-      },
+-      {
+-              /*
+-               * Spurious wakeups from TP_ATTN# pin
+-               * Found in BIOS 1.7.8
+-               * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+-               */
+-              .matches = {
+-                      DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+-              },
+-              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+-                      .ignore_wake = "ELAN0415:00@9",
+-              },
+-      },
+-      {
+-              /*
+-               * Spurious wakeups from TP_ATTN# pin
+-               * Found in BIOS 1.7.8
+-               * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+-               */
+-              .matches = {
+-                      DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+-              },
+-              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+-                      .ignore_wake = "ELAN0415:00@9",
+-              },
+-      },
+-      {
+-              /*
+-               * Spurious wakeups from TP_ATTN# pin
+-               * Found in BIOS 1.7.7
+-               */
+-              .matches = {
+-                      DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
+-              },
+-              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+-                      .ignore_wake = "SYNA1202:00@16",
+-              },
+-      },
+-      {
+-              /*
+-               * On the Peaq C1010 2-in-1 INT33FC:00 pin 3 is connected to
+-               * a "dolby" button. At the ACPI level an _AEI event-handler
+-               * is connected which sets an ACPI variable to 1 on both
+-               * edges. This variable can be polled + cleared to 0 using
+-               * WMI. But since the variable is set on both edges the WMI
+-               * interface is pretty useless even when polling.
+-               * So instead the x86-android-tablets code instantiates
+-               * a gpio-keys platform device for it.
+-               * Ignore the _AEI handler for the pin, so that it is not busy.
+-               */
+-              .matches = {
+-                      DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
+-                      DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
+-              },
+-              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+-                      .ignore_interrupt = "INT33FC:00@3",
+-              },
+-      },
+-      {
+-              /*
+-               * Spurious wakeups from TP_ATTN# pin
+-               * Found in BIOS 0.35
+-               * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
+-               */
+-              .matches = {
+-                      DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+-                      DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
+-              },
+-              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+-                      .ignore_wake = "PNP0C50:00@8",
+-              },
+-      },
+-      {
+-              /*
+-               * Spurious wakeups from GPIO 11
+-               * Found in BIOS 1.04
+-               * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
+-               */
+-              .matches = {
+-                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+-                      DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
+-              },
+-              .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+-                      .ignore_interrupt = "AMDI0030:00@11",
+-              },
+-      },
+-      {} /* Terminating entry */
+-};
+-
+-static int __init acpi_gpio_setup_params(void)
+-{
+-      const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
+-      const struct dmi_system_id *id;
+-
+-      id = dmi_first_match(gpiolib_acpi_quirks);
+-      if (id)
+-              quirk = id->driver_data;
+-
+-      if (run_edge_events_on_boot < 0) {
+-              if (quirk && quirk->no_edge_events_on_boot)
+-                      run_edge_events_on_boot = 0;
+-              else
+-                      run_edge_events_on_boot = 1;
+-      }
+-
+-      if (ignore_wake == NULL && quirk && quirk->ignore_wake)
+-              ignore_wake = quirk->ignore_wake;
+-
+-      if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt)
+-              ignore_interrupt = quirk->ignore_interrupt;
+-
+-      return 0;
+-}
+-
+-/* Directly after dmi_setup() which runs as core_initcall() */
+-postcore_initcall(acpi_gpio_setup_params);
diff --git a/queue-6.12/gpiolib-acpi-switch-to-use-enum-in-acpi_gpio_in_ignore_list.patch b/queue-6.12/gpiolib-acpi-switch-to-use-enum-in-acpi_gpio_in_ignore_list.patch
new file mode 100644 (file)
index 0000000..1b1abac
--- /dev/null
@@ -0,0 +1,93 @@
+From stable+bounces-204349-greg=kroah.com@vger.kernel.org Wed Dec 31 15:32:26 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Dec 2025 09:32:12 -0500
+Subject: gpiolib: acpi: Switch to use enum in acpi_gpio_in_ignore_list()
+To: stable@vger.kernel.org
+Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>, Hans de Goede <hdegoede@redhat.com>, Mika Westerberg <mika.westerberg@linux.intel.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251231143218.3042757-1-sashal@kernel.org>
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit b24fd5bc8e6d6b6006db65b5956c2c2cd0ee5a7b ]
+
+Switch to using an enum instead of pointers in acpi_gpio_in_ignore_list(),
+which moves towards isolating the GPIO ACPI and quirk APIs. This will
+help split them completely in the next changes.
+
+No functional changes.
+
+Reviewed-by: Hans de Goede <hdegoede@redhat.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Stable-dep-of: 2d967310c49e ("gpiolib: acpi: Add quirk for Dell Precision 7780")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpiolib-acpi.c |   21 ++++++++++++++++-----
+ drivers/gpio/gpiolib-acpi.h |    8 ++++++++
+ 2 files changed, 24 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -350,14 +350,25 @@ static struct gpio_desc *acpi_request_ow
+       return desc;
+ }
+-static bool acpi_gpio_in_ignore_list(const char *ignore_list, const char *controller_in,
+-                                   unsigned int pin_in)
++bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list, const char *controller_in,
++                            unsigned int pin_in)
+ {
+-      const char *controller, *pin_str;
++      const char *ignore_list, *controller, *pin_str;
+       unsigned int pin;
+       char *endp;
+       int len;
++      switch (list) {
++      case ACPI_GPIO_IGNORE_WAKE:
++              ignore_list = ignore_wake;
++              break;
++      case ACPI_GPIO_IGNORE_INTERRUPT:
++              ignore_list = ignore_interrupt;
++              break;
++      default:
++              return false;
++      }
++
+       controller = ignore_list;
+       while (controller) {
+               pin_str = strchr(controller, '@');
+@@ -394,7 +405,7 @@ static bool acpi_gpio_irq_is_wake(struct
+       if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
+               return false;
+-      if (acpi_gpio_in_ignore_list(ignore_wake, dev_name(parent), pin)) {
++      if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_WAKE, dev_name(parent), pin)) {
+               dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
+               return false;
+       }
+@@ -437,7 +448,7 @@ static acpi_status acpi_gpiochip_alloc_e
+       if (!handler)
+               return AE_OK;
+-      if (acpi_gpio_in_ignore_list(ignore_interrupt, dev_name(chip->parent), pin)) {
++      if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_INTERRUPT, dev_name(chip->parent), pin)) {
+               dev_info(chip->parent, "Ignoring interrupt on pin %u\n", pin);
+               return AE_OK;
+       }
+--- a/drivers/gpio/gpiolib-acpi.h
++++ b/drivers/gpio/gpiolib-acpi.h
+@@ -58,4 +58,12 @@ static inline int acpi_gpio_count(const
+ }
+ #endif
++enum acpi_gpio_ignore_list {
++      ACPI_GPIO_IGNORE_WAKE,
++      ACPI_GPIO_IGNORE_INTERRUPT,
++};
++
++bool acpi_gpio_in_ignore_list(enum acpi_gpio_ignore_list list,
++                            const char *controller_in, unsigned int pin_in);
++
+ #endif /* GPIOLIB_ACPI_H */
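For illustration, a minimal sketch of the new call convention; the
controller name and pin number below are made-up values, not taken from
any real quirk table:

    if (acpi_gpio_in_ignore_list(ACPI_GPIO_IGNORE_WAKE, "INT33FF:02", 28))
            pr_info("wakeup on INT33FF:02 pin 28 suppressed by quirk\n");

The helper now resolves the enum to the matching module-parameter string
(ignore_wake or ignore_interrupt) internally and returns false for any
unknown list value, so callers no longer touch the quirk data directly.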
diff --git a/queue-6.12/hrtimers-introduce-hrtimer_update_function.patch b/queue-6.12/hrtimers-introduce-hrtimer_update_function.patch
new file mode 100644 (file)
index 0000000..a3980fb
--- /dev/null
@@ -0,0 +1,64 @@
+From stable+bounces-204235-greg=kroah.com@vger.kernel.org Tue Dec 30 16:56:01 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 10:55:54 -0500
+Subject: hrtimers: Introduce hrtimer_update_function()
+To: stable@vger.kernel.org
+Cc: Nam Cao <namcao@linutronix.de>, Thomas Gleixner <tglx@linutronix.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230155556.2289800-1-sashal@kernel.org>
+
+From: Nam Cao <namcao@linutronix.de>
+
+[ Upstream commit 8f02e3563bb5824eb01c94f2c75f1dcee2d05625 ]
+
+Some users of hrtimer need to change the callback function after the
+initial setup. They write to hrtimer::function directly.
+
+That's not safe under all circumstances as the write is lockless and a
+concurrent timer expiry might end up using the wrong function pointer.
+
+Introduce hrtimer_update_function(), which also performs runtime checks on
+whether it is safe to modify the callback.
+
+This allows hrtimer::function to be made private once all users are converted.
+
+Signed-off-by: Nam Cao <namcao@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/all/20a937b0ae09ad54b5b6d86eabead7c570f1b72e.1730386209.git.namcao@linutronix.de
+Stable-dep-of: 267ee93c417e ("serial: xilinx_uartps: fix rs485 delay_rts_after_send")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/hrtimer.h |   22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -337,6 +337,28 @@ static inline int hrtimer_callback_runni
+       return timer->base->running == timer;
+ }
++/**
++ * hrtimer_update_function - Update the timer's callback function
++ * @timer:    Timer to update
++ * @function: New callback function
++ *
++ * Only safe to call if the timer is not enqueued. Can be called in the callback function if the
++ * timer is not enqueued at the same time (see the comments above HRTIMER_STATE_ENQUEUED).
++ */
++static inline void hrtimer_update_function(struct hrtimer *timer,
++                                         enum hrtimer_restart (*function)(struct hrtimer *))
++{
++      guard(raw_spinlock_irqsave)(&timer->base->cpu_base->lock);
++
++      if (WARN_ON_ONCE(hrtimer_is_queued(timer)))
++              return;
++
++      if (WARN_ON_ONCE(!function))
++              return;
++
++      timer->function = function;
++}
++
+ /* Forward a hrtimer so it expires after now: */
+ extern u64
+ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
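A hedged sketch of the intended conversion for such users; my_timer and
my_new_callback are hypothetical names, not taken from an in-tree driver:

    static enum hrtimer_restart my_new_callback(struct hrtimer *t)
    {
            return HRTIMER_NORESTART;
    }

    /* Before: a lockless store, racy against a concurrent expiry. */
    my_timer.function = my_new_callback;

    /* After: the helper takes cpu_base->lock and warns if the timer
     * is still enqueued or the new callback is NULL.
     */
    hrtimer_update_function(&my_timer, my_new_callback);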
diff --git a/queue-6.12/hrtimers-make-hrtimer_update_function-less-expensive.patch b/queue-6.12/hrtimers-make-hrtimer_update_function-less-expensive.patch
new file mode 100644 (file)
index 0000000..5a2a612
--- /dev/null
@@ -0,0 +1,43 @@
+From 2ea97b76d6712bfb0408e5b81ffd7bc4551d3153 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 7 Feb 2025 22:16:09 +0100
+Subject: hrtimers: Make hrtimer_update_function() less expensive
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 2ea97b76d6712bfb0408e5b81ffd7bc4551d3153 upstream.
+
+Due to locking, the sanity checks in hrtimer_update_function() are
+expensive for high-frequency users such as the io_uring code.
+
+Hide the sanity checks behind CONFIG_PROVE_LOCKING, which has a decent
+chance to be enabled on a regular basis for testing.
+
+Fixes: 8f02e3563bb5 ("hrtimers: Introduce hrtimer_update_function()")
+Reported-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/all/87ikpllali.ffs@tglx
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/hrtimer.h |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -348,6 +348,7 @@ static inline int hrtimer_callback_runni
+ static inline void hrtimer_update_function(struct hrtimer *timer,
+                                          enum hrtimer_restart (*function)(struct hrtimer *))
+ {
++#ifdef CONFIG_PROVE_LOCKING
+       guard(raw_spinlock_irqsave)(&timer->base->cpu_base->lock);
+       if (WARN_ON_ONCE(hrtimer_is_queued(timer)))
+@@ -355,7 +356,7 @@ static inline void hrtimer_update_functi
+       if (WARN_ON_ONCE(!function))
+               return;
+-
++#endif
+       timer->function = function;
+ }
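With the checks hidden behind CONFIG_PROVE_LOCKING, the helper reduces to
a plain store on production configurations; effectively:

    static inline void
    hrtimer_update_function(struct hrtimer *timer,
                            enum hrtimer_restart (*function)(struct hrtimer *))
    {
            /* CONFIG_PROVE_LOCKING=n: no lock, no sanity checks */
            timer->function = function;
    }

Lockdep-enabled test kernels keep the full debug coverage, while
high-frequency callers avoid taking the raw spinlock on every update.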
diff --git a/queue-6.12/idpf-add-support-for-sw-triggered-interrupts.patch b/queue-6.12/idpf-add-support-for-sw-triggered-interrupts.patch
new file mode 100644 (file)
index 0000000..14bdcac
--- /dev/null
@@ -0,0 +1,93 @@
+From stable+bounces-201105-greg=kroah.com@vger.kernel.org Mon Dec 15 22:50:25 2025
+From: Tony Nguyen <anthony.l.nguyen@intel.com>
+Date: Mon, 15 Dec 2025 13:42:40 -0800
+Subject: idpf: add support for SW triggered interrupts
+To: stable@vger.kernel.org
+Cc: Joshua Hay <joshua.a.hay@intel.com>, anthony.l.nguyen@intel.com, madhu.chittim@intel.com, decot@google.com, lrizzo@google.com, brianvv@google.com, Krishneil Singh <krishneil.k.singh@intel.com>
+Message-ID: <20251215214303.2608822-2-anthony.l.nguyen@intel.com>
+
+From: Joshua Hay <joshua.a.hay@intel.com>
+
+[ Upstream commit 93433c1d919775f8ac0f7893692f42e6731a5373 ]
+
+SW triggered interrupts are guaranteed to fire after their timer
+expires, unlike Tx and Rx interrupts which will only fire after the
+timer expires _and_ a descriptor write back is available to be processed
+by the driver.
+
+Add the necessary fields, defines, and initializations to enable a SW
+triggered interrupt in the vector's dyn_ctl register.
+
+Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
+Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
+Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_dev.c    |    3 +++
+ drivers/net/ethernet/intel/idpf/idpf_txrx.h   |    8 +++++++-
+ drivers/net/ethernet/intel/idpf/idpf_vf_dev.c |    3 +++
+ 3 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
+@@ -101,6 +101,9 @@ static int idpf_intr_reg_init(struct idp
+               intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S;
+               intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S;
+               intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M;
++              intr->dyn_ctl_swint_trig_m = PF_GLINT_DYN_CTL_SWINT_TRIG_M;
++              intr->dyn_ctl_sw_itridx_ena_m =
++                      PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
+               spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
+                                              IDPF_PF_ITR_IDX_SPACING);
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+@@ -354,6 +354,8 @@ struct idpf_vec_regs {
+  * @dyn_ctl_itridx_m: Mask for ITR index
+  * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
+  * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
++ * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
++ * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
+  * @rx_itr: RX ITR register
+  * @tx_itr: TX ITR register
+  * @icr_ena: Interrupt cause register offset
+@@ -367,6 +369,8 @@ struct idpf_intr_reg {
+       u32 dyn_ctl_itridx_m;
+       u32 dyn_ctl_intrvl_s;
+       u32 dyn_ctl_wb_on_itr_m;
++      u32 dyn_ctl_sw_itridx_ena_m;
++      u32 dyn_ctl_swint_trig_m;
+       void __iomem *rx_itr;
+       void __iomem *tx_itr;
+       void __iomem *icr_ena;
+@@ -437,7 +441,7 @@ struct idpf_q_vector {
+       cpumask_var_t affinity_mask;
+       __cacheline_group_end_aligned(cold);
+ };
+-libeth_cacheline_set_assert(struct idpf_q_vector, 112,
++libeth_cacheline_set_assert(struct idpf_q_vector, 120,
+                           24 + sizeof(struct napi_struct) +
+                           2 * sizeof(struct dim),
+                           8 + sizeof(cpumask_var_t));
+@@ -471,6 +475,8 @@ struct idpf_tx_queue_stats {
+ #define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
+ #define IDPF_ITR_TX_DEF               IDPF_ITR_20K
+ #define IDPF_ITR_RX_DEF               IDPF_ITR_20K
++/* Index used for 'SW ITR' update in DYN_CTL register */
++#define IDPF_SW_ITR_UPDATE_IDX        2
+ /* Index used for 'No ITR' update in DYN_CTL register */
+ #define IDPF_NO_ITR_UPDATE_IDX        3
+ #define IDPF_ITR_IDX_SPACING(spacing, dflt)   (spacing ? spacing : dflt)
+--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+@@ -101,6 +101,9 @@ static int idpf_vf_intr_reg_init(struct
+               intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
+               intr->dyn_ctl_intrvl_s = VF_INT_DYN_CTLN_INTERVAL_S;
+               intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M;
++              intr->dyn_ctl_swint_trig_m = VF_INT_DYN_CTLN_SWINT_TRIG_M;
++              intr->dyn_ctl_sw_itridx_ena_m =
++                      VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M;
+               spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
+                                              IDPF_VF_ITR_IDX_SPACING);
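As a rough sketch of how these masks are expected to be combined when a
later patch in this series actually triggers the SW interrupt
(dyn_ctl_intena_m and dyn_ctl_itridx_m are existing idpf_intr_reg fields
not shown in the hunks above; treat the exact composition as
illustrative, not as the final implementation):

    /* Fire a SW-triggered interrupt on this vector: keep interrupts
     * enabled, select "no ITR update", and set the SW trigger plus
     * SW ITR index enable bits.
     */
    u32 val = intr->dyn_ctl_intena_m |
              intr->dyn_ctl_itridx_m |
              intr->dyn_ctl_swint_trig_m |
              intr->dyn_ctl_sw_itridx_ena_m;

    writel(val, intr->dyn_ctl);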
diff --git a/queue-6.12/idpf-add-support-for-tx-refillqs-in-flow-scheduling-mode.patch b/queue-6.12/idpf-add-support-for-tx-refillqs-in-flow-scheduling-mode.patch
new file mode 100644 (file)
index 0000000..7d16a4b
--- /dev/null
@@ -0,0 +1,285 @@
+From stable+bounces-201107-greg=kroah.com@vger.kernel.org Mon Dec 15 22:50:36 2025
+From: Tony Nguyen <anthony.l.nguyen@intel.com>
+Date: Mon, 15 Dec 2025 13:42:42 -0800
+Subject: idpf: add support for Tx refillqs in flow scheduling mode
+To: stable@vger.kernel.org
+Cc: Joshua Hay <joshua.a.hay@intel.com>, anthony.l.nguyen@intel.com, madhu.chittim@intel.com, decot@google.com, lrizzo@google.com, brianvv@google.com, Samuel Salin <Samuel.salin@intel.com>
+Message-ID: <20251215214303.2608822-4-anthony.l.nguyen@intel.com>
+
+From: Joshua Hay <joshua.a.hay@intel.com>
+
+[ Upstream commit cb83b559bea39f207ee214ee2972657e8576ed18 ]
+
+Changes from original commit:
+- Adjusted idpf_tx_queue assert size to align with 6.12 struct definition
+
+In certain production environments, it is possible for completion tags
+to collide, meaning N packets with the same completion tag are in flight
+at the same time. In this environment, any given Tx queue is effectively
+used to send both slower traffic and higher throughput traffic
+simultaneously. This is the result of a customer's specific
+configuration in the device pipeline, the details of which Intel cannot
+provide. This configuration results in a small number of out-of-order
+completions, i.e., a small number of packets in flight. The existing
+guardrails in the driver only protect against a large number of packets
+in flight. The slower flow completions are delayed which causes the
+out-of-order completions. The fast flow will continue sending traffic
+and generating tags. Because tags are generated on the fly, the fast
+flow eventually uses the same tag for a packet that is still in flight
+from the slower flow. The driver has no idea which packet it should
+clean when it processes the completion with that tag, but it will look
+for the packet on the buffer ring before the hash table.  If the slower
+flow packet completion is processed first, it will end up cleaning the
+fast flow packet on the ring prematurely. This leaves the descriptor
+ring in a bad state resulting in a crash or Tx timeout.
+
+In summary, generating a tag when a packet is sent can lead to the same
+tag being associated with multiple packets. This can lead to resource
+leaks, crashes, and/or Tx timeouts.
+
+Before we can replace the tag generation, we need a new mechanism for
+the send path to know what tag to use next. The driver will allocate and
+initialize a refillq for each TxQ with all of the possible free tag
+values. During send, the driver grabs the next free tag from the refillq
+from next_to_clean. While cleaning the packet, the clean routine posts
+the tag back to the refillq's next_to_use to indicate that it is now
+free to use.
+
+This mechanism works exactly the same way as the existing Rx refill
+queues, which post the cleaned buffer IDs back to the buffer queue to be
+reposted to HW. Since we're using the refillqs for both Rx and Tx now,
+genericize some of the existing refillq support.
+
+Note: the refillqs will not be used yet. This is only demonstrating how
+they will be used to pass free tags back to the send path.
+
+Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
+Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c |   93 +++++++++++++++++++++++++---
+ drivers/net/ethernet/intel/idpf/idpf_txrx.h |    8 +-
+ 2 files changed, 91 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -158,6 +158,9 @@ static void idpf_tx_desc_rel(struct idpf
+       if (!txq->desc_ring)
+               return;
++      if (txq->refillq)
++              kfree(txq->refillq->ring);
++
+       dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
+       txq->desc_ring = NULL;
+       txq->next_to_use = 0;
+@@ -263,6 +266,7 @@ static int idpf_tx_desc_alloc(const stru
+                             struct idpf_tx_queue *tx_q)
+ {
+       struct device *dev = tx_q->dev;
++      struct idpf_sw_queue *refillq;
+       int err;
+       err = idpf_tx_buf_alloc_all(tx_q);
+@@ -286,6 +290,29 @@ static int idpf_tx_desc_alloc(const stru
+       tx_q->next_to_clean = 0;
+       idpf_queue_set(GEN_CHK, tx_q);
++      if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
++              return 0;
++
++      refillq = tx_q->refillq;
++      refillq->desc_count = tx_q->desc_count;
++      refillq->ring = kcalloc(refillq->desc_count, sizeof(u32),
++                              GFP_KERNEL);
++      if (!refillq->ring) {
++              err = -ENOMEM;
++              goto err_alloc;
++      }
++
++      for (unsigned int i = 0; i < refillq->desc_count; i++)
++              refillq->ring[i] =
++                      FIELD_PREP(IDPF_RFL_BI_BUFID_M, i) |
++                      FIELD_PREP(IDPF_RFL_BI_GEN_M,
++                                 idpf_queue_has(GEN_CHK, refillq));
++
++      /* Go ahead and flip the GEN bit since this counts as filling
++       * up the ring, i.e. we already ring wrapped.
++       */
++      idpf_queue_change(GEN_CHK, refillq);
++
+       return 0;
+ err_alloc:
+@@ -622,18 +649,18 @@ static int idpf_rx_hdr_buf_alloc_all(str
+ }
+ /**
+- * idpf_rx_post_buf_refill - Post buffer id to refill queue
++ * idpf_post_buf_refill - Post buffer id to refill queue
+  * @refillq: refill queue to post to
+  * @buf_id: buffer id to post
+  */
+-static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
++static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
+ {
+       u32 nta = refillq->next_to_use;
+       /* store the buffer ID and the SW maintained GEN bit to the refillq */
+       refillq->ring[nta] =
+-              FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
+-              FIELD_PREP(IDPF_RX_BI_GEN_M,
++              FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
++              FIELD_PREP(IDPF_RFL_BI_GEN_M,
+                          idpf_queue_has(GEN_CHK, refillq));
+       if (unlikely(++nta == refillq->desc_count)) {
+@@ -1014,6 +1041,11 @@ static void idpf_txq_group_rel(struct id
+               struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+               for (j = 0; j < txq_grp->num_txq; j++) {
++                      if (flow_sch_en) {
++                              kfree(txq_grp->txqs[j]->refillq);
++                              txq_grp->txqs[j]->refillq = NULL;
++                      }
++
+                       kfree(txq_grp->txqs[j]);
+                       txq_grp->txqs[j] = NULL;
+               }
+@@ -1425,6 +1457,13 @@ static int idpf_txq_group_alloc(struct i
+                       }
+                       idpf_queue_set(FLOW_SCH_EN, q);
++
++                      q->refillq = kzalloc(sizeof(*q->refillq), GFP_KERNEL);
++                      if (!q->refillq)
++                              goto err_alloc;
++
++                      idpf_queue_set(GEN_CHK, q->refillq);
++                      idpf_queue_set(RFL_GEN_CHK, q->refillq);
+               }
+               if (!split)
+@@ -1973,6 +2012,8 @@ static void idpf_tx_handle_rs_completion
+       compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
++      idpf_post_buf_refill(txq->refillq, compl_tag);
++
+       /* If we didn't clean anything on the ring, this packet must be
+        * in the hash table. Go clean it there.
+        */
+@@ -2333,6 +2374,37 @@ static unsigned int idpf_tx_splitq_bump_
+ }
+ /**
++ * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue
++ * @refillq: refill queue to get buffer ID from
++ * @buf_id: return buffer ID
++ *
++ * Return: true if a buffer ID was found, false if not
++ */
++static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
++                                  u16 *buf_id)
++{
++      u32 ntc = refillq->next_to_clean;
++      u32 refill_desc;
++
++      refill_desc = refillq->ring[ntc];
++
++      if (unlikely(idpf_queue_has(RFL_GEN_CHK, refillq) !=
++                   !!(refill_desc & IDPF_RFL_BI_GEN_M)))
++              return false;
++
++      *buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
++
++      if (unlikely(++ntc == refillq->desc_count)) {
++              idpf_queue_change(RFL_GEN_CHK, refillq);
++              ntc = 0;
++      }
++
++      refillq->next_to_clean = ntc;
++
++      return true;
++}
++
++/**
+  * idpf_tx_splitq_map - Build the Tx flex descriptor
+  * @tx_q: queue to send buffer on
+  * @params: pointer to splitq params struct
+@@ -2702,6 +2774,13 @@ static netdev_tx_t idpf_tx_splitq_frame(
+       }
+       if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
++              if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
++                                                    &tx_params.compl_tag))) {
++                      u64_stats_update_begin(&tx_q->stats_sync);
++                      u64_stats_inc(&tx_q->q_stats.q_busy);
++                      u64_stats_update_end(&tx_q->stats_sync);
++              }
++
+               tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
+               tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
+               /* Set the RE bit to catch any packets that may have not been
+@@ -3220,7 +3299,7 @@ payload:
+ skip_data:
+               rx_buf->page = NULL;
+-              idpf_rx_post_buf_refill(refillq, buf_id);
++              idpf_post_buf_refill(refillq, buf_id);
+               IDPF_RX_BUMP_NTC(rxq, ntc);
+               /* skip if it is non EOP desc */
+@@ -3328,10 +3407,10 @@ static void idpf_rx_clean_refillq(struct
+               bool failure;
+               if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
+-                  !!(refill_desc & IDPF_RX_BI_GEN_M))
++                  !!(refill_desc & IDPF_RFL_BI_GEN_M))
+                       break;
+-              buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
++              buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
+               failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
+               if (failure)
+                       break;
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+@@ -107,8 +107,8 @@ do {                                                               \
+  */
+ #define IDPF_TX_SPLITQ_RE_MIN_GAP     64
+-#define IDPF_RX_BI_GEN_M              BIT(16)
+-#define IDPF_RX_BI_BUFID_M            GENMASK(15, 0)
++#define IDPF_RFL_BI_GEN_M             BIT(16)
++#define IDPF_RFL_BI_BUFID_M           GENMASK(15, 0)
+ #define IDPF_RXD_EOF_SPLITQ           VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
+ #define IDPF_RXD_EOF_SINGLEQ          VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
+@@ -635,6 +635,7 @@ libeth_cacheline_set_assert(struct idpf_
+  * @cleaned_pkts: Number of packets cleaned for the above said case
+  * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
+  * @stash: Tx buffer stash for Flow-based scheduling mode
++ * @refillq: Pointer to refill queue
+  * @compl_tag_bufid_m: Completion tag buffer id mask
+  * @compl_tag_cur_gen: Used to keep track of current completion tag generation
+  * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
+@@ -682,6 +683,7 @@ struct idpf_tx_queue {
+       u16 tx_max_bufs;
+       struct idpf_txq_stash *stash;
++      struct idpf_sw_queue *refillq;
+       u16 compl_tag_bufid_m;
+       u16 compl_tag_cur_gen;
+@@ -700,7 +702,7 @@ struct idpf_tx_queue {
+       __cacheline_group_end_aligned(cold);
+ };
+ libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
+-                          88 + sizeof(struct u64_stats_sync),
++                          96 + sizeof(struct u64_stats_sync),
+                           24);
+ /**
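Putting the pieces together, a simplified sketch of the round trip this
series is building toward (the surrounding send and clean contexts are
elided; note that at this point in the series a failed reservation only
bumps the q_busy counter rather than stopping the queue):

    u16 tag;

    /* Send path: reserve a free completion tag for the packet. */
    if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq, &tag)))
            return NETDEV_TX_BUSY;  /* sketch only: no free tags left */

    /* ... tag travels to HW as the packet's completion tag ... */

    /* Clean path: the completion descriptor carries the tag back,
     * and posting it to the refillq marks it free for reuse.
     */
    idpf_post_buf_refill(tx_q->refillq, tag);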
diff --git a/queue-6.12/idpf-improve-when-to-set-re-bit-logic.patch b/queue-6.12/idpf-improve-when-to-set-re-bit-logic.patch
new file mode 100644 (file)
index 0000000..4a2537d
--- /dev/null
@@ -0,0 +1,117 @@
+From stable+bounces-201108-greg=kroah.com@vger.kernel.org Mon Dec 15 22:50:40 2025
+From: Tony Nguyen <anthony.l.nguyen@intel.com>
+Date: Mon, 15 Dec 2025 13:42:43 -0800
+Subject: idpf: improve when to set RE bit logic
+To: stable@vger.kernel.org
+Cc: Joshua Hay <joshua.a.hay@intel.com>, anthony.l.nguyen@intel.com, madhu.chittim@intel.com, decot@google.com, lrizzo@google.com, brianvv@google.com, Samuel Salin <Samuel.salin@intel.com>
+Message-ID: <20251215214303.2608822-5-anthony.l.nguyen@intel.com>
+
+From: Joshua Hay <joshua.a.hay@intel.com>
+
+[ Upstream commit f2d18e16479cac7a708d77cbfb4220a9114a71fc ]
+
+Track the gap between next_to_use and the last RE index. Set RE again
+if the gap is large enough to ensure the RE bit is set frequently. This is
+critical before removing the stashing mechanisms because the
+opportunistic descriptor ring cleaning from the out-of-order completions
+will go away. Previously the descriptors would be "cleaned" by both the
+descriptor (RE) completion and the out-of-order completions. Without the
+latter, we must ensure the RE bit is set more frequently. Otherwise,
+it's theoretically possible for the descriptor ring next_to_clean to
+never advance.  The previous implementation was dependent on the start
+of a packet falling on an index that is a multiple of 64 in the
+descriptor ring, which is not guaranteed with large packets.
+
+Signed-off-by: Luigi Rizzo <lrizzo@google.com>
+Signed-off-by: Brian Vazquez <brianvv@google.com>
+Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
+Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c |   20 +++++++++++++++++++-
+ drivers/net/ethernet/intel/idpf/idpf_txrx.h |    6 ++++--
+ 2 files changed, 23 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -313,6 +313,8 @@ static int idpf_tx_desc_alloc(const stru
+        */
+       idpf_queue_change(GEN_CHK, refillq);
++      tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP;
++
+       return 0;
+ err_alloc:
+@@ -2709,6 +2711,21 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf
+ }
+ /**
++ * idpf_tx_splitq_need_re - check whether RE bit needs to be set
++ * @tx_q: pointer to Tx queue
++ *
++ * Return: true if RE bit needs to be set, false otherwise
++ */
++static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
++{
++      int gap = tx_q->next_to_use - tx_q->last_re;
++
++      gap += (gap < 0) ? tx_q->desc_count : 0;
++
++      return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP;
++}
++
++/**
+  * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
+  * @skb: send buffer
+  * @tx_q: queue to send buffer on
+@@ -2788,9 +2805,10 @@ static netdev_tx_t idpf_tx_splitq_frame(
+                * MIN_RING size to ensure it will be set at least once each
+                * time around the ring.
+                */
+-              if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
++              if (idpf_tx_splitq_need_re(tx_q)) {
+                       tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
+                       tx_q->txq_grp->num_completions_pending++;
++                      tx_q->last_re = tx_q->next_to_use;
+               }
+               if (skb->ip_summed == CHECKSUM_PARTIAL)
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+@@ -623,6 +623,8 @@ libeth_cacheline_set_assert(struct idpf_
+  * @netdev: &net_device corresponding to this queue
+  * @next_to_use: Next descriptor to use
+  * @next_to_clean: Next descriptor to clean
++ * @last_re: last descriptor index that RE bit was set
++ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
+  * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
+  *               the TX completion queue, it can be for any TXQ associated
+  *               with that completion queue. This means we can clean up to
+@@ -633,7 +635,6 @@ libeth_cacheline_set_assert(struct idpf_
+  *               only once at the end of the cleaning routine.
+  * @clean_budget: singleq only, queue cleaning budget
+  * @cleaned_pkts: Number of packets cleaned for the above said case
+- * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
+  * @stash: Tx buffer stash for Flow-based scheduling mode
+  * @refillq: Pointer to refill queue
+  * @compl_tag_bufid_m: Completion tag buffer id mask
+@@ -674,6 +675,8 @@ struct idpf_tx_queue {
+       __cacheline_group_begin_aligned(read_write);
+       u16 next_to_use;
+       u16 next_to_clean;
++      u16 last_re;
++      u16 tx_max_bufs;
+       union {
+               u32 cleaned_bytes;
+@@ -681,7 +684,6 @@ struct idpf_tx_queue {
+       };
+       u16 cleaned_pkts;
+-      u16 tx_max_bufs;
+       struct idpf_txq_stash *stash;
+       struct idpf_sw_queue *refillq;
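To make the wraparound arithmetic concrete, a worked example assuming
desc_count = 512 and IDPF_TX_SPLITQ_RE_MIN_GAP = 64:

    /* next_to_use = 10 has wrapped past last_re = 480: */
    gap = 10 - 480;     /* -470 */
    gap += 512;         /*   42 -> below 64, RE stays clear    */

    /* a few packets later, next_to_use = 44: */
    gap = 44 - 480;     /* -436 */
    gap += 512;         /*   76 -> >= 64, set RE, last_re = 44 */

Because last_re only advances when RE is actually set, the gap is
measured from the last descriptor that requested a completion rather
than from a fixed ring alignment, so RE is guaranteed at least once per
IDPF_TX_SPLITQ_RE_MIN_GAP descriptors regardless of packet size.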
diff --git a/queue-6.12/idpf-remove-obsolete-stashing-code.patch b/queue-6.12/idpf-remove-obsolete-stashing-code.patch
new file mode 100644 (file)
index 0000000..b752668
--- /dev/null
@@ -0,0 +1,666 @@
+From stable+bounces-201111-greg=kroah.com@vger.kernel.org Mon Dec 15 22:51:08 2025
+From: Tony Nguyen <anthony.l.nguyen@intel.com>
+Date: Mon, 15 Dec 2025 13:42:47 -0800
+Subject: idpf: remove obsolete stashing code
+To: stable@vger.kernel.org
+Cc: Joshua Hay <joshua.a.hay@intel.com>, anthony.l.nguyen@intel.com, madhu.chittim@intel.com, decot@google.com, lrizzo@google.com, brianvv@google.com, Aleksandr Loktionov <aleksandr.loktionov@intel.com>, Samuel Salin <Samuel.salin@intel.com>
+Message-ID: <20251215214303.2608822-9-anthony.l.nguyen@intel.com>
+
+From: Joshua Hay <joshua.a.hay@intel.com>
+
+[ Upstream commit 6c4e68480238274f84aa50d54da0d9e262df6284 ]
+
+Changes from original commit:
+- Adjusted idpf_tx_queue assert size to align with 6.12 struct definition
+
+With the new Tx buffer management scheme, there is no need for all of
+the stashing mechanisms, the hash table, the reserve buffer stack, etc.
+Remove all of that.
+
+Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
+Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c |  309 +---------------------------
+ drivers/net/ethernet/intel/idpf/idpf_txrx.h |   47 ----
+ 2 files changed, 22 insertions(+), 334 deletions(-)
+
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -7,13 +7,7 @@
+ #include "idpf.h"
+ #include "idpf_virtchnl.h"
+-struct idpf_tx_stash {
+-      struct hlist_node hlist;
+-      struct libeth_sqe buf;
+-};
+-
+ #define idpf_tx_buf_next(buf)         (*(u32 *)&(buf)->priv)
+-#define idpf_tx_buf_compl_tag(buf)    (*(u32 *)&(buf)->priv)
+ LIBETH_SQE_CHECK_PRIV(u32);
+ /**
+@@ -40,36 +34,6 @@ static bool idpf_chk_linearize(const str
+ }
+ /**
+- * idpf_buf_lifo_push - push a buffer pointer onto stack
+- * @stack: pointer to stack struct
+- * @buf: pointer to buf to push
+- *
+- * Returns 0 on success, negative on failure
+- **/
+-static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
+-                            struct idpf_tx_stash *buf)
+-{
+-      if (unlikely(stack->top == stack->size))
+-              return -ENOSPC;
+-
+-      stack->bufs[stack->top++] = buf;
+-
+-      return 0;
+-}
+-
+-/**
+- * idpf_buf_lifo_pop - pop a buffer pointer from stack
+- * @stack: pointer to stack struct
+- **/
+-static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
+-{
+-      if (unlikely(!stack->top))
+-              return NULL;
+-
+-      return stack->bufs[--stack->top];
+-}
+-
+-/**
+  * idpf_tx_timeout - Respond to a Tx Hang
+  * @netdev: network interface device structure
+  * @txqueue: TX queue
+@@ -97,14 +61,11 @@ void idpf_tx_timeout(struct net_device *
+ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
+ {
+       struct libeth_sq_napi_stats ss = { };
+-      struct idpf_buf_lifo *buf_stack;
+-      struct idpf_tx_stash *stash;
+       struct libeth_cq_pp cp = {
+               .dev    = txq->dev,
+               .ss     = &ss,
+       };
+-      struct hlist_node *tmp;
+-      u32 i, tag;
++      u32 i;
+       /* Buffers already cleared, nothing to do */
+       if (!txq->tx_buf)
+@@ -116,33 +77,6 @@ static void idpf_tx_buf_rel_all(struct i
+       kfree(txq->tx_buf);
+       txq->tx_buf = NULL;
+-
+-      if (!idpf_queue_has(FLOW_SCH_EN, txq))
+-              return;
+-
+-      buf_stack = &txq->stash->buf_stack;
+-      if (!buf_stack->bufs)
+-              return;
+-
+-      /*
+-       * If a Tx timeout occurred, there are potentially still bufs in the
+-       * hash table, free them here.
+-       */
+-      hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash,
+-                         hlist) {
+-              if (!stash)
+-                      continue;
+-
+-              libeth_tx_complete(&stash->buf, &cp);
+-              hash_del(&stash->hlist);
+-              idpf_buf_lifo_push(buf_stack, stash);
+-      }
+-
+-      for (i = 0; i < buf_stack->size; i++)
+-              kfree(buf_stack->bufs[i]);
+-
+-      kfree(buf_stack->bufs);
+-      buf_stack->bufs = NULL;
+ }
+ /**
+@@ -218,9 +152,6 @@ static void idpf_tx_desc_rel_all(struct
+  */
+ static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
+ {
+-      struct idpf_buf_lifo *buf_stack;
+-      int i;
+-
+       /* Allocate book keeping buffers only. Buffers to be supplied to HW
+        * are allocated by kernel network stack and received as part of skb
+        */
+@@ -233,29 +164,6 @@ static int idpf_tx_buf_alloc_all(struct
+       if (!tx_q->tx_buf)
+               return -ENOMEM;
+-      if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
+-              return 0;
+-
+-      buf_stack = &tx_q->stash->buf_stack;
+-
+-      /* Initialize tx buf stack for out-of-order completions if
+-       * flow scheduling offload is enabled
+-       */
+-      buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
+-                                GFP_KERNEL);
+-      if (!buf_stack->bufs)
+-              return -ENOMEM;
+-
+-      buf_stack->size = tx_q->desc_count;
+-      buf_stack->top = tx_q->desc_count;
+-
+-      for (i = 0; i < tx_q->desc_count; i++) {
+-              buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]),
+-                                           GFP_KERNEL);
+-              if (!buf_stack->bufs[i])
+-                      return -ENOMEM;
+-      }
+-
+       return 0;
+ }
+@@ -369,8 +277,6 @@ static int idpf_tx_desc_alloc_all(struct
+       for (i = 0; i < vport->num_txq_grp; i++) {
+               for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
+                       struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
+-                      u8 gen_bits = 0;
+-                      u16 bufidx_mask;
+                       err = idpf_tx_desc_alloc(vport, txq);
+                       if (err) {
+@@ -379,34 +285,6 @@ static int idpf_tx_desc_alloc_all(struct
+                                       i);
+                               goto err_out;
+                       }
+-
+-                      if (!idpf_is_queue_model_split(vport->txq_model))
+-                              continue;
+-
+-                      txq->compl_tag_cur_gen = 0;
+-
+-                      /* Determine the number of bits in the bufid
+-                       * mask and add one to get the start of the
+-                       * generation bits
+-                       */
+-                      bufidx_mask = txq->desc_count - 1;
+-                      while (bufidx_mask >> 1) {
+-                              txq->compl_tag_gen_s++;
+-                              bufidx_mask = bufidx_mask >> 1;
+-                      }
+-                      txq->compl_tag_gen_s++;
+-
+-                      gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
+-                                                      txq->compl_tag_gen_s;
+-                      txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
+-
+-                      /* Set bufid mask based on location of first
+-                       * gen bit; it cannot simply be the descriptor
+-                       * ring size-1 since we can have size values
+-                       * where not all of those bits are set.
+-                       */
+-                      txq->compl_tag_bufid_m =
+-                              GETMAXVAL(txq->compl_tag_gen_s);
+               }
+               if (!idpf_is_queue_model_split(vport->txq_model))
+@@ -1061,9 +939,6 @@ static void idpf_txq_group_rel(struct id
+               kfree(txq_grp->complq);
+               txq_grp->complq = NULL;
+-
+-              if (flow_sch_en)
+-                      kfree(txq_grp->stashes);
+       }
+       kfree(vport->txq_grps);
+       vport->txq_grps = NULL;
+@@ -1416,7 +1291,6 @@ static int idpf_txq_group_alloc(struct i
+       for (i = 0; i < vport->num_txq_grp; i++) {
+               struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+               struct idpf_adapter *adapter = vport->adapter;
+-              struct idpf_txq_stash *stashes;
+               int j;
+               tx_qgrp->vport = vport;
+@@ -1429,15 +1303,6 @@ static int idpf_txq_group_alloc(struct i
+                               goto err_alloc;
+               }
+-              if (split && flow_sch_en) {
+-                      stashes = kcalloc(num_txq, sizeof(*stashes),
+-                                        GFP_KERNEL);
+-                      if (!stashes)
+-                              goto err_alloc;
+-
+-                      tx_qgrp->stashes = stashes;
+-              }
+-
+               for (j = 0; j < tx_qgrp->num_txq; j++) {
+                       struct idpf_tx_queue *q = tx_qgrp->txqs[j];
+@@ -1457,11 +1322,6 @@ static int idpf_txq_group_alloc(struct i
+                       if (!flow_sch_en)
+                               continue;
+-                      if (split) {
+-                              q->stash = &stashes[j];
+-                              hash_init(q->stash->sched_buf_hash);
+-                      }
+-
+                       idpf_queue_set(FLOW_SCH_EN, q);
+                       q->refillq = kzalloc(sizeof(*q->refillq), GFP_KERNEL);
+@@ -1719,82 +1579,6 @@ static void idpf_tx_handle_sw_marker(str
+       wake_up(&vport->sw_marker_wq);
+ }
+-/**
+- * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
+- * out of order completions
+- * @txq: queue to clean
+- * @compl_tag: completion tag of packet to clean (from completion descriptor)
+- * @cleaned: pointer to stats struct to track cleaned packets/bytes
+- * @budget: Used to determine if we are in netpoll
+- */
+-static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
+-                                     u16 compl_tag,
+-                                     struct libeth_sq_napi_stats *cleaned,
+-                                     int budget)
+-{
+-      struct idpf_tx_stash *stash;
+-      struct hlist_node *tmp_buf;
+-      struct libeth_cq_pp cp = {
+-              .dev    = txq->dev,
+-              .ss     = cleaned,
+-              .napi   = budget,
+-      };
+-
+-      /* Buffer completion */
+-      hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
+-                                  hlist, compl_tag) {
+-              if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag))
+-                      continue;
+-
+-              hash_del(&stash->hlist);
+-              libeth_tx_complete(&stash->buf, &cp);
+-
+-              /* Push shadow buf back onto stack */
+-              idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
+-      }
+-}
+-
+-/**
+- * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a
+- * later time (only relevant for flow scheduling mode)
+- * @txq: Tx queue to clean
+- * @tx_buf: buffer to store
+- */
+-static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
+-                                     struct idpf_tx_buf *tx_buf)
+-{
+-      struct idpf_tx_stash *stash;
+-
+-      if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
+-              return 0;
+-
+-      stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
+-      if (unlikely(!stash)) {
+-              net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
+-                                  netdev_name(txq->netdev));
+-
+-              return -ENOMEM;
+-      }
+-
+-      /* Store buffer params in shadow buffer */
+-      stash->buf.skb = tx_buf->skb;
+-      stash->buf.bytes = tx_buf->bytes;
+-      stash->buf.packets = tx_buf->packets;
+-      stash->buf.type = tx_buf->type;
+-      stash->buf.nr_frags = tx_buf->nr_frags;
+-      dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
+-      dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
+-      idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
+-
+-      /* Add buffer to buf_hash table to be freed later */
+-      hash_add(txq->stash->sched_buf_hash, &stash->hlist,
+-               idpf_tx_buf_compl_tag(&stash->buf));
+-
+-      tx_buf->type = LIBETH_SQE_EMPTY;
+-
+-      return 0;
+-}
+-
+ #define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf)    \
+ do {                                                          \
+       if (unlikely(++(ntc) == (txq)->desc_count)) {           \
+@@ -1822,14 +1606,8 @@ do {                                                            \
+  * Separate packet completion events will be reported on the completion queue,
+  * and the buffers will be cleaned separately. The stats are not updated from
+  * this function when using flow-based scheduling.
+- *
+- * Furthermore, in flow scheduling mode, check to make sure there are enough
+- * reserve buffers to stash the packet. If there are not, return early, which
+- * will leave next_to_clean pointing to the packet that failed to be stashed.
+- *
+- * Return: false in the scenario above, true otherwise.
+  */
+-static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
++static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
+                                int napi_budget,
+                                struct libeth_sq_napi_stats *cleaned,
+                                bool descs_only)
+@@ -1843,12 +1621,11 @@ static bool idpf_tx_splitq_clean(struct
+               .napi   = napi_budget,
+       };
+       struct idpf_tx_buf *tx_buf;
+-      bool clean_complete = true;
+       if (descs_only) {
+               /* Bump ring index to mark as cleaned. */
+               tx_q->next_to_clean = end;
+-              return true;
++              return;
+       }
+       tx_desc = &tx_q->flex_tx[ntc];
+@@ -1869,53 +1646,24 @@ static bool idpf_tx_splitq_clean(struct
+                       break;
+               eop_idx = tx_buf->rs_idx;
++              libeth_tx_complete(tx_buf, &cp);
+-              if (descs_only) {
+-                      if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) {
+-                              clean_complete = false;
+-                              goto tx_splitq_clean_out;
+-                      }
+-
+-                      idpf_stash_flow_sch_buffers(tx_q, tx_buf);
++              /* unmap remaining buffers */
++              while (ntc != eop_idx) {
++                      idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
++                                                    tx_desc, tx_buf);
+-                      while (ntc != eop_idx) {
+-                              idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
+-                                                            tx_desc, tx_buf);
+-                              idpf_stash_flow_sch_buffers(tx_q, tx_buf);
+-                      }
+-              } else {
++                      /* unmap any remaining paged data */
+                       libeth_tx_complete(tx_buf, &cp);
+-
+-                      /* unmap remaining buffers */
+-                      while (ntc != eop_idx) {
+-                              idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
+-                                                            tx_desc, tx_buf);
+-
+-                              /* unmap any remaining paged data */
+-                              libeth_tx_complete(tx_buf, &cp);
+-                      }
+               }
+ fetch_next_txq_desc:
+               idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
+       }
+-tx_splitq_clean_out:
+       tx_q->next_to_clean = ntc;
+-
+-      return clean_complete;
+ }
+-#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf)        \
+-do {                                                  \
+-      (buf)++;                                        \
+-      (ntc)++;                                        \
+-      if (unlikely((ntc) == (txq)->desc_count)) {     \
+-              buf = (txq)->tx_buf;                    \
+-              ntc = 0;                                \
+-      }                                               \
+-} while (0)
+-
+ /**
+  * idpf_tx_clean_bufs - clean flow scheduling TX queue buffers
+  * @txq: queue to clean
+@@ -1926,7 +1674,7 @@ do {                                                     \
+  * Clean all buffers associated with the packet starting at buf_id. Returns the
+  * byte/segment count for the cleaned packet.
+  */
+-static bool idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id,
++static void idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id,
+                              struct libeth_sq_napi_stats *cleaned,
+                              int budget)
+ {
+@@ -1950,8 +1698,6 @@ static bool idpf_tx_clean_bufs(struct id
+               libeth_tx_complete(tx_buf, &cp);
+               idpf_post_buf_refill(txq->refillq, buf_id);
+       }
+-
+-      return true;
+ }
+ /**
+@@ -1970,22 +1716,17 @@ static void idpf_tx_handle_rs_completion
+                                        struct libeth_sq_napi_stats *cleaned,
+                                        int budget)
+ {
+-      u16 compl_tag;
++      /* RS completion contains queue head for queue based scheduling or
++       * completion tag for flow based scheduling.
++       */
++      u16 rs_compl_val = le16_to_cpu(desc->q_head_compl_tag.q_head);
+       if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
+-              u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
+-
+-              idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
++              idpf_tx_splitq_clean(txq, rs_compl_val, budget, cleaned, false);
+               return;
+       }
+-      compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
+-
+-      /* If we didn't clean anything on the ring, this packet must be
+-       * in the hash table. Go clean it there.
+-       */
+-      if (!idpf_tx_clean_bufs(txq, compl_tag, cleaned, budget))
+-              idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
++      idpf_tx_clean_bufs(txq, rs_compl_val, cleaned, budget);
+ }
+ /**
+@@ -2102,8 +1843,7 @@ fetch_next_desc:
+               /* Update BQL */
+               nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
+-              dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
+-                          np->state != __IDPF_VPORT_UP ||
++              dont_wake = !complq_ok || np->state != __IDPF_VPORT_UP ||
+                           !netif_carrier_ok(tx_q->netdev);
+               /* Check if the TXQ needs to and can be restarted */
+               __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
+@@ -2174,7 +1914,6 @@ static int idpf_txq_has_room(struct idpf
+       if (IDPF_DESC_UNUSED(tx_q) < descs_needed ||
+           IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+               IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
+-          IDPF_TX_BUF_RSV_LOW(tx_q) ||
+           idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed)
+               return 0;
+       return 1;
+@@ -2298,10 +2037,8 @@ static unsigned int idpf_tx_splitq_bump_
+ {
+       ntu++;
+-      if (ntu == txq->desc_count) {
++      if (ntu == txq->desc_count)
+               ntu = 0;
+-              txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
+-      }
+       return ntu;
+ }
+@@ -2483,8 +2220,6 @@ static void idpf_tx_splitq_map(struct id
+                       if (unlikely(++i == tx_q->desc_count)) {
+                               tx_desc = &tx_q->flex_tx[0];
+                               i = 0;
+-                              tx_q->compl_tag_cur_gen =
+-                                      IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+                       } else {
+                               tx_desc++;
+                       }
+@@ -2515,7 +2250,6 @@ static void idpf_tx_splitq_map(struct id
+               if (unlikely(++i == tx_q->desc_count)) {
+                       tx_desc = &tx_q->flex_tx[0];
+                       i = 0;
+-                      tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+               } else {
+                       tx_desc++;
+               }
+@@ -2771,10 +2505,9 @@ static netdev_tx_t idpf_tx_splitq_frame(
+               tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
+               tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
+-              /* Set the RE bit to catch any packets that may have not been
+-               * stashed during RS completion cleaning. MIN_GAP is set to
+-               * MIN_RING size to ensure it will be set at least once each
+-               * time around the ring.
++              /* Set the RE bit to periodically "clean" the descriptor ring.
++               * MIN_GAP is set to MIN_RING size to ensure it will be set at
++               * least once each time around the ring.
+                */
+               if (idpf_tx_splitq_need_re(tx_q)) {
+                       tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+@@ -117,10 +117,6 @@ do {                                                              \
+       ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
+       (txq)->next_to_clean - (txq)->next_to_use - 1)
+-#define IDPF_TX_BUF_RSV_UNUSED(txq)   ((txq)->stash->buf_stack.top)
+-#define IDPF_TX_BUF_RSV_LOW(txq)      (IDPF_TX_BUF_RSV_UNUSED(txq) < \
+-                                       (txq)->desc_count >> 2)
+-
+ #define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq)  ((txcq)->desc_count >> 1)
+ /* Determine the absolute number of completions pending, i.e. the number of
+  * completions that are expected to arrive on the TX completion queue.
+@@ -130,12 +126,6 @@ do {                                                              \
+       0 : U32_MAX) + \
+       (txq)->num_completions_pending - (txq)->complq->num_completions)
+-#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH        16
+-/* Adjust the generation for the completion tag and wrap if necessary */
+-#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
+-      ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
+-      0 : (txq)->compl_tag_cur_gen)
+-
+ #define IDPF_TXBUF_NULL                       U32_MAX
+ #define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
+@@ -153,18 +143,6 @@ union idpf_tx_flex_desc {
+ #define idpf_tx_buf libeth_sqe
+ /**
+- * struct idpf_buf_lifo - LIFO for managing OOO completions
+- * @top: Used to know how many buffers are left
+- * @size: Total size of LIFO
+- * @bufs: Backing array
+- */
+-struct idpf_buf_lifo {
+-      u16 top;
+-      u16 size;
+-      struct idpf_tx_stash **bufs;
+-};
+-
+-/**
+  * struct idpf_tx_offload_params - Offload parameters for a given packet
+  * @tx_flags: Feature flags enabled for this packet
+  * @hdr_offsets: Offset parameter for single queue model
+@@ -492,17 +470,6 @@ struct idpf_tx_queue_stats {
+ #define IDPF_DIM_DEFAULT_PROFILE_IX           1
+ /**
+- * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
+- * @buf_stack: Stack of empty buffers to store buffer info for out of order
+- *           buffer completions. See struct idpf_buf_lifo
+- * @sched_buf_hash: Hash table to store buffers
+- */
+-struct idpf_txq_stash {
+-      struct idpf_buf_lifo buf_stack;
+-      DECLARE_HASHTABLE(sched_buf_hash, 12);
+-} ____cacheline_aligned;
+-
+-/**
+  * struct idpf_rx_queue - software structure representing a receive queue
+  * @rx: universal receive descriptor array
+  * @single_buf: buffer descriptor array in singleq
+@@ -644,11 +611,7 @@ libeth_cacheline_set_assert(struct idpf_
+  *               only once at the end of the cleaning routine.
+  * @clean_budget: singleq only, queue cleaning budget
+  * @cleaned_pkts: Number of packets cleaned for the above said case
+- * @stash: Tx buffer stash for Flow-based scheduling mode
+  * @refillq: Pointer to refill queue
+- * @compl_tag_bufid_m: Completion tag buffer id mask
+- * @compl_tag_cur_gen: Used to keep track of current completion tag generation
+- * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
+  * @stats_sync: See struct u64_stats_sync
+  * @q_stats: See union idpf_tx_queue_stats
+  * @q_id: Queue id
+@@ -677,7 +640,6 @@ struct idpf_tx_queue {
+       u16 desc_count;
+       u16 tx_min_pkt_len;
+-      u16 compl_tag_gen_s;
+       struct net_device *netdev;
+       __cacheline_group_end_aligned(read_mostly);
+@@ -694,13 +656,8 @@ struct idpf_tx_queue {
+       };
+       u16 cleaned_pkts;
+-      struct idpf_txq_stash *stash;
+       struct idpf_sw_queue *refillq;
+-      u16 compl_tag_bufid_m;
+-      u16 compl_tag_cur_gen;
+-      u16 compl_tag_gen_max;
+-
+       struct u64_stats_sync stats_sync;
+       struct idpf_tx_queue_stats q_stats;
+       __cacheline_group_end_aligned(read_write);
+@@ -715,7 +672,7 @@ struct idpf_tx_queue {
+       __cacheline_group_end_aligned(cold);
+ };
+ libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
+-                          96 + sizeof(struct u64_stats_sync),
++                          80 + sizeof(struct u64_stats_sync),
+                           32);
+ /**
+@@ -926,7 +883,6 @@ struct idpf_rxq_group {
+  * @vport: Vport back pointer
+  * @num_txq: Number of TX queues associated
+  * @txqs: Array of TX queue pointers
+- * @stashes: array of OOO stashes for the queues
+  * @complq: Associated completion queue pointer, split queue only
+  * @num_completions_pending: Total number of completions pending for the
+  *                         completion queue, acculumated for all TX queues
+@@ -941,7 +897,6 @@ struct idpf_txq_group {
+       u16 num_txq;
+       struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
+-      struct idpf_txq_stash *stashes;
+       struct idpf_compl_queue *complq;
diff --git a/queue-6.12/idpf-replace-flow-scheduling-buffer-ring-with-buffer-pool.patch b/queue-6.12/idpf-replace-flow-scheduling-buffer-ring-with-buffer-pool.patch
new file mode 100644 (file)
index 0000000..c1bc8e5
--- /dev/null
@@ -0,0 +1,528 @@
+From stable+bounces-201112-greg=kroah.com@vger.kernel.org Mon Dec 15 22:51:08 2025
+From: Tony Nguyen <anthony.l.nguyen@intel.com>
+Date: Mon, 15 Dec 2025 13:42:45 -0800
+Subject: idpf: replace flow scheduling buffer ring with buffer pool
+To: stable@vger.kernel.org
+Cc: Joshua Hay <joshua.a.hay@intel.com>, anthony.l.nguyen@intel.com, madhu.chittim@intel.com, decot@google.com, lrizzo@google.com, brianvv@google.com, Aleksandr Loktionov <aleksandr.loktionov@intel.com>, Samuel Salin <Samuel.salin@intel.com>
+Message-ID: <20251215214303.2608822-7-anthony.l.nguyen@intel.com>
+
+From: Joshua Hay <joshua.a.hay@intel.com>
+
+[ Upstream commit 5f417d551324d2894168b362f2429d120ab06243 ]
+
+Replace the TxQ buffer ring with one large pool/array of buffers (only
+for flow scheduling). This eliminates the tag generation and makes it
+impossible for a tag to be associated with more than one packet.
+
+The completion tag passed to HW through the descriptor is the index into
+the array. That same completion tag is posted back to the driver in the
+completion descriptor, and used to index into the array to quickly
+retrieve the buffer during cleaning.  In this way, the tags are treated
+as a fixed-size resource. If all tags are in use, no more packets can be
+sent on that particular queue (until some are freed up). The tag pool
+size is 64K since the completion tag width is 16 bits.
+
+For each packet, the driver pulls a free tag from the refillq to get the
+next free buffer index. When cleaning is complete, the tag is posted
+back to the refillq. A multi-frag packet spans multiple buffers in the
+driver and therefore uses multiple buffer indexes/tags from the pool.
+Each frag pulls from the refillq to get the next free buffer index.
+These are tracked in a next_buf field that replaces the completion tag
+field in the buffer struct. This chains the buffers together so that the
+packet can be cleaned from the starting completion tag taken from the
+completion descriptor, then from the next_buf field for each subsequent
+buffer.
+
+In case a dma_mapping_error occurs or the refillq runs out of free
+buf_ids, the packet will execute the rollback error path. This unmaps
+any buffers previously mapped for the packet. Since several free
+buf_ids could have already been pulled from the refillq, we need to
+restore its original state as well. Otherwise, the buf_ids/tags
+will be leaked and not used again until the queue is reallocated.
+
+Descriptor completions only advance the descriptor ring index to "clean"
+the descriptors. The packet completions only clean the buffers
+associated with the given packet completion tag and do not update the
+descriptor ring index.
+
+When operating in queue based scheduling mode, the array still acts as a
+ring and will only have TxQ descriptor count entries. The tx_bufs are
+still associated 1:1 with the descriptor ring entries and we can use the
+conventional indexing mechanisms.
+
+Fixes: c2d548cad150 ("idpf: add TX splitq napi poll support")
+Signed-off-by: Luigi Rizzo <lrizzo@google.com>
+Signed-off-by: Brian Vazquez <brianvv@google.com>
+Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
+Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c |  209 ++++++++++++----------------
+ drivers/net/ethernet/intel/idpf/idpf_txrx.h |   10 +
+ 2 files changed, 106 insertions(+), 113 deletions(-)
+
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -12,6 +12,7 @@ struct idpf_tx_stash {
+       struct libeth_sqe buf;
+ };
++#define idpf_tx_buf_next(buf)         (*(u32 *)&(buf)->priv)
+ #define idpf_tx_buf_compl_tag(buf)    (*(u32 *)&(buf)->priv)
+ LIBETH_SQE_CHECK_PRIV(u32);
+@@ -110,7 +111,7 @@ static void idpf_tx_buf_rel_all(struct i
+               return;
+       /* Free all the Tx buffer sk_buffs */
+-      for (i = 0; i < txq->desc_count; i++)
++      for (i = 0; i < txq->buf_pool_size; i++)
+               libeth_tx_complete(&txq->tx_buf[i], &cp);
+       kfree(txq->tx_buf);
+@@ -218,14 +219,17 @@ static void idpf_tx_desc_rel_all(struct
+ static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
+ {
+       struct idpf_buf_lifo *buf_stack;
+-      int buf_size;
+       int i;
+       /* Allocate book keeping buffers only. Buffers to be supplied to HW
+        * are allocated by kernel network stack and received as part of skb
+        */
+-      buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
+-      tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
++      if (idpf_queue_has(FLOW_SCH_EN, tx_q))
++              tx_q->buf_pool_size = U16_MAX;
++      else
++              tx_q->buf_pool_size = tx_q->desc_count;
++      tx_q->tx_buf = kcalloc(tx_q->buf_pool_size, sizeof(*tx_q->tx_buf),
++                             GFP_KERNEL);
+       if (!tx_q->tx_buf)
+               return -ENOMEM;
+@@ -294,7 +298,7 @@ static int idpf_tx_desc_alloc(const stru
+               return 0;
+       refillq = tx_q->refillq;
+-      refillq->desc_count = tx_q->desc_count;
++      refillq->desc_count = tx_q->buf_pool_size;
+       refillq->ring = kcalloc(refillq->desc_count, sizeof(u32),
+                               GFP_KERNEL);
+       if (!refillq->ring) {
+@@ -1841,6 +1845,12 @@ static bool idpf_tx_splitq_clean(struct
+       struct idpf_tx_buf *tx_buf;
+       bool clean_complete = true;
++      if (descs_only) {
++              /* Bump ring index to mark as cleaned. */
++              tx_q->next_to_clean = end;
++              return true;
++      }
++
+       tx_desc = &tx_q->flex_tx[ntc];
+       next_pending_desc = &tx_q->flex_tx[end];
+       tx_buf = &tx_q->tx_buf[ntc];
+@@ -1907,83 +1917,40 @@ do {                                                   \
+ } while (0)
+ /**
+- * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
++ * idpf_tx_clean_bufs - clean flow scheduling TX queue buffers
+  * @txq: queue to clean
+- * @compl_tag: completion tag of packet to clean (from completion descriptor)
++ * @buf_id: packet's starting buffer ID, from completion descriptor
+  * @cleaned: pointer to stats struct to track cleaned packets/bytes
+  * @budget: Used to determine if we are in netpoll
+  *
+- * Cleans all buffers associated with the input completion tag either from the
+- * TX buffer ring or from the hash table if the buffers were previously
+- * stashed. Returns the byte/segment count for the cleaned packet associated
+- * this completion tag.
+- */
+-static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
+-                                 struct libeth_sq_napi_stats *cleaned,
+-                                 int budget)
++ * Clean all buffers associated with the packet starting at buf_id. Returns the
++ * byte/segment count for the cleaned packet.
++ */
++static bool idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id,
++                             struct libeth_sq_napi_stats *cleaned,
++                             int budget)
+ {
+-      u16 idx = compl_tag & txq->compl_tag_bufid_m;
+       struct idpf_tx_buf *tx_buf = NULL;
+       struct libeth_cq_pp cp = {
+               .dev    = txq->dev,
+               .ss     = cleaned,
+               .napi   = budget,
+       };
+-      u16 ntc, orig_idx = idx;
+-
+-      tx_buf = &txq->tx_buf[idx];
+-
+-      if (unlikely(tx_buf->type <= LIBETH_SQE_CTX ||
+-                   idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
+-              return false;
+-      if (tx_buf->type == LIBETH_SQE_SKB)
++      tx_buf = &txq->tx_buf[buf_id];
++      if (tx_buf->type == LIBETH_SQE_SKB) {
+               libeth_tx_complete(tx_buf, &cp);
++              idpf_post_buf_refill(txq->refillq, buf_id);
++      }
+-      idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
++      while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
++              buf_id = idpf_tx_buf_next(tx_buf);
+-      while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) {
++              tx_buf = &txq->tx_buf[buf_id];
+               libeth_tx_complete(tx_buf, &cp);
+-              idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
++              idpf_post_buf_refill(txq->refillq, buf_id);
+       }
+-      /*
+-       * It's possible the packet we just cleaned was an out of order
+-       * completion, which means we can stash the buffers starting from
+-       * the original next_to_clean and reuse the descriptors. We need
+-       * to compare the descriptor ring next_to_clean packet's "first" buffer
+-       * to the "first" buffer of the packet we just cleaned to determine if
+-       * this is the case. Howevever, next_to_clean can point to either a
+-       * reserved buffer that corresponds to a context descriptor used for the
+-       * next_to_clean packet (TSO packet) or the "first" buffer (single
+-       * packet). The orig_idx from the packet we just cleaned will always
+-       * point to the "first" buffer. If next_to_clean points to a reserved
+-       * buffer, let's bump ntc once and start the comparison from there.
+-       */
+-      ntc = txq->next_to_clean;
+-      tx_buf = &txq->tx_buf[ntc];
+-
+-      if (tx_buf->type == LIBETH_SQE_CTX)
+-              idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
+-
+-      /*
+-       * If ntc still points to a different "first" buffer, clean the
+-       * descriptor ring and stash all of the buffers for later cleaning. If
+-       * we cannot stash all of the buffers, next_to_clean will point to the
+-       * "first" buffer of the packet that could not be stashed and cleaning
+-       * will start there next time.
+-       */
+-      if (unlikely(tx_buf != &txq->tx_buf[orig_idx] &&
+-                   !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned,
+-                                         true)))
+-              return true;
+-
+-      /*
+-       * Otherwise, update next_to_clean to reflect the cleaning that was
+-       * done above.
+-       */
+-      txq->next_to_clean = idx;
+-
+       return true;
+ }
+@@ -2014,12 +1981,10 @@ static void idpf_tx_handle_rs_completion
+       compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
+-      idpf_post_buf_refill(txq->refillq, compl_tag);
+-
+       /* If we didn't clean anything on the ring, this packet must be
+        * in the hash table. Go clean it there.
+        */
+-      if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
++      if (!idpf_tx_clean_bufs(txq, compl_tag, cleaned, budget))
+               idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
+ }
+@@ -2332,7 +2297,7 @@ static unsigned int idpf_tx_splitq_bump_
+  * Return: true if a buffer ID was found, false if not
+  */
+ static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
+-                                  u16 *buf_id)
++                                  u32 *buf_id)
+ {
+       u32 ntc = refillq->next_to_clean;
+       u32 refill_desc;
+@@ -2365,25 +2330,34 @@ static void idpf_tx_splitq_pkt_err_unmap
+                                        struct idpf_tx_splitq_params *params,
+                                        struct idpf_tx_buf *first)
+ {
++      struct idpf_sw_queue *refillq = txq->refillq;
+       struct libeth_sq_napi_stats ss = { };
+       struct idpf_tx_buf *tx_buf = first;
+       struct libeth_cq_pp cp = {
+               .dev    = txq->dev,
+               .ss     = &ss,
+       };
+-      u32 idx = 0;
+       u64_stats_update_begin(&txq->stats_sync);
+       u64_stats_inc(&txq->q_stats.dma_map_errs);
+       u64_stats_update_end(&txq->stats_sync);
+-      do {
++      libeth_tx_complete(tx_buf, &cp);
++      while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
++              tx_buf = &txq->tx_buf[idpf_tx_buf_next(tx_buf)];
+               libeth_tx_complete(tx_buf, &cp);
+-              idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+-      } while (idpf_tx_buf_compl_tag(tx_buf) == params->compl_tag);
++      }
+       /* Update tail in case netdev_xmit_more was previously true. */
+       idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
++
++      if (!refillq)
++              return;
++
++      /* Restore refillq state to avoid leaking tags. */
++      if (params->prev_refill_gen != idpf_queue_has(RFL_GEN_CHK, refillq))
++              idpf_queue_change(RFL_GEN_CHK, refillq);
++      refillq->next_to_clean = params->prev_refill_ntc;
+ }
+ /**
+@@ -2407,6 +2381,7 @@ static void idpf_tx_splitq_map(struct id
+       struct netdev_queue *nq;
+       struct sk_buff *skb;
+       skb_frag_t *frag;
++      u32 next_buf_id;
+       u16 td_cmd = 0;
+       dma_addr_t dma;
+@@ -2424,18 +2399,16 @@ static void idpf_tx_splitq_map(struct id
+       tx_buf = first;
+       first->nr_frags = 0;
+-      params->compl_tag =
+-              (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
+-
+       for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+               unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
+-              if (unlikely(dma_mapping_error(tx_q->dev, dma)))
++              if (unlikely(dma_mapping_error(tx_q->dev, dma))) {
++                      idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
+                       return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+                                                           first);
++              }
+               first->nr_frags++;
+-              idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
+               tx_buf->type = LIBETH_SQE_FRAG;
+               /* record length, and DMA address */
+@@ -2491,29 +2464,14 @@ static void idpf_tx_splitq_map(struct id
+                                                 max_data);
+                       if (unlikely(++i == tx_q->desc_count)) {
+-                              tx_buf = tx_q->tx_buf;
+                               tx_desc = &tx_q->flex_tx[0];
+                               i = 0;
+                               tx_q->compl_tag_cur_gen =
+                                       IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+                       } else {
+-                              tx_buf++;
+                               tx_desc++;
+                       }
+-                      /* Since this packet has a buffer that is going to span
+-                       * multiple descriptors, it's going to leave holes in
+-                       * to the TX buffer ring. To ensure these holes do not
+-                       * cause issues in the cleaning routines, we will clear
+-                       * them of any stale data and assign them the same
+-                       * completion tag as the current packet. Then when the
+-                       * packet is being cleaned, the cleaning routines will
+-                       * simply pass over these holes and finish cleaning the
+-                       * rest of the packet.
+-                       */
+-                      tx_buf->type = LIBETH_SQE_EMPTY;
+-                      idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
+-
+                       /* Adjust the DMA offset and the remaining size of the
+                        * fragment.  On the first iteration of this loop,
+                        * max_data will be >= 12K and <= 16K-1.  On any
+@@ -2538,15 +2496,26 @@ static void idpf_tx_splitq_map(struct id
+               idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
+               if (unlikely(++i == tx_q->desc_count)) {
+-                      tx_buf = tx_q->tx_buf;
+                       tx_desc = &tx_q->flex_tx[0];
+                       i = 0;
+                       tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+               } else {
+-                      tx_buf++;
+                       tx_desc++;
+               }
++              if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
++                      if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
++                                                            &next_buf_id))) {
++                              idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
++                              return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
++                                                                  first);
++                      }
++              } else {
++                      next_buf_id = i;
++              }
++              idpf_tx_buf_next(tx_buf) = next_buf_id;
++              tx_buf = &tx_q->tx_buf[next_buf_id];
++
+               size = skb_frag_size(frag);
+               data_len -= size;
+@@ -2561,6 +2530,7 @@ static void idpf_tx_splitq_map(struct id
+       /* write last descriptor with RS and EOP bits */
+       first->rs_idx = i;
++      idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
+       td_cmd |= params->eop_cmd;
+       idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
+       i = idpf_tx_splitq_bump_ntu(tx_q, i);
+@@ -2664,8 +2634,6 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_
+       struct idpf_flex_tx_ctx_desc *desc;
+       int i = txq->next_to_use;
+-      txq->tx_buf[i].type = LIBETH_SQE_CTX;
+-
+       /* grab the next descriptor */
+       desc = &txq->flex_ctx[i];
+       txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
+@@ -2721,6 +2689,7 @@ static netdev_tx_t idpf_tx_splitq_frame(
+       };
+       struct idpf_tx_buf *first;
+       unsigned int count;
++      u32 buf_id;
+       int tso;
+       count = idpf_tx_desc_count_required(tx_q, skb);
+@@ -2760,26 +2729,28 @@ static netdev_tx_t idpf_tx_splitq_frame(
+               u64_stats_update_end(&tx_q->stats_sync);
+       }
+-      /* record the location of the first descriptor for this packet */
+-      first = &tx_q->tx_buf[tx_q->next_to_use];
+-      first->skb = skb;
++      if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
++              struct idpf_sw_queue *refillq = tx_q->refillq;
+-      if (tso) {
+-              first->packets = tx_params.offload.tso_segs;
+-              first->bytes = skb->len +
+-                      ((first->packets - 1) * tx_params.offload.tso_hdr_len);
+-      } else {
+-              first->packets = 1;
+-              first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+-      }
++              /* Save refillq state in case of a packet rollback.  Otherwise,
++               * the tags will be leaked since they will be popped from the
++               * refillq but never reposted during cleaning.
++               */
++              tx_params.prev_refill_gen =
++                      idpf_queue_has(RFL_GEN_CHK, refillq);
++              tx_params.prev_refill_ntc = refillq->next_to_clean;
+-      if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
+               if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
+-                                                    &tx_params.compl_tag))) {
+-                      u64_stats_update_begin(&tx_q->stats_sync);
+-                      u64_stats_inc(&tx_q->q_stats.q_busy);
+-                      u64_stats_update_end(&tx_q->stats_sync);
++                                                    &buf_id))) {
++                      if (tx_params.prev_refill_gen !=
++                          idpf_queue_has(RFL_GEN_CHK, refillq))
++                              idpf_queue_change(RFL_GEN_CHK, refillq);
++                      refillq->next_to_clean = tx_params.prev_refill_ntc;
++
++                      tx_q->next_to_use = tx_params.prev_ntu;
++                      return idpf_tx_drop_skb(tx_q, skb);
+               }
++              tx_params.compl_tag = buf_id;
+               tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
+               tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
+@@ -2798,6 +2769,8 @@ static netdev_tx_t idpf_tx_splitq_frame(
+                       tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
+       } else {
++              buf_id = tx_q->next_to_use;
++
+               tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
+               tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
+@@ -2805,6 +2778,18 @@ static netdev_tx_t idpf_tx_splitq_frame(
+                       tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
+       }
++      first = &tx_q->tx_buf[buf_id];
++      first->skb = skb;
++
++      if (tso) {
++              first->packets = tx_params.offload.tso_segs;
++              first->bytes = skb->len +
++                      ((first->packets - 1) * tx_params.offload.tso_hdr_len);
++      } else {
++              first->packets = 1;
++              first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
++      }
++
+       idpf_tx_splitq_map(tx_q, &tx_params, first);
+       return NETDEV_TX_OK;
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+@@ -136,6 +136,8 @@ do {                                                               \
+       ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
+       0 : (txq)->compl_tag_cur_gen)
++#define IDPF_TXBUF_NULL                       U32_MAX
++
+ #define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
+ #define IDPF_TX_FLAGS_TSO             BIT(0)
+@@ -195,6 +197,8 @@ struct idpf_tx_offload_params {
+  * @td_tag: Descriptor tunneling tag
+  * @offload: Offload parameters
+  * @prev_ntu: stored TxQ next_to_use in case of rollback
++ * @prev_refill_ntc: stored refillq next_to_clean in case of packet rollback
++ * @prev_refill_gen: stored refillq generation bit in case of packet rollback
+  */
+ struct idpf_tx_splitq_params {
+       enum idpf_tx_desc_dtype_value dtype;
+@@ -207,6 +211,8 @@ struct idpf_tx_splitq_params {
+       struct idpf_tx_offload_params offload;
+       u16 prev_ntu;
++      u16 prev_refill_ntc;
++      bool prev_refill_gen;
+ };
+ enum idpf_tx_ctx_desc_eipt_offload {
+@@ -649,6 +655,7 @@ libeth_cacheline_set_assert(struct idpf_
+  * @size: Length of descriptor ring in bytes
+  * @dma: Physical address of ring
+  * @q_vector: Backreference to associated vector
++ * @buf_pool_size: Total number of idpf_tx_buf
+  */
+ struct idpf_tx_queue {
+       __cacheline_group_begin_aligned(read_mostly);
+@@ -704,11 +711,12 @@ struct idpf_tx_queue {
+       dma_addr_t dma;
+       struct idpf_q_vector *q_vector;
++      u32 buf_pool_size;
+       __cacheline_group_end_aligned(cold);
+ };
+ libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
+                           96 + sizeof(struct u64_stats_sync),
+-                          24);
++                          32);
+ /**
+  * struct idpf_buf_queue - software structure representing a buffer queue
diff --git a/queue-6.12/idpf-simplify-and-fix-splitq-tx-packet-rollback-error-path.patch b/queue-6.12/idpf-simplify-and-fix-splitq-tx-packet-rollback-error-path.patch
new file mode 100644 (file)
index 0000000..32be611
--- /dev/null
@@ -0,0 +1,262 @@
+From stable+bounces-201109-greg=kroah.com@vger.kernel.org Mon Dec 15 22:50:48 2025
+From: Tony Nguyen <anthony.l.nguyen@intel.com>
+Date: Mon, 15 Dec 2025 13:42:44 -0800
+Subject: idpf: simplify and fix splitq Tx packet rollback error path
+To: stable@vger.kernel.org
+Cc: Joshua Hay <joshua.a.hay@intel.com>, anthony.l.nguyen@intel.com, madhu.chittim@intel.com, decot@google.com, lrizzo@google.com, brianvv@google.com, Samuel Salin <Samuel.salin@intel.com>
+Message-ID: <20251215214303.2608822-6-anthony.l.nguyen@intel.com>
+
+From: Joshua Hay <joshua.a.hay@intel.com>
+
+[ Upstream commit b61dfa9bc4430ad82b96d3a7c1c485350f91b467 ]
+
+Move (and rename) the existing rollback logic to singleq.c since that
+will be the only consumer. Create a simplified splitq-specific rollback
+function to loop through and unmap tx_bufs based on the completion tag.
+This is critical before replacing the Tx buffer ring with the buffer
+pool since the previous rollback indexing will not work to unmap the
+chained buffers from the pool.
+
+Cache the next_to_use index before any portion of the packet is put on
+the descriptor ring. In case of an error, the rollback will bump tail to
+the correct next_to_use value. Because the splitq path now supports
+different types of context descriptors (and potentially multiple in the
+future), this will take care of rolling back any and all context
+descriptors encoded on the ring for the erroneous packet. The previous
+rollback logic was broken for PTP packets since it would not account for
+the PTP context descriptor.
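+
+In short (condensed from the diff below), the xmit path snapshots
+next_to_use before writing any descriptors, and the error path rewinds
+tail to that snapshot, which covers any number of context descriptors:
+
+	struct idpf_tx_splitq_params tx_params = {
+		.prev_ntu = tx_q->next_to_use,	/* before any descriptors */
+	};
+
+	/* ... on error, bump tail back to the pre-packet position ... */
+	idpf_tx_buf_hw_update(txq, params->prev_ntu, false);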
+
+Fixes: 1a49cf814fe1 ("idpf: add Tx timestamp flows")
+Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
+Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c |   57 ++++++++++++
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c         |   91 ++++++++------------
+ drivers/net/ethernet/intel/idpf/idpf_txrx.h         |    5 -
+ 3 files changed, 95 insertions(+), 58 deletions(-)
+
+--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+@@ -180,6 +180,58 @@ static int idpf_tx_singleq_csum(struct s
+ }
+ /**
++ * idpf_tx_singleq_dma_map_error - handle TX DMA map errors
++ * @txq: queue to send buffer on
++ * @skb: send buffer
++ * @first: original first buffer info buffer for packet
++ * @idx: starting point on ring to unwind
++ */
++static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
++                                        struct sk_buff *skb,
++                                        struct idpf_tx_buf *first, u16 idx)
++{
++      struct libeth_sq_napi_stats ss = { };
++      struct libeth_cq_pp cp = {
++              .dev    = txq->dev,
++              .ss     = &ss,
++      };
++
++      u64_stats_update_begin(&txq->stats_sync);
++      u64_stats_inc(&txq->q_stats.dma_map_errs);
++      u64_stats_update_end(&txq->stats_sync);
++
++      /* clear dma mappings for failed tx_buf map */
++      for (;;) {
++              struct idpf_tx_buf *tx_buf;
++
++              tx_buf = &txq->tx_buf[idx];
++              libeth_tx_complete(tx_buf, &cp);
++              if (tx_buf == first)
++                      break;
++              if (idx == 0)
++                      idx = txq->desc_count;
++              idx--;
++      }
++
++      if (skb_is_gso(skb)) {
++              union idpf_tx_flex_desc *tx_desc;
++
++              /* If we failed a DMA mapping for a TSO packet, we will have
++               * used one additional descriptor for a context
++               * descriptor. Reset that here.
++               */
++              tx_desc = &txq->flex_tx[idx];
++              memset(tx_desc, 0, sizeof(*tx_desc));
++              if (idx == 0)
++                      idx = txq->desc_count;
++              idx--;
++      }
++
++      /* Update tail in case netdev_xmit_more was previously true */
++      idpf_tx_buf_hw_update(txq, idx, false);
++}
++
++/**
+  * idpf_tx_singleq_map - Build the Tx base descriptor
+  * @tx_q: queue to send buffer on
+  * @first: first buffer info buffer to use
+@@ -219,8 +271,9 @@ static void idpf_tx_singleq_map(struct i
+       for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+               unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
+-              if (dma_mapping_error(tx_q->dev, dma))
+-                      return idpf_tx_dma_map_error(tx_q, skb, first, i);
++              if (unlikely(dma_mapping_error(tx_q->dev, dma)))
++                      return idpf_tx_singleq_dma_map_error(tx_q, skb,
++                                                           first, i);
+               /* record length, and DMA address */
+               dma_unmap_len_set(tx_buf, len, size);
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -2308,57 +2308,6 @@ unsigned int idpf_tx_desc_count_required
+ }
+ /**
+- * idpf_tx_dma_map_error - handle TX DMA map errors
+- * @txq: queue to send buffer on
+- * @skb: send buffer
+- * @first: original first buffer info buffer for packet
+- * @idx: starting point on ring to unwind
+- */
+-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
+-                         struct idpf_tx_buf *first, u16 idx)
+-{
+-      struct libeth_sq_napi_stats ss = { };
+-      struct libeth_cq_pp cp = {
+-              .dev    = txq->dev,
+-              .ss     = &ss,
+-      };
+-
+-      u64_stats_update_begin(&txq->stats_sync);
+-      u64_stats_inc(&txq->q_stats.dma_map_errs);
+-      u64_stats_update_end(&txq->stats_sync);
+-
+-      /* clear dma mappings for failed tx_buf map */
+-      for (;;) {
+-              struct idpf_tx_buf *tx_buf;
+-
+-              tx_buf = &txq->tx_buf[idx];
+-              libeth_tx_complete(tx_buf, &cp);
+-              if (tx_buf == first)
+-                      break;
+-              if (idx == 0)
+-                      idx = txq->desc_count;
+-              idx--;
+-      }
+-
+-      if (skb_is_gso(skb)) {
+-              union idpf_tx_flex_desc *tx_desc;
+-
+-              /* If we failed a DMA mapping for a TSO packet, we will have
+-               * used one additional descriptor for a context
+-               * descriptor. Reset that here.
+-               */
+-              tx_desc = &txq->flex_tx[idx];
+-              memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
+-              if (idx == 0)
+-                      idx = txq->desc_count;
+-              idx--;
+-      }
+-
+-      /* Update tail in case netdev_xmit_more was previously true */
+-      idpf_tx_buf_hw_update(txq, idx, false);
+-}
+-
+-/**
+  * idpf_tx_splitq_bump_ntu - adjust NTU and generation
+  * @txq: the tx ring to wrap
+  * @ntu: ring index to bump
+@@ -2407,6 +2356,37 @@ static bool idpf_tx_get_free_buf_id(stru
+ }
+ /**
++ * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
++ * @txq: Tx queue to unwind
++ * @params: pointer to splitq params struct
++ * @first: starting buffer for packet to unmap
++ */
++static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq,
++                                       struct idpf_tx_splitq_params *params,
++                                       struct idpf_tx_buf *first)
++{
++      struct libeth_sq_napi_stats ss = { };
++      struct idpf_tx_buf *tx_buf = first;
++      struct libeth_cq_pp cp = {
++              .dev    = txq->dev,
++              .ss     = &ss,
++      };
++      u32 idx = 0;
++
++      u64_stats_update_begin(&txq->stats_sync);
++      u64_stats_inc(&txq->q_stats.dma_map_errs);
++      u64_stats_update_end(&txq->stats_sync);
++
++      do {
++              libeth_tx_complete(tx_buf, &cp);
++              idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
++      } while (idpf_tx_buf_compl_tag(tx_buf) == params->compl_tag);
++
++      /* Update tail in case netdev_xmit_more was previously true. */
++      idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
++}
++
++/**
+  * idpf_tx_splitq_map - Build the Tx flex descriptor
+  * @tx_q: queue to send buffer on
+  * @params: pointer to splitq params struct
+@@ -2450,8 +2430,9 @@ static void idpf_tx_splitq_map(struct id
+       for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+               unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
+-              if (dma_mapping_error(tx_q->dev, dma))
+-                      return idpf_tx_dma_map_error(tx_q, skb, first, i);
++              if (unlikely(dma_mapping_error(tx_q->dev, dma)))
++                      return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
++                                                          first);
+               first->nr_frags++;
+               idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
+@@ -2735,7 +2716,9 @@ static bool idpf_tx_splitq_need_re(struc
+ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
+                                       struct idpf_tx_queue *tx_q)
+ {
+-      struct idpf_tx_splitq_params tx_params = { };
++      struct idpf_tx_splitq_params tx_params = {
++              .prev_ntu = tx_q->next_to_use,
++      };
+       struct idpf_tx_buf *first;
+       unsigned int count;
+       int tso;
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+@@ -194,6 +194,7 @@ struct idpf_tx_offload_params {
+  * @compl_tag: Associated tag for completion
+  * @td_tag: Descriptor tunneling tag
+  * @offload: Offload parameters
++ * @prev_ntu: stored TxQ next_to_use in case of rollback
+  */
+ struct idpf_tx_splitq_params {
+       enum idpf_tx_desc_dtype_value dtype;
+@@ -204,6 +205,8 @@ struct idpf_tx_splitq_params {
+       };
+       struct idpf_tx_offload_params offload;
++
++      u16 prev_ntu;
+ };
+ enum idpf_tx_ctx_desc_eipt_offload {
+@@ -1050,8 +1053,6 @@ void idpf_tx_buf_hw_update(struct idpf_t
+                          bool xmit_more);
+ unsigned int idpf_size_to_txd_count(unsigned int size);
+ netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
+-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
+-                         struct idpf_tx_buf *first, u16 ring_idx);
+ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
+                                        struct sk_buff *skb);
+ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
diff --git a/queue-6.12/idpf-stop-tx-if-there-are-insufficient-buffer-resources.patch b/queue-6.12/idpf-stop-tx-if-there-are-insufficient-buffer-resources.patch
new file mode 100644 (file)
index 0000000..e5cace1
--- /dev/null
@@ -0,0 +1,184 @@
+From stable+bounces-201110-greg=kroah.com@vger.kernel.org Mon Dec 15 22:51:01 2025
+From: Tony Nguyen <anthony.l.nguyen@intel.com>
+Date: Mon, 15 Dec 2025 13:42:46 -0800
+Subject: idpf: stop Tx if there are insufficient buffer resources
+To: stable@vger.kernel.org
+Cc: Joshua Hay <joshua.a.hay@intel.com>, anthony.l.nguyen@intel.com, madhu.chittim@intel.com, decot@google.com, lrizzo@google.com, brianvv@google.com, Samuel Salin <Samuel.salin@intel.com>
+Message-ID: <20251215214303.2608822-8-anthony.l.nguyen@intel.com>
+
+From: Joshua Hay <joshua.a.hay@intel.com>
+
+[ Upstream commit 0c3f135e840d4a2ba4253e15d530ec61bc30718e ]
+
+The Tx refillq logic will cause packets to be silently dropped if there
+are not enough buffer resources available to send a packet in flow
+scheduling mode. Instead, determine how many buffers are needed along
+with the number of descriptors. Make sure there are enough of both
+resources to send the packet, and stop the queue if not.
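+
+The buffer availability check is plain ring arithmetic, mirroring the
+idpf_tx_splitq_get_free_bufs() helper added below (the -1 presumably
+keeps a full ring distinguishable from an empty one):
+
+	static unsigned int free_bufs(unsigned int ntu, unsigned int ntc,
+				      unsigned int desc_count)
+	{
+		return (ntu > ntc ? 0 : desc_count) + ntu - ntc - 1;
+	}
+
+	/* e.g. desc_count = 512: ntu = 500, ntc = 10  -> 489 free
+	 *                        ntu = 10,  ntc = 500 ->  21 free (wrapped)
+	 */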
+
+Fixes: 7292af042bcf ("idpf: fix a race in txq wakeup")
+Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
+Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c |    4 -
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c         |   47 +++++++++++++-------
+ drivers/net/ethernet/intel/idpf/idpf_txrx.h         |   15 +++++-
+ 3 files changed, 47 insertions(+), 19 deletions(-)
+
+--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+@@ -415,11 +415,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct
+ {
+       struct idpf_tx_offload_params offload = { };
+       struct idpf_tx_buf *first;
++      u32 count, buf_count = 1;
+       int csum, tso, needed;
+-      unsigned int count;
+       __be16 protocol;
+-      count = idpf_tx_desc_count_required(tx_q, skb);
++      count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
+       if (unlikely(!count))
+               return idpf_tx_drop_skb(tx_q, skb);
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -2160,15 +2160,22 @@ void idpf_tx_splitq_build_flow_desc(unio
+       desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
+ }
+-/* Global conditions to tell whether the txq (and related resources)
+- * has room to allow the use of "size" descriptors.
++/**
++ * idpf_tx_splitq_has_room - check if enough Tx splitq resources are available
++ * @tx_q: the queue to be checked
++ * @descs_needed: number of descriptors required for this packet
++ * @bufs_needed: number of Tx buffers required for this packet
++ *
++ * Return: 0 if no room available, 1 otherwise
+  */
+-static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
++static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 descs_needed,
++                           u32 bufs_needed)
+ {
+-      if (IDPF_DESC_UNUSED(tx_q) < size ||
++      if (IDPF_DESC_UNUSED(tx_q) < descs_needed ||
+           IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+               IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
+-          IDPF_TX_BUF_RSV_LOW(tx_q))
++          IDPF_TX_BUF_RSV_LOW(tx_q) ||
++          idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed)
+               return 0;
+       return 1;
+ }
+@@ -2177,14 +2184,21 @@ static int idpf_txq_has_room(struct idpf
+  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
+  * @tx_q: the queue to be checked
+  * @descs_needed: number of descriptors required for this packet
++ * @bufs_needed: number of buffers needed for this packet
+  *
+- * Returns 0 if stop is not needed
++ * Return: 0 if stop is not needed
+  */
+ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
+-                                   unsigned int descs_needed)
++                                   u32 descs_needed,
++                                   u32 bufs_needed)
+ {
++      /* Since we have multiple resources to check for splitq, our
++       * start,stop_thrs becomes a boolean check instead of a count
++       * threshold.
++       */
+       if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+-                                    idpf_txq_has_room(tx_q, descs_needed),
++                                    idpf_txq_has_room(tx_q, descs_needed,
++                                                      bufs_needed),
+                                     1, 1))
+               return 0;
+@@ -2226,14 +2240,16 @@ void idpf_tx_buf_hw_update(struct idpf_t
+ }
+ /**
+- * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
++ * idpf_tx_res_count_required - get number of Tx resources needed for this pkt
+  * @txq: queue to send buffer on
+  * @skb: send buffer
++ * @bufs_needed: (output) number of buffers needed for this skb.
+  *
+- * Returns number of data descriptors needed for this skb.
++ * Return: number of data descriptors and buffers needed for this skb.
+  */
+-unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
+-                                       struct sk_buff *skb)
++unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
++                                      struct sk_buff *skb,
++                                      u32 *bufs_needed)
+ {
+       const struct skb_shared_info *shinfo;
+       unsigned int count = 0, i;
+@@ -2244,6 +2260,7 @@ unsigned int idpf_tx_desc_count_required
+               return count;
+       shinfo = skb_shinfo(skb);
++      *bufs_needed += shinfo->nr_frags;
+       for (i = 0; i < shinfo->nr_frags; i++) {
+               unsigned int size;
+@@ -2688,11 +2705,11 @@ static netdev_tx_t idpf_tx_splitq_frame(
+               .prev_ntu = tx_q->next_to_use,
+       };
+       struct idpf_tx_buf *first;
+-      unsigned int count;
++      u32 count, buf_count = 1;
+       u32 buf_id;
+       int tso;
+-      count = idpf_tx_desc_count_required(tx_q, skb);
++      count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
+       if (unlikely(!count))
+               return idpf_tx_drop_skb(tx_q, skb);
+@@ -2702,7 +2719,7 @@ static netdev_tx_t idpf_tx_splitq_frame(
+       /* Check for splitq specific TX resources */
+       count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
+-      if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
++      if (idpf_tx_maybe_stop_splitq(tx_q, count, buf_count)) {
+               idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+               return NETDEV_TX_BUSY;
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+@@ -1034,6 +1034,17 @@ static inline void idpf_vport_intr_set_w
+              reg->dyn_ctl);
+ }
++/**
++ * idpf_tx_splitq_get_free_bufs - get number of free buf_ids in refillq
++ * @refillq: pointer to refillq containing buf_ids
++ */
++static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
++{
++      return (refillq->next_to_use > refillq->next_to_clean ?
++              0 : refillq->desc_count) +
++             refillq->next_to_use - refillq->next_to_clean - 1;
++}
++
+ int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
+ void idpf_vport_init_num_qs(struct idpf_vport *vport,
+                           struct virtchnl2_create_vport *vport_msg);
+@@ -1061,8 +1072,8 @@ void idpf_tx_buf_hw_update(struct idpf_t
+                          bool xmit_more);
+ unsigned int idpf_size_to_txd_count(unsigned int size);
+ netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
+-unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
+-                                       struct sk_buff *skb);
++unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
++                                      struct sk_buff *skb, u32 *buf_count);
+ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+                                 struct idpf_tx_queue *tx_q);
diff --git a/queue-6.12/idpf-trigger-sw-interrupt-when-exiting-wb_on_itr-mode.patch b/queue-6.12/idpf-trigger-sw-interrupt-when-exiting-wb_on_itr-mode.patch
new file mode 100644 (file)
index 0000000..008fb44
--- /dev/null
@@ -0,0 +1,101 @@
+From stable+bounces-201106-greg=kroah.com@vger.kernel.org Mon Dec 15 22:50:30 2025
+From: Tony Nguyen <anthony.l.nguyen@intel.com>
+Date: Mon, 15 Dec 2025 13:42:41 -0800
+Subject: idpf: trigger SW interrupt when exiting wb_on_itr mode
+To: stable@vger.kernel.org
+Cc: Joshua Hay <joshua.a.hay@intel.com>, anthony.l.nguyen@intel.com, madhu.chittim@intel.com, decot@google.com, lrizzo@google.com, brianvv@google.com, Krishneil Singh <krishneil.k.singh@intel.com>
+Message-ID: <20251215214303.2608822-3-anthony.l.nguyen@intel.com>
+
+From: Joshua Hay <joshua.a.hay@intel.com>
+
+[ Upstream commit 0c1683c681681c14f4389e3bfa8de10baf242ba8 ]
+
+There is a race condition between exiting wb_on_itr and completion write
+backs. For example, we are in wb_on_itr mode and a Tx completion is
+generated by HW, ready to be written back, as we are re-enabling
+interrupts:
+
+       HW                      SW
+       |                       |
+       |                       | idpf_tx_splitq_clean_all
+       |                       | napi_complete_done
+       |                       |
+       | tx_completion_wb      | idpf_vport_intr_update_itr_ena_irq
+
+That tx_completion_wb happens before the vector is fully re-enabled.
+Continuing with this example, it is a UDP stream and the
+tx_completion_wb is the last one in the flow (there are no rx packets).
+Because the HW generated the completion before the interrupt is fully
+enabled, the HW will not fire the interrupt once the timer expires and
+the write back will not happen. NAPI poll won't be called.  We have
+indicated we're back in interrupt mode but nothing else will trigger the
+interrupt. Therefore, the completion goes unprocessed, triggering a Tx
+timeout.
+
+To mitigate this, fire a SW triggered interrupt upon exiting wb_on_itr.
+This interrupt will catch the rogue completion and avoid the timeout.
+Add logic to set the appropriate bits in the vector's dyn_ctl register.
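+
+Condensed from the diff below, the dyn_ctl value gains the SW interrupt
+trigger bits only when leaving wb_on_itr:
+
+	u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m;
+
+	if (q_vector->wb_on_itr)
+		itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m |
+			   q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m;
+
+	writel(itr_val, q_vector->intr_reg.dyn_ctl);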
+
+Fixes: 9c4a27da0ecc ("idpf: enable WB_ON_ITR")
+Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
+Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
+Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c |   29 ++++++++++++++++++----------
+ 1 file changed, 19 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -3502,21 +3502,31 @@ static void idpf_vport_intr_dis_irq_all(
+ /**
+  * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
+  * @q_vector: pointer to q_vector
+- * @type: itr index
+- * @itr: itr value
+  */
+-static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector,
+-                                      const int type, u16 itr)
++static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
+ {
+-      u32 itr_val;
++      u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m;
++      int type = IDPF_NO_ITR_UPDATE_IDX;
++      u16 itr = 0;
++
++      if (q_vector->wb_on_itr) {
++              /*
++               * Trigger a software interrupt when exiting wb_on_itr, to make
++               * sure we catch any pending write backs that might have been
++               * missed due to interrupt state transition.
++               */
++              itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m |
++                         q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m;
++              type = IDPF_SW_ITR_UPDATE_IDX;
++              itr = IDPF_ITR_20K;
++      }
+       itr &= IDPF_ITR_MASK;
+       /* Don't clear PBA because that can cause lost interrupts that
+        * came in while we were cleaning/polling
+        */
+-      itr_val = q_vector->intr_reg.dyn_ctl_intena_m |
+-                (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
+-                (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
++      itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
++                 (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
+       return itr_val;
+ }
+@@ -3614,9 +3624,8 @@ void idpf_vport_intr_update_itr_ena_irq(
+       /* net_dim() updates ITR out-of-band using a work item */
+       idpf_net_dim(q_vector);
++      intval = idpf_vport_intr_buildreg_itr(q_vector);
+       q_vector->wb_on_itr = false;
+-      intval = idpf_vport_intr_buildreg_itr(q_vector,
+-                                            IDPF_NO_ITR_UPDATE_IDX, 0);
+       writel(intval, q_vector->intr_reg.dyn_ctl);
+ }
diff --git a/queue-6.12/ipv6-adopt-dst_dev-helper.patch b/queue-6.12/ipv6-adopt-dst_dev-helper.patch
new file mode 100644 (file)
index 0000000..3ca526d
--- /dev/null
@@ -0,0 +1,508 @@
+From stable+bounces-204512-greg=kroah.com@vger.kernel.org Fri Jan  2 21:38:01 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Fri,  2 Jan 2026 12:37:25 -0800
+Subject: ipv6: adopt dst_dev() helper
+To: stable@vger.kernel.org
+Cc: Eric Dumazet <edumazet@google.com>, Kuniyuki Iwashima <kuniyu@google.com>, Jakub Kicinski <kuba@kernel.org>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260102203727.1455662-6-harshit.m.mogalapalli@oracle.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 1caf27297215a5241f9bfc9c07336349d9034ee3 ]
+
+Use the new helper as a step to deal with potential dst->dev races.
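+
+The helper itself is not part of this backport's hunks; upstream it is
+essentially a READ_ONCE() accessor, along the lines of:
+
+	static inline struct net_device *dst_dev(const struct dst_entry *dst)
+	{
+		return READ_ONCE(dst->dev);
+	}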
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250630121934.3399505-9-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[Harshit: Backport to 6.12.y, pulled this in as a prerequisite]
+Stable-dep-of: 99a2ace61b21 ("net: use dst_dev_rcu() in sk_setup_caps()")
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip6_route.h          |    4 ++--
+ net/ipv6/exthdrs.c               |    2 +-
+ net/ipv6/icmp.c                  |    4 +++-
+ net/ipv6/ila/ila_lwt.c           |    2 +-
+ net/ipv6/ioam6_iptunnel.c        |    4 ++--
+ net/ipv6/ip6_gre.c               |    8 +++++---
+ net/ipv6/ip6_output.c            |   19 ++++++++++---------
+ net/ipv6/ip6_tunnel.c            |    4 ++--
+ net/ipv6/ip6_udp_tunnel.c        |    2 +-
+ net/ipv6/ip6_vti.c               |    2 +-
+ net/ipv6/ndisc.c                 |    6 ++++--
+ net/ipv6/netfilter/nf_dup_ipv6.c |    2 +-
+ net/ipv6/output_core.c           |    2 +-
+ net/ipv6/route.c                 |   20 ++++++++++++--------
+ net/ipv6/rpl_iptunnel.c          |    4 ++--
+ net/ipv6/seg6_iptunnel.c         |   20 +++++++++++---------
+ net/ipv6/seg6_local.c            |    2 +-
+ 17 files changed, 60 insertions(+), 47 deletions(-)
+
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -274,7 +274,7 @@ static inline unsigned int ip6_skb_dst_m
+       unsigned int mtu;
+       if (np && READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE) {
+-              mtu = READ_ONCE(dst->dev->mtu);
++              mtu = READ_ONCE(dst_dev(dst)->mtu);
+               mtu -= lwtunnel_headroom(dst->lwtstate, mtu);
+       } else {
+               mtu = dst_mtu(dst);
+@@ -337,7 +337,7 @@ static inline unsigned int ip6_dst_mtu_m
+       mtu = IPV6_MIN_MTU;
+       rcu_read_lock();
+-      idev = __in6_dev_get(dst->dev);
++      idev = __in6_dev_get(dst_dev(dst));
+       if (idev)
+               mtu = READ_ONCE(idev->cnf.mtu6);
+       rcu_read_unlock();
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -306,7 +306,7 @@ static int ipv6_destopt_rcv(struct sk_bu
+       if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
+           !pskb_may_pull(skb, (skb_transport_offset(skb) +
+                                ((skb_transport_header(skb)[1] + 1) << 3)))) {
+-              __IP6_INC_STATS(dev_net(dst->dev), idev,
++              __IP6_INC_STATS(dev_net(dst_dev(dst)), idev,
+                               IPSTATS_MIB_INHDRERRORS);
+ fail_and_free:
+               kfree_skb(skb);
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -196,6 +196,7 @@ static bool icmpv6_xrlim_allow(struct so
+                              struct flowi6 *fl6, bool apply_ratelimit)
+ {
+       struct net *net = sock_net(sk);
++      struct net_device *dev;
+       struct dst_entry *dst;
+       bool res = false;
+@@ -208,10 +209,11 @@ static bool icmpv6_xrlim_allow(struct so
+        * this lookup should be more aggressive (not longer than timeout).
+        */
+       dst = ip6_route_output(net, sk, fl6);
++      dev = dst_dev(dst);
+       if (dst->error) {
+               IP6_INC_STATS(net, ip6_dst_idev(dst),
+                             IPSTATS_MIB_OUTNOROUTES);
+-      } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
++      } else if (dev && (dev->flags & IFF_LOOPBACK)) {
+               res = true;
+       } else {
+               struct rt6_info *rt = dst_rt6_info(dst);
+--- a/net/ipv6/ila/ila_lwt.c
++++ b/net/ipv6/ila/ila_lwt.c
+@@ -70,7 +70,7 @@ static int ila_output(struct net *net, s
+                */
+               memset(&fl6, 0, sizeof(fl6));
+-              fl6.flowi6_oif = orig_dst->dev->ifindex;
++              fl6.flowi6_oif = dst_dev(orig_dst)->ifindex;
+               fl6.flowi6_iif = LOOPBACK_IFINDEX;
+               fl6.daddr = *rt6_nexthop(dst_rt6_info(orig_dst),
+                                        &ip6h->daddr);
+--- a/net/ipv6/ioam6_iptunnel.c
++++ b/net/ipv6/ioam6_iptunnel.c
+@@ -328,7 +328,7 @@ static int ioam6_do_encap(struct net *ne
+       if (has_tunsrc)
+               memcpy(&hdr->saddr, tunsrc, sizeof(*tunsrc));
+       else
+-              ipv6_dev_get_saddr(net, dst->dev, &hdr->daddr,
++              ipv6_dev_get_saddr(net, dst_dev(dst), &hdr->daddr,
+                                  IPV6_PREFER_SRC_PUBLIC, &hdr->saddr);
+       skb_postpush_rcsum(skb, hdr, len);
+@@ -417,7 +417,7 @@ do_encap:
+                       local_bh_enable();
+               }
+-              err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
++              err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
+               if (unlikely(err))
+                       goto drop;
+       }
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1084,9 +1084,11 @@ static netdev_tx_t ip6erspan_tunnel_xmit
+                        htonl(atomic_fetch_inc(&t->o_seqno)));
+       /* TooBig packet may have updated dst->dev's mtu */
+-      if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
+-              dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
+-
++      if (!t->parms.collect_md && dst) {
++              mtu = READ_ONCE(dst_dev(dst)->mtu);
++              if (dst_mtu(dst) > mtu)
++                      dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
++      }
+       err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+                          NEXTHDR_GRE);
+       if (err != 0) {
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -60,7 +60,7 @@
+ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+       struct dst_entry *dst = skb_dst(skb);
+-      struct net_device *dev = dst->dev;
++      struct net_device *dev = dst_dev(dst);
+       struct inet6_dev *idev = ip6_dst_idev(dst);
+       unsigned int hh_len = LL_RESERVED_SPACE(dev);
+       const struct in6_addr *daddr, *nexthop;
+@@ -271,7 +271,7 @@ int ip6_xmit(const struct sock *sk, stru
+       const struct ipv6_pinfo *np = inet6_sk(sk);
+       struct in6_addr *first_hop = &fl6->daddr;
+       struct dst_entry *dst = skb_dst(skb);
+-      struct net_device *dev = dst->dev;
++      struct net_device *dev = dst_dev(dst);
+       struct inet6_dev *idev = ip6_dst_idev(dst);
+       struct hop_jumbo_hdr *hop_jumbo;
+       int hoplen = sizeof(*hop_jumbo);
+@@ -503,7 +503,8 @@ int ip6_forward(struct sk_buff *skb)
+       struct dst_entry *dst = skb_dst(skb);
+       struct ipv6hdr *hdr = ipv6_hdr(skb);
+       struct inet6_skb_parm *opt = IP6CB(skb);
+-      struct net *net = dev_net(dst->dev);
++      struct net *net = dev_net(dst_dev(dst));
++      struct net_device *dev;
+       struct inet6_dev *idev;
+       SKB_DR(reason);
+       u32 mtu;
+@@ -591,12 +592,12 @@ int ip6_forward(struct sk_buff *skb)
+               goto drop;
+       }
+       dst = skb_dst(skb);
+-
++      dev = dst_dev(dst);
+       /* IPv6 specs say nothing about it, but it is clear that we cannot
+          send redirects to source routed frames.
+          We don't send redirects to frames decapsulated from IPsec.
+        */
+-      if (IP6CB(skb)->iif == dst->dev->ifindex &&
++      if (IP6CB(skb)->iif == dev->ifindex &&
+           opt->srcrt == 0 && !skb_sec_path(skb)) {
+               struct in6_addr *target = NULL;
+               struct inet_peer *peer;
+@@ -644,7 +645,7 @@ int ip6_forward(struct sk_buff *skb)
+       if (ip6_pkt_too_big(skb, mtu)) {
+               /* Again, force OUTPUT device used as source address */
+-              skb->dev = dst->dev;
++              skb->dev = dev;
+               icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+               __IP6_INC_STATS(net, idev, IPSTATS_MIB_INTOOBIGERRORS);
+               __IP6_INC_STATS(net, ip6_dst_idev(dst),
+@@ -653,7 +654,7 @@ int ip6_forward(struct sk_buff *skb)
+               return -EMSGSIZE;
+       }
+-      if (skb_cow(skb, dst->dev->hard_header_len)) {
++      if (skb_cow(skb, dev->hard_header_len)) {
+               __IP6_INC_STATS(net, ip6_dst_idev(dst),
+                               IPSTATS_MIB_OUTDISCARDS);
+               goto drop;
+@@ -666,7 +667,7 @@ int ip6_forward(struct sk_buff *skb)
+       hdr->hop_limit--;
+       return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
+-                     net, NULL, skb, skb->dev, dst->dev,
++                     net, NULL, skb, skb->dev, dev,
+                      ip6_forward_finish);
+ error:
+@@ -1093,7 +1094,7 @@ static struct dst_entry *ip6_sk_dst_chec
+ #ifdef CONFIG_IPV6_SUBTREES
+           ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
+ #endif
+-         (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
++         (fl6->flowi6_oif && fl6->flowi6_oif != dst_dev(dst)->ifindex)) {
+               dst_release(dst);
+               dst = NULL;
+       }
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1179,7 +1179,7 @@ route_lookup:
+               ndst = dst;
+       }
+-      tdev = dst->dev;
++      tdev = dst_dev(dst);
+       if (tdev == dev) {
+               DEV_STATS_INC(dev, collisions);
+@@ -1255,7 +1255,7 @@ route_lookup:
+       /* Calculate max headroom for all the headers and adjust
+        * needed_headroom if necessary.
+        */
+-      max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
++      max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr)
+                       + dst->header_len + t->hlen;
+       ip_tunnel_adj_headroom(dev, max_headroom);
+--- a/net/ipv6/ip6_udp_tunnel.c
++++ b/net/ipv6/ip6_udp_tunnel.c
+@@ -168,7 +168,7 @@ struct dst_entry *udp_tunnel6_dst_lookup
+               netdev_dbg(dev, "no route to %pI6\n", &fl6.daddr);
+               return ERR_PTR(-ENETUNREACH);
+       }
+-      if (dst->dev == dev) { /* is this necessary? */
++      if (dst_dev(dst) == dev) { /* is this necessary? */
+               netdev_dbg(dev, "circular route to %pI6\n", &fl6.daddr);
+               dst_release(dst);
+               return ERR_PTR(-ELOOP);
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -497,7 +497,7 @@ vti6_xmit(struct sk_buff *skb, struct ne
+                             (const struct in6_addr *)&x->id.daddr))
+               goto tx_err_link_failure;
+-      tdev = dst->dev;
++      tdev = dst_dev(dst);
+       if (tdev == dev) {
+               DEV_STATS_INC(dev, collisions);
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -473,6 +473,7 @@ void ndisc_send_skb(struct sk_buff *skb,
+ {
+       struct icmp6hdr *icmp6h = icmp6_hdr(skb);
+       struct dst_entry *dst = skb_dst(skb);
++      struct net_device *dev;
+       struct inet6_dev *idev;
+       struct net *net;
+       struct sock *sk;
+@@ -507,11 +508,12 @@ void ndisc_send_skb(struct sk_buff *skb,
+       ip6_nd_hdr(skb, saddr, daddr, READ_ONCE(inet6_sk(sk)->hop_limit), skb->len);
+-      idev = __in6_dev_get(dst->dev);
++      dev = dst_dev(dst);
++      idev = __in6_dev_get(dev);
+       IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
+       err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+-                    net, sk, skb, NULL, dst->dev,
++                    net, sk, skb, NULL, dev,
+                     dst_output);
+       if (!err) {
+               ICMP6MSGOUT_INC_STATS(net, idev, type);
+--- a/net/ipv6/netfilter/nf_dup_ipv6.c
++++ b/net/ipv6/netfilter/nf_dup_ipv6.c
+@@ -38,7 +38,7 @@ static bool nf_dup_ipv6_route(struct net
+       }
+       skb_dst_drop(skb);
+       skb_dst_set(skb, dst);
+-      skb->dev      = dst->dev;
++      skb->dev      = dst_dev(dst);
+       skb->protocol = htons(ETH_P_IPV6);
+       return true;
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -105,7 +105,7 @@ int ip6_dst_hoplimit(struct dst_entry *d
+ {
+       int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
+       if (hoplimit == 0) {
+-              struct net_device *dev = dst->dev;
++              struct net_device *dev = dst_dev(dst);
+               struct inet6_dev *idev;
+               rcu_read_lock();
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -228,13 +228,13 @@ static struct neighbour *ip6_dst_neigh_l
+       const struct rt6_info *rt = dst_rt6_info(dst);
+       return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
+-                              dst->dev, skb, daddr);
++                              dst_dev(dst), skb, daddr);
+ }
+ static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
+ {
+       const struct rt6_info *rt = dst_rt6_info(dst);
+-      struct net_device *dev = dst->dev;
++      struct net_device *dev = dst_dev(dst);
+       daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
+       if (!daddr)
+@@ -2945,7 +2945,7 @@ static void __ip6_rt_update_pmtu(struct
+               if (res.f6i->nh) {
+                       struct fib6_nh_match_arg arg = {
+-                              .dev = dst->dev,
++                              .dev = dst_dev(dst),
+                               .gw = &rt6->rt6i_gateway,
+                       };
+@@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(ip6_sk_redirect);
+ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
+ {
+-      struct net_device *dev = dst->dev;
++      struct net_device *dev = dst_dev(dst);
+       unsigned int mtu = dst_mtu(dst);
+       struct net *net;
+@@ -4264,7 +4264,7 @@ static void rt6_do_redirect(struct dst_e
+       if (res.f6i->nh) {
+               struct fib6_nh_match_arg arg = {
+-                      .dev = dst->dev,
++                      .dev = dst_dev(dst),
+                       .gw = &rt->rt6i_gateway,
+               };
+@@ -4551,13 +4551,14 @@ int ipv6_route_ioctl(struct net *net, un
+ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
+ {
+       struct dst_entry *dst = skb_dst(skb);
+-      struct net *net = dev_net(dst->dev);
++      struct net_device *dev = dst_dev(dst);
++      struct net *net = dev_net(dev);
+       struct inet6_dev *idev;
+       SKB_DR(reason);
+       int type;
+       if (netif_is_l3_master(skb->dev) ||
+-          dst->dev == net->loopback_dev)
++          dev == net->loopback_dev)
+               idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
+       else
+               idev = ip6_dst_idev(dst);
+@@ -5775,11 +5776,14 @@ static int rt6_fill_node(struct net *net
+        * each as a nexthop within RTA_MULTIPATH.
+        */
+       if (rt6) {
++              struct net_device *dev;
++
+               if (rt6_flags & RTF_GATEWAY &&
+                   nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
+                       goto nla_put_failure;
+-              if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
++              dev = dst_dev(dst);
++              if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
+                       goto nla_put_failure;
+               if (dst->lwtstate &&
+--- a/net/ipv6/rpl_iptunnel.c
++++ b/net/ipv6/rpl_iptunnel.c
+@@ -242,7 +242,7 @@ static int rpl_output(struct net *net, s
+                       local_bh_enable();
+               }
+-              err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
++              err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
+               if (unlikely(err))
+                       goto drop;
+       }
+@@ -297,7 +297,7 @@ static int rpl_input(struct sk_buff *skb
+                       local_bh_enable();
+               }
+-              err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
++              err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
+               if (unlikely(err))
+                       goto drop;
+       } else {
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -128,7 +128,8 @@ static int __seg6_do_srh_encap(struct sk
+                              int proto, struct dst_entry *cache_dst)
+ {
+       struct dst_entry *dst = skb_dst(skb);
+-      struct net *net = dev_net(dst->dev);
++      struct net_device *dev = dst_dev(dst);
++      struct net *net = dev_net(dev);
+       struct ipv6hdr *hdr, *inner_hdr;
+       struct ipv6_sr_hdr *isrh;
+       int hdrlen, tot_len, err;
+@@ -181,7 +182,7 @@ static int __seg6_do_srh_encap(struct sk
+       isrh->nexthdr = proto;
+       hdr->daddr = isrh->segments[isrh->first_segment];
+-      set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
++      set_tun_src(net, dev, &hdr->daddr, &hdr->saddr);
+ #ifdef CONFIG_IPV6_SEG6_HMAC
+       if (sr_has_hmac(isrh)) {
+@@ -212,7 +213,8 @@ static int seg6_do_srh_encap_red(struct
+ {
+       __u8 first_seg = osrh->first_segment;
+       struct dst_entry *dst = skb_dst(skb);
+-      struct net *net = dev_net(dst->dev);
++      struct net_device *dev = dst_dev(dst);
++      struct net *net = dev_net(dev);
+       struct ipv6hdr *hdr, *inner_hdr;
+       int hdrlen = ipv6_optlen(osrh);
+       int red_tlv_offset, tlv_offset;
+@@ -270,7 +272,7 @@ static int seg6_do_srh_encap_red(struct
+       if (skip_srh) {
+               hdr->nexthdr = proto;
+-              set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
++              set_tun_src(net, dev, &hdr->daddr, &hdr->saddr);
+               goto out;
+       }
+@@ -306,7 +308,7 @@ static int seg6_do_srh_encap_red(struct
+ srcaddr:
+       isrh->nexthdr = proto;
+-      set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
++      set_tun_src(net, dev, &hdr->daddr, &hdr->saddr);
+ #ifdef CONFIG_IPV6_SEG6_HMAC
+       if (unlikely(!skip_srh && sr_has_hmac(isrh))) {
+@@ -507,7 +509,7 @@ static int seg6_input_core(struct net *n
+                       local_bh_enable();
+               }
+-              err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
++              err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
+               if (unlikely(err))
+                       goto drop;
+       } else {
+@@ -518,7 +520,7 @@ static int seg6_input_core(struct net *n
+       if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+               return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+                              dev_net(skb->dev), NULL, skb, NULL,
+-                             skb_dst(skb)->dev, seg6_input_finish);
++                             skb_dst_dev(skb), seg6_input_finish);
+       return seg6_input_finish(dev_net(skb->dev), NULL, skb);
+ drop:
+@@ -593,7 +595,7 @@ static int seg6_output_core(struct net *
+                       local_bh_enable();
+               }
+-              err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
++              err = skb_cow_head(skb, LL_RESERVED_SPACE(dst_dev(dst)));
+               if (unlikely(err))
+                       goto drop;
+       }
+@@ -603,7 +605,7 @@ static int seg6_output_core(struct net *
+       if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
+               return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
+-                             NULL, skb_dst(skb)->dev, dst_output);
++                             NULL, dst_dev(dst), dst_output);
+       return dst_output(net, sk, skb);
+ drop:
+--- a/net/ipv6/seg6_local.c
++++ b/net/ipv6/seg6_local.c
+@@ -310,7 +310,7 @@ seg6_lookup_any_nexthop(struct sk_buff *
+       if (!local_delivery)
+               dev_flags |= IFF_LOOPBACK;
+-      if (dst && (dst->dev->flags & dev_flags) && !dst->error) {
++      if (dst && (dst_dev(dst)->flags & dev_flags) && !dst->error) {
+               dst_release(dst);
+               dst = NULL;
+       }
diff --git a/queue-6.12/jbd2-fix-the-inconsistency-between-checksum-and-data-in-memory-for-journal-sb.patch b/queue-6.12/jbd2-fix-the-inconsistency-between-checksum-and-data-in-memory-for-journal-sb.patch
new file mode 100644 (file)
index 0000000..23842ad
--- /dev/null
@@ -0,0 +1,94 @@
+From stable+bounces-204153-greg=kroah.com@vger.kernel.org Tue Dec 30 00:34:33 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Dec 2025 18:34:27 -0500
+Subject: jbd2: fix the inconsistency between checksum and data in memory for journal sb
+To: stable@vger.kernel.org
+Cc: Ye Bin <yebin10@huawei.com>, Baokun Li <libaokun1@huawei.com>, "Darrick J. Wong" <djwong@kernel.org>, Jan Kara <jack@suse.cz>, Theodore Ts'o <tytso@mit.edu>, stable@kernel.org, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251229233427.1825600-1-sashal@kernel.org>
+
+From: Ye Bin <yebin10@huawei.com>
+
+[ Upstream commit 6abfe107894af7e8ce3a2e120c619d81ee764ad5 ]
+
+Copying the file system while it is mounted as read-only results in
+a mount failure:
+[~]# mkfs.ext4 -F /dev/sdc
+[~]# mount /dev/sdc -o ro /mnt/test
+[~]# dd if=/dev/sdc of=/dev/sda bs=1M
+[~]# mount /dev/sda /mnt/test1
+[ 1094.849826] JBD2: journal checksum error
+[ 1094.850927] EXT4-fs (sda): Could not load journal inode
+mount: mount /dev/sda on /mnt/test1 failed: Bad message
+
+The process described above is just an abstracted way I came up with to
+reproduce the issue. In the actual scenario, the file system was mounted
+read-only and then copied while it was still mounted. It was found that
+the mount operation failed. The user intended to verify the data or use
+it as a backup, and this action was performed during a version upgrade.
+The above issue may happen as follows:
+ext4_fill_super
+ set_journal_csum_feature_set(sb)
+  if (ext4_has_metadata_csum(sb))
+   incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
+  if (test_opt(sb, JOURNAL_CHECKSUM))
+   jbd2_journal_set_features(sbi->s_journal, compat, 0, incompat);
+    lock_buffer(journal->j_sb_buffer);
+    sb->s_feature_incompat  |= cpu_to_be32(incompat);
+    // The data in the journal sb was modified, but the checksum was
+    // not updated, so the copy remaining in memory has a mismatch
+    // between the data and the checksum.
+    unlock_buffer(journal->j_sb_buffer);
+
+In this case, the journal sb copied over is in a state where the checksum
+and data are inconsistent, so mounting fails.
+To solve the above issue, update the checksum in memory after modifying
+the journal sb.
+
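+In code terms the fix follows this pattern (a sketch mirroring the diff
+below; note that on this branch jbd2_superblock_csum() also takes the
+journal parameter, per the backport note):
+
+  lock_buffer(journal->j_sb_buffer);
+  sb->s_feature_incompat |= cpu_to_be32(incompat);
+  /* keep the in-memory checksum consistent with the modified data */
+  if (jbd2_journal_has_csum_v2or3(journal))
+          sb->s_checksum = jbd2_superblock_csum(journal, sb);
+  unlock_buffer(journal->j_sb_buffer);
+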
+Fixes: 4fd5ea43bc11 ("jbd2: checksum journal superblock")
+Signed-off-by: Ye Bin <yebin10@huawei.com>
+Reviewed-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Message-ID: <20251103010123.3753631-1-yebin@huaweicloud.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Cc: stable@kernel.org
+[ jbd2_superblock_csum() also takes a journal param ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/jbd2/journal.c |   14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -2375,6 +2375,12 @@ int jbd2_journal_set_features(journal_t
+       sb->s_feature_compat    |= cpu_to_be32(compat);
+       sb->s_feature_ro_compat |= cpu_to_be32(ro);
+       sb->s_feature_incompat  |= cpu_to_be32(incompat);
++      /*
++       * Update the checksum now so that it is valid even for read-only
++       * filesystems where jbd2_write_superblock() doesn't get called.
++       */
++      if (jbd2_journal_has_csum_v2or3(journal))
++              sb->s_checksum = jbd2_superblock_csum(journal, sb);
+       unlock_buffer(journal->j_sb_buffer);
+       jbd2_journal_init_transaction_limits(journal);
+@@ -2404,9 +2410,17 @@ void jbd2_journal_clear_features(journal
+       sb = journal->j_superblock;
++      lock_buffer(journal->j_sb_buffer);
+       sb->s_feature_compat    &= ~cpu_to_be32(compat);
+       sb->s_feature_ro_compat &= ~cpu_to_be32(ro);
+       sb->s_feature_incompat  &= ~cpu_to_be32(incompat);
++      /*
++       * Update the checksum now so that it is valid even for read-only
++       * filesystems where jbd2_write_superblock() doesn't get called.
++       */
++      if (jbd2_journal_has_csum_v2or3(journal))
++              sb->s_checksum = jbd2_superblock_csum(journal, sb);
++      unlock_buffer(journal->j_sb_buffer);
+       jbd2_journal_init_transaction_limits(journal);
+ }
+ EXPORT_SYMBOL(jbd2_journal_clear_features);
diff --git a/queue-6.12/lib-crypto-riscv-chacha-avoid-s0-fp-register.patch b/queue-6.12/lib-crypto-riscv-chacha-avoid-s0-fp-register.patch
new file mode 100644 (file)
index 0000000..c26de20
--- /dev/null
@@ -0,0 +1,57 @@
+From stable+bounces-204145-greg=kroah.com@vger.kernel.org Mon Dec 29 23:39:13 2025
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Mon, 29 Dec 2025 14:37:29 -0800
+Subject: lib/crypto: riscv/chacha: Avoid s0/fp register
+To: stable@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org, linux-riscv@lists.infradead.org, Vivian Wang <wangruikang@iscas.ac.cn>, Eric Biggers <ebiggers@kernel.org>
+Message-ID: <20251229223729.99861-1-ebiggers@kernel.org>
+
+From: Vivian Wang <wangruikang@iscas.ac.cn>
+
+commit 43169328c7b4623b54b7713ec68479cebda5465f upstream.
+
+In chacha_zvkb, avoid using the s0 register, which is the frame pointer,
+by reallocating KEY0 to t5. This makes stack traces available if e.g. a
+crash happens in chacha_zvkb.
+
+No frame pointer maintenance is otherwise required since this is a leaf
+function. (t5 is also a caller-saved temporary, so unlike s0 it does not
+need to be spilled, which is why the sd/ld of s0 drop out of the
+prologue and epilogue.)
+
+Signed-off-by: Vivian Wang <wangruikang@iscas.ac.cn>
+Fixes: bb54668837a0 ("crypto: riscv - add vector crypto accelerated ChaCha20")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20251202-riscv-chacha_zvkb-fp-v2-1-7bd00098c9dc@iscas.ac.cn
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/crypto/chacha-riscv64-zvkb.S |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/riscv/crypto/chacha-riscv64-zvkb.S
++++ b/arch/riscv/crypto/chacha-riscv64-zvkb.S
+@@ -60,7 +60,8 @@
+ #define VL            t2
+ #define STRIDE                t3
+ #define NROUNDS               t4
+-#define KEY0          s0
++#define KEY0          t5
++// Avoid s0/fp to allow for unwinding
+ #define KEY1          s1
+ #define KEY2          s2
+ #define KEY3          s3
+@@ -141,7 +142,6 @@ SYM_FUNC_START(chacha20_zvkb)
+       srli            LEN, LEN, 6     // Bytes to blocks
+       addi            sp, sp, -96
+-      sd              s0, 0(sp)
+       sd              s1, 8(sp)
+       sd              s2, 16(sp)
+       sd              s3, 24(sp)
+@@ -277,7 +277,6 @@ SYM_FUNC_START(chacha20_zvkb)
+       add             INP, INP, TMP
+       bnez            LEN, .Lblock_loop
+-      ld              s0, 0(sp)
+       ld              s1, 8(sp)
+       ld              s2, 16(sp)
+       ld              s3, 24(sp)
diff --git a/queue-6.12/md-raid10-wait-barrier-before-returning-discard-request-with-req_nowait.patch b/queue-6.12/md-raid10-wait-barrier-before-returning-discard-request-with-req_nowait.patch
new file mode 100644 (file)
index 0000000..694e4d3
--- /dev/null
@@ -0,0 +1,44 @@
+From stable+bounces-204509-greg=kroah.com@vger.kernel.org Fri Jan  2 21:37:54 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Fri,  2 Jan 2026 12:37:22 -0800
+Subject: md/raid10: wait barrier before returning discard request with REQ_NOWAIT
+To: stable@vger.kernel.org
+Cc: Xiao Ni <xni@redhat.com>, Coly Li <colyli@kernel.org>, Yu Kuai <yukuai3@huawei.com>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260102203727.1455662-3-harshit.m.mogalapalli@oracle.com>
+
+From: Xiao Ni <xni@redhat.com>
+
+[ Upstream commit 3db4404435397a345431b45f57876a3df133f3b4 ]
+
+raid10_handle_discard() should wait for the barrier before returning a
+discard bio that has REQ_NOWAIT set. There is also no need to print a
+warning call trace when a discard bio has the REQ_NOWAIT flag: quality
+engineers usually check dmesg and report an error if it contains a
+warning/error call trace.
+
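+In sketch form, the nowait-aware path looks like this (mirroring the
+diff below):
+
+  if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
+          /* barrier raised and REQ_NOWAIT set: fail fast, no WARN */
+          bio_wouldblock_error(bio);
+          return 0;
+  }
+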
+Fixes: c9aa889b035f ("md: raid10 add nowait support")
+Signed-off-by: Xiao Ni <xni@redhat.com>
+Acked-by: Coly Li <colyli@kernel.org>
+Link: https://lore.kernel.org/linux-raid/20250306094938.48952-1-xni@redhat.com
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+[Harshit: Clean backport to 6.12.y]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/raid10.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1626,11 +1626,10 @@ static int raid10_handle_discard(struct
+       if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+               return -EAGAIN;
+-      if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
++      if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
+               bio_wouldblock_error(bio);
+               return 0;
+       }
+-      wait_barrier(conf, false);
+       /*
+        * Check reshape again to avoid reshape happens after checking
diff --git a/queue-6.12/media-i2c-imx219-fix-1920x1080-mode-to-use-1-1-pixel-aspect-ratio.patch b/queue-6.12/media-i2c-imx219-fix-1920x1080-mode-to-use-1-1-pixel-aspect-ratio.patch
new file mode 100644 (file)
index 0000000..d5a4166
--- /dev/null
@@ -0,0 +1,59 @@
+From stable+bounces-204721-greg=kroah.com@vger.kernel.org Mon Jan  5 12:41:04 2026
+From: Jai Luthra <jai.luthra@ideasonboard.com>
+Date: Mon,  5 Jan 2026 17:10:32 +0530
+Subject: media: i2c: imx219: Fix 1920x1080 mode to use 1:1 pixel aspect ratio
+To: stable@vger.kernel.org
+Cc: Dave Stevenson <dave.stevenson@raspberrypi.com>, Jacopo Mondi <jacopo.mondi@ideasonboard.com>, Sakari Ailus <sakari.ailus@linux.intel.com>, Hans Verkuil <hverkuil+cisco@kernel.org>, Jai Luthra <jai.luthra@ideasonboard.com>
+Message-ID: <20260105114032.3222732-1-jai.luthra@ideasonboard.com>
+
+From: Dave Stevenson <dave.stevenson@raspberrypi.com>
+
+commit 9ef6e4db152c34580cc52792f32485c193945395 upstream.
+
+Commit 0af46fbc333d ("media: i2c: imx219: Calculate crop rectangle
+dynamically") meant that the 1920x1080 mode switched from using no
+binning to using vertical binning but no horizontal binning, which
+resulted in stretched pixels.
+
+Until proper controls are available to independently select horizontal
+and vertical binning, restore the original 1:1 pixel aspect ratio by
+forcing binning to be uniform in both directions.
+
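+As a worked example (assuming the driver's 3280x2464 pixel array): for
+the 1920x1080 mode, bin_h = min(3280 / 1920, 2) = 1 while
+bin_v = min(2464 / 1080, 2) = 2, i.e. binning in one direction only and
+a 1:2 pixel aspect ratio. With binning = min(bin_h, bin_v) = 1, the
+crop is 1920x1080 and pixels stay square.
+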
+Cc: stable@vger.kernel.org
+Fixes: 0af46fbc333d ("media: i2c: imx219: Calculate crop rectangle dynamically")
+Signed-off-by: Dave Stevenson <dave.stevenson@raspberrypi.com>
+Reviewed-by: Jacopo Mondi <jacopo.mondi@ideasonboard.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Jai Luthra <jai.luthra@ideasonboard.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/i2c/imx219.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/media/i2c/imx219.c
++++ b/drivers/media/i2c/imx219.c
+@@ -843,7 +843,7 @@ static int imx219_set_pad_format(struct
+       const struct imx219_mode *mode;
+       struct v4l2_mbus_framefmt *format;
+       struct v4l2_rect *crop;
+-      unsigned int bin_h, bin_v;
++      unsigned int bin_h, bin_v, binning;
+       mode = v4l2_find_nearest_size(supported_modes,
+                                     ARRAY_SIZE(supported_modes),
+@@ -862,9 +862,12 @@ static int imx219_set_pad_format(struct
+       bin_h = min(IMX219_PIXEL_ARRAY_WIDTH / format->width, 2U);
+       bin_v = min(IMX219_PIXEL_ARRAY_HEIGHT / format->height, 2U);
++      /* Ensure bin_h and bin_v are same to avoid 1:2 or 2:1 stretching */
++      binning = min(bin_h, bin_v);
++
+       crop = v4l2_subdev_state_get_crop(state, 0);
+-      crop->width = format->width * bin_h;
+-      crop->height = format->height * bin_v;
++      crop->width = format->width * binning;
++      crop->height = format->height * binning;
+       crop->left = (IMX219_NATIVE_WIDTH - crop->width) / 2;
+       crop->top = (IMX219_NATIVE_HEIGHT - crop->height) / 2;
diff --git a/queue-6.12/mm-ksm-fix-exec-fork-inheritance-support-for-prctl.patch b/queue-6.12/mm-ksm-fix-exec-fork-inheritance-support-for-prctl.patch
new file mode 100644 (file)
index 0000000..44893df
--- /dev/null
@@ -0,0 +1,121 @@
+From stable+bounces-204177-greg=kroah.com@vger.kernel.org Tue Dec 30 03:49:44 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Dec 2025 21:49:37 -0500
+Subject: mm/ksm: fix exec/fork inheritance support for prctl
+To: stable@vger.kernel.org
+Cc: xu xin <xu.xin16@zte.com.cn>, Stefan Roesch <shr@devkernel.io>, David Hildenbrand <david@redhat.com>, Jinjiang Tu <tujinjiang@huawei.com>, Wang Yaxin <wang.yaxin@zte.com.cn>, Yang Yang <yang.yang29@zte.com.cn>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230024937.1975419-1-sashal@kernel.org>
+
+From: xu xin <xu.xin16@zte.com.cn>
+
+[ Upstream commit 590c03ca6a3fbb114396673314e2aa483839608b ]
+
+Patch series "ksm: fix exec/fork inheritance", v2.
+
+This series fixes exec/fork inheritance.  See the detailed description of
+the issue below.
+
+This patch (of 2):
+
+Background
+==========
+
+commit d7597f59d1d33 ("mm: add new api to enable ksm per process")
+introduced MMF_VM_MERGE_ANY for mm->flags, and allowed the user to set
+it via prctl() so that the process's VMAs are forcibly scanned by ksmd.
+
+Subsequently, commit 3c6f33b7273a ("mm/ksm: support fork/exec for prctl")
+supported inheriting the MMF_VM_MERGE_ANY flag when a task calls execve().
+
+Finally, commit 3a9e567ca45fb ("mm/ksm: fix ksm exec support for prctl")
+fixed the issue that ksmd doesn't scan the mm_struct with MMF_VM_MERGE_ANY
+by adding the mm_slot to ksm_mm_head in __bprm_mm_init().
+
+Problem
+=======
+
+In some extreme scenarios, however, this inheritance of MMF_VM_MERGE_ANY
+during exec/fork can fail.  For example, when the scanning frequency of
+ksmd is tuned extremely high, a process carrying MMF_VM_MERGE_ANY may
+still fail to pass it to the newly exec'd process.  This happens because
+ksm_execve() is executed too early in the do_execve flow (prematurely
+adding the new mm_struct to the ksm_mm_slot list).
+
+As a result, before do_execve completes, ksmd may have already performed a
+scan and found that this new mm_struct has no VM_MERGEABLE VMAs, thus
+clearing its MMF_VM_MERGE_ANY flag.  Consequently, when the new program
+executes, the MMF_VM_MERGE_ANY flag inheritance is missed.
+
+Root reason
+===========
+
+commit d7597f59d1d33 ("mm: add new api to enable ksm per process") clears
+the MMF_VM_MERGE_ANY flag when ksmd finds no VM_MERGEABLE VMAs.
+
+Solution
+========
+
+Firstly, don't clear MMF_VM_MERGE_ANY when ksmd finds no VM_MERGEABLE
+VMAs: the mm_struct may have just been added to the ksm_mm_slot list,
+and its process may not yet have officially started running or performed
+mmap/brk to allocate anonymous VMAs.
+
+Secondly, recheck MMF_VM_MERGEABLE if a process has MMF_VM_MERGE_ANY
+set, and if it was cleared, create a mm_slot and join it into the
+ksm_scan_list again.
+
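+A minimal illustration of the expected behaviour (a hypothetical test,
+not part of this patch; PR_SET/GET_MEMORY_MERGE are the prctl() codes
+behind MMF_VM_MERGE_ANY):
+
+  prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0); /* sets MMF_VM_MERGE_ANY */
+  /* ... fork() + execve() a new program ... */
+  /* in the new program, even with ksmd scanning aggressively: */
+  prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0); /* should still return 1 */
+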
+Link: https://lkml.kernel.org/r/20251007182504440BJgK8VXRHh8TD7IGSUIY4@zte.com.cn
+Link: https://lkml.kernel.org/r/20251007182821572h_SoFqYZXEP1mvWI4n9VL@zte.com.cn
+Fixes: 3c6f33b7273a ("mm/ksm: support fork/exec for prctl")
+Fixes: d7597f59d1d3 ("mm: add new api to enable ksm per process")
+Signed-off-by: xu xin <xu.xin16@zte.com.cn>
+Cc: Stefan Roesch <shr@devkernel.io>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Jinjiang Tu <tujinjiang@huawei.com>
+Cc: Wang Yaxin <wang.yaxin@zte.com.cn>
+Cc: Yang Yang <yang.yang29@zte.com.cn>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ changed mm_flags_test() and mm_flags_clear() calls to test_bit() and clear_bit() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/ksm.c |   18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -2704,8 +2704,14 @@ no_vmas:
+               spin_unlock(&ksm_mmlist_lock);
+               mm_slot_free(mm_slot_cache, mm_slot);
++              /*
++               * Only clear MMF_VM_MERGEABLE. We must not clear
++               * MMF_VM_MERGE_ANY, because for those MMF_VM_MERGE_ANY process,
++               * perhaps their mm_struct has just been added to ksm_mm_slot
++               * list, and its process has not yet officially started running
++               * or has not yet performed mmap/brk to allocate anonymous VMAS.
++               */
+               clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+-              clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
+               mmap_read_unlock(mm);
+               mmdrop(mm);
+       } else {
+@@ -2820,8 +2826,16 @@ void ksm_add_vma(struct vm_area_struct *
+ {
+       struct mm_struct *mm = vma->vm_mm;
+-      if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
++      if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) {
+               __ksm_add_vma(vma);
++              /*
++               * Generally, the flags here always include MMF_VM_MERGEABLE.
++               * However, in rare cases, this flag may be cleared by ksmd who
++               * scans a cycle without finding any mergeable vma.
++               */
++              if (unlikely(!test_bit(MMF_VM_MERGEABLE, &mm->flags)))
++                      __ksm_enter(mm);
++      }
+ }
+ static void ksm_add_vmas(struct mm_struct *mm)
diff --git a/queue-6.12/mptcp-pm-ignore-unknown-endpoint-flags.patch b/queue-6.12/mptcp-pm-ignore-unknown-endpoint-flags.patch
new file mode 100644 (file)
index 0000000..ed77358
--- /dev/null
@@ -0,0 +1,68 @@
+From stable+bounces-204205-greg=kroah.com@vger.kernel.org Tue Dec 30 14:21:21 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 08:21:11 -0500
+Subject: mptcp: pm: ignore unknown endpoint flags
+To: stable@vger.kernel.org
+Cc: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Mat Martineau <martineau@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230132111.2180152-1-sashal@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+[ Upstream commit 0ace3297a7301911e52d8195cb1006414897c859 ]
+
+Before this patch, the kernel was saving any flags set by the userspace,
+even unknown ones. This doesn't cause critical issues because the kernel
+is only looking at specific ones. But on the other hand, endpoint dumps
+could suggest to userspace that recent flags are supported on older
+kernel versions.
+
+Instead, ignore all unknown flags when parsing them. By doing that, the
+userspace can continue to set unsupported flags, but it has a way to
+verify what is supported by the kernel.
+
+Note that it seems better to continue accepting unsupported flags so as
+not to change the behaviour. This also eases things on the userspace
+side: "optional" endpoint types only supported by newer kernels can be
+added without having to deal with each kernel version separately.
+
+A note for backports: there will be conflicts in mptcp.h on older
+versions that lack the mentioned flags; the new line should still be
+added last, and the '5' needs to be adapted to match the value of the
+last entry.
+
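+As a hypothetical illustration: with MPTCP_PM_ADDR_FLAGS_MASK =
+GENMASK(4, 0) = 0x1f, a request setting flags 0x29 (signal | fullmesh |
+an unknown bit 5) is stored as 0x29 & 0x1f = 0x09, so endpoint dumps
+only ever report flags this kernel actually supports.
+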
+Fixes: 01cacb00b35c ("mptcp: add netlink-based PM")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20251205-net-mptcp-misc-fixes-6-19-rc1-v1-1-9e4781a6c1b8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ GENMASK(5, 0) => GENMASK(4, 0) + context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/uapi/linux/mptcp.h |    1 +
+ net/mptcp/pm_netlink.c     |    3 ++-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/include/uapi/linux/mptcp.h
++++ b/include/uapi/linux/mptcp.h
+@@ -38,6 +38,7 @@
+ #define MPTCP_PM_ADDR_FLAG_BACKUP                      (1 << 2)
+ #define MPTCP_PM_ADDR_FLAG_FULLMESH                    (1 << 3)
+ #define MPTCP_PM_ADDR_FLAG_IMPLICIT                    (1 << 4)
++#define MPTCP_PM_ADDR_FLAGS_MASK              GENMASK(4, 0)
+ struct mptcp_info {
+       __u8    mptcpi_subflows;
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1409,7 +1409,8 @@ int mptcp_pm_parse_entry(struct nlattr *
+       }
+       if (tb[MPTCP_PM_ADDR_ATTR_FLAGS])
+-              entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
++              entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]) &
++                             MPTCP_PM_ADDR_FLAGS_MASK;
+       if (tb[MPTCP_PM_ADDR_ATTR_PORT])
+               entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT]));
diff --git a/queue-6.12/net-ipv6-ioam6-use-consistent-dst-names.patch b/queue-6.12/net-ipv6-ioam6-use-consistent-dst-names.patch
new file mode 100644 (file)
index 0000000..5940bd6
--- /dev/null
@@ -0,0 +1,133 @@
+From stable+bounces-204510-greg=kroah.com@vger.kernel.org Fri Jan  2 21:37:56 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Fri,  2 Jan 2026 12:37:24 -0800
+Subject: net: ipv6: ioam6: use consistent dst names
+To: stable@vger.kernel.org
+Cc: Justin Iurman <justin.iurman@uliege.be>, Paolo Abeni <pabeni@redhat.com>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260102203727.1455662-5-harshit.m.mogalapalli@oracle.com>
+
+From: Justin Iurman <justin.iurman@uliege.be>
+
+[ Upstream commit d55acb9732d981c7a8e07dd63089a77d2938e382 ]
+
+Be consistent and use the same terminology as other lwt users: orig_dst
+is the dst_entry before the transformation, while dst is either the
+dst_entry in the cache or the dst_entry after the transformation.
+
+Signed-off-by: Justin Iurman <justin.iurman@uliege.be>
+Link: https://patch.msgid.link/20250415112554.23823-2-justin.iurman@uliege.be
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+[Harshit: Backport to 6.12.y]
+Stable-dep-of: 99a2ace61b21 ("net: use dst_dev_rcu() in sk_setup_caps()")
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ioam6_iptunnel.c |   35 ++++++++++++++++++-----------------
+ 1 file changed, 18 insertions(+), 17 deletions(-)
+
+--- a/net/ipv6/ioam6_iptunnel.c
++++ b/net/ipv6/ioam6_iptunnel.c
+@@ -338,7 +338,8 @@ static int ioam6_do_encap(struct net *ne
+ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+-      struct dst_entry *dst = skb_dst(skb), *cache_dst = NULL;
++      struct dst_entry *orig_dst = skb_dst(skb);
++      struct dst_entry *dst = NULL;
+       struct ioam6_lwt *ilwt;
+       int err = -EINVAL;
+       u32 pkt_cnt;
+@@ -346,7 +347,7 @@ static int ioam6_output(struct net *net,
+       if (skb->protocol != htons(ETH_P_IPV6))
+               goto drop;
+-      ilwt = ioam6_lwt_state(dst->lwtstate);
++      ilwt = ioam6_lwt_state(orig_dst->lwtstate);
+       /* Check for insertion frequency (i.e., "k over n" insertions) */
+       pkt_cnt = atomic_fetch_inc(&ilwt->pkt_cnt);
+@@ -354,7 +355,7 @@ static int ioam6_output(struct net *net,
+               goto out;
+       local_bh_disable();
+-      cache_dst = dst_cache_get(&ilwt->cache);
++      dst = dst_cache_get(&ilwt->cache);
+       local_bh_enable();
+       switch (ilwt->mode) {
+@@ -364,7 +365,7 @@ do_inline:
+               if (ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP)
+                       goto out;
+-              err = ioam6_do_inline(net, skb, &ilwt->tuninfo, cache_dst);
++              err = ioam6_do_inline(net, skb, &ilwt->tuninfo, dst);
+               if (unlikely(err))
+                       goto drop;
+@@ -374,7 +375,7 @@ do_encap:
+               /* Encapsulation (ip6ip6) */
+               err = ioam6_do_encap(net, skb, &ilwt->tuninfo,
+                                    ilwt->has_tunsrc, &ilwt->tunsrc,
+-                                   &ilwt->tundst, cache_dst);
++                                   &ilwt->tundst, dst);
+               if (unlikely(err))
+                       goto drop;
+@@ -392,7 +393,7 @@ do_encap:
+               goto drop;
+       }
+-      if (unlikely(!cache_dst)) {
++      if (unlikely(!dst)) {
+               struct ipv6hdr *hdr = ipv6_hdr(skb);
+               struct flowi6 fl6;
+@@ -403,20 +404,20 @@ do_encap:
+               fl6.flowi6_mark = skb->mark;
+               fl6.flowi6_proto = hdr->nexthdr;
+-              cache_dst = ip6_route_output(net, NULL, &fl6);
+-              if (cache_dst->error) {
+-                      err = cache_dst->error;
++              dst = ip6_route_output(net, NULL, &fl6);
++              if (dst->error) {
++                      err = dst->error;
+                       goto drop;
+               }
+               /* cache only if we don't create a dst reference loop */
+-              if (dst->lwtstate != cache_dst->lwtstate) {
++              if (orig_dst->lwtstate != dst->lwtstate) {
+                       local_bh_disable();
+-                      dst_cache_set_ip6(&ilwt->cache, cache_dst, &fl6.saddr);
++                      dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
+                       local_bh_enable();
+               }
+-              err = skb_cow_head(skb, LL_RESERVED_SPACE(cache_dst->dev));
++              err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
+               if (unlikely(err))
+                       goto drop;
+       }
+@@ -424,16 +425,16 @@ do_encap:
+       /* avoid lwtunnel_output() reentry loop when destination is the same
+        * after transformation (e.g., with the inline mode)
+        */
+-      if (dst->lwtstate != cache_dst->lwtstate) {
++      if (orig_dst->lwtstate != dst->lwtstate) {
+               skb_dst_drop(skb);
+-              skb_dst_set(skb, cache_dst);
++              skb_dst_set(skb, dst);
+               return dst_output(net, sk, skb);
+       }
+ out:
+-      dst_release(cache_dst);
+-      return dst->lwtstate->orig_output(net, sk, skb);
++      dst_release(dst);
++      return orig_dst->lwtstate->orig_output(net, sk, skb);
+ drop:
+-      dst_release(cache_dst);
++      dst_release(dst);
+       kfree_skb(skb);
+       return err;
+ }
diff --git a/queue-6.12/net-use-dst_dev_rcu-in-sk_setup_caps.patch b/queue-6.12/net-use-dst_dev_rcu-in-sk_setup_caps.patch
new file mode 100644 (file)
index 0000000..f652003
--- /dev/null
@@ -0,0 +1,138 @@
+From stable+bounces-204511-greg=kroah.com@vger.kernel.org Fri Jan  2 21:37:58 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Fri,  2 Jan 2026 12:37:26 -0800
+Subject: net: use dst_dev_rcu() in sk_setup_caps()
+To: stable@vger.kernel.org
+Cc: Eric Dumazet <edumazet@google.com>, David Ahern <dsahern@kernel.org>, Jakub Kicinski <kuba@kernel.org>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260102203727.1455662-7-harshit.m.mogalapalli@oracle.com>
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 99a2ace61b211b0be861b07fbaa062fca4b58879 ]
+
+Use RCU to protect accesses to dst->dev from sk_setup_caps()
+and sk_dst_gso_max_size().
+
+Also use dst_dev_rcu() in ip6_dst_mtu_maybe_forward(),
+and ip_dst_mtu_maybe_forward().
+
+ip4_dst_hoplimit() can use dst_dev_net_rcu().
+
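+The pattern, in sketch form (mirroring the sk_setup_caps() hunk below):
+
+  rcu_read_lock();
+  dev = dst_dev_rcu(dst); /* dst->dev, valid only inside the RCU section */
+  sk->sk_route_caps = dev->features;
+  max_segs = max_t(u32, READ_ONCE(dev->gso_max_segs), 1);
+  rcu_read_unlock();
+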
+Fixes: 4a6ce2b6f2ec ("net: introduce a new function dst_dev_put()")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20250828195823.3958522-6-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[Harshit: Backport to 6.12.y, resolve conflict due to missing commit:
+22d6c9eebf2e ("net: Unexport shared functions for DCCP.")  in 6.12.y]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip.h        |    6 ++++--
+ include/net/ip6_route.h |    2 +-
+ include/net/route.h     |    2 +-
+ net/core/sock.c         |   16 ++++++++++------
+ 4 files changed, 16 insertions(+), 10 deletions(-)
+
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -470,12 +470,14 @@ static inline unsigned int ip_dst_mtu_ma
+                                                   bool forwarding)
+ {
+       const struct rtable *rt = dst_rtable(dst);
++      const struct net_device *dev;
+       unsigned int mtu, res;
+       struct net *net;
+       rcu_read_lock();
+-      net = dev_net_rcu(dst_dev(dst));
++      dev = dst_dev_rcu(dst);
++      net = dev_net_rcu(dev);
+       if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
+           ip_mtu_locked(dst) ||
+           !forwarding) {
+@@ -489,7 +491,7 @@ static inline unsigned int ip_dst_mtu_ma
+       if (mtu)
+               goto out;
+-      mtu = READ_ONCE(dst_dev(dst)->mtu);
++      mtu = READ_ONCE(dev->mtu);
+       if (unlikely(ip_mtu_locked(dst))) {
+               if (rt->rt_uses_gateway && mtu > 576)
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -337,7 +337,7 @@ static inline unsigned int ip6_dst_mtu_m
+       mtu = IPV6_MIN_MTU;
+       rcu_read_lock();
+-      idev = __in6_dev_get(dst_dev(dst));
++      idev = __in6_dev_get(dst_dev_rcu(dst));
+       if (idev)
+               mtu = READ_ONCE(idev->cnf.mtu6);
+       rcu_read_unlock();
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -369,7 +369,7 @@ static inline int ip4_dst_hoplimit(const
+               const struct net *net;
+               rcu_read_lock();
+-              net = dev_net_rcu(dst_dev(dst));
++              net = dst_dev_net_rcu(dst);
+               hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
+               rcu_read_unlock();
+       }
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2524,7 +2524,7 @@ void sk_free_unlock_clone(struct sock *s
+ }
+ EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
+-static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst)
++static u32 sk_dst_gso_max_size(struct sock *sk, const struct net_device *dev)
+ {
+       bool is_ipv6 = false;
+       u32 max_size;
+@@ -2534,8 +2534,8 @@ static u32 sk_dst_gso_max_size(struct so
+                  !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr));
+ #endif
+       /* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
+-      max_size = is_ipv6 ? READ_ONCE(dst_dev(dst)->gso_max_size) :
+-                      READ_ONCE(dst_dev(dst)->gso_ipv4_max_size);
++      max_size = is_ipv6 ? READ_ONCE(dev->gso_max_size) :
++                      READ_ONCE(dev->gso_ipv4_max_size);
+       if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk))
+               max_size = GSO_LEGACY_MAX_SIZE;
+@@ -2544,9 +2544,12 @@ static u32 sk_dst_gso_max_size(struct so
+ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
+ {
++      const struct net_device *dev;
+       u32 max_segs = 1;
+-      sk->sk_route_caps = dst_dev(dst)->features;
++      rcu_read_lock();
++      dev = dst_dev_rcu(dst);
++      sk->sk_route_caps = dev->features;
+       if (sk_is_tcp(sk)) {
+               struct inet_connection_sock *icsk = inet_csk(sk);
+@@ -2562,13 +2565,14 @@ void sk_setup_caps(struct sock *sk, stru
+                       sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+               } else {
+                       sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
+-                      sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst);
++                      sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dev);
+                       /* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
+-                      max_segs = max_t(u32, READ_ONCE(dst_dev(dst)->gso_max_segs), 1);
++                      max_segs = max_t(u32, READ_ONCE(dev->gso_max_segs), 1);
+               }
+       }
+       sk->sk_gso_max_segs = max_segs;
+       sk_dst_set(sk, dst);
++      rcu_read_unlock();
+ }
+ EXPORT_SYMBOL_GPL(sk_setup_caps);
diff --git a/queue-6.12/netfilter-nft_ct-add-seqadj-extension-for-natted-connections.patch b/queue-6.12/netfilter-nft_ct-add-seqadj-extension-for-natted-connections.patch
new file mode 100644 (file)
index 0000000..d0bd1bc
--- /dev/null
@@ -0,0 +1,113 @@
+From stable+bounces-204513-greg=kroah.com@vger.kernel.org Fri Jan  2 21:38:05 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Fri,  2 Jan 2026 12:37:21 -0800
+Subject: netfilter: nft_ct: add seqadj extension for natted connections
+To: stable@vger.kernel.org
+Cc: Andrii Melnychenko <a.melnychenko@vyos.io>, Florian Westphal <fw@strlen.de>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260102203727.1455662-2-harshit.m.mogalapalli@oracle.com>
+
+From: Andrii Melnychenko <a.melnychenko@vyos.io>
+
+[ Upstream commit 90918e3b6404c2a37837b8f11692471b4c512de2 ]
+
+Sequence adjustment may be required for FTP traffic with PASV/EPSV modes,
+due to the need to rewrite the packet payload (IP, port) on the ftp control
+connection. This can require changes to the TCP length and expected
+seq / ack_seq.
+
+The easiest way to reproduce this issue is with PASV mode.
+Example ruleset:
+table inet ftp_nat {
+        ct helper ftp_helper {
+                type "ftp" protocol tcp
+                l3proto inet
+        }
+
+        chain prerouting {
+                type filter hook prerouting priority 0; policy accept;
+                tcp dport 21 ct state new ct helper set "ftp_helper"
+        }
+}
+table ip nat {
+        chain prerouting {
+                type nat hook prerouting priority -100; policy accept;
+                tcp dport 21 dnat ip prefix to ip daddr map {
+                       192.168.100.1 : 192.168.13.2/32 }
+        }
+
+        chain postrouting {
+                type nat hook postrouting priority 100 ; policy accept;
+                tcp sport 21 snat ip prefix to ip saddr map {
+                       192.168.13.2 : 192.168.100.1/32 }
+        }
+}
+
+Note that the ftp helper gets assigned *after* the dnat setup.
+
+The inverse (nat after helper assignment) is handled by an existing
+check in nf_nat_setup_info() and will not show the problem.
+
+Topoloy:
+
+ +-------------------+     +----------------------------------+
+ | FTP: 192.168.13.2 | <-> | NAT: 192.168.13.3, 192.168.100.1 |
+ +-------------------+     +----------------------------------+
+                                      |
+                         +-----------------------+
+                         | Client: 192.168.100.2 |
+                         +-----------------------+
+
+ftp nat changes do not work as expected in this case:
+Connected to 192.168.100.1.
+[..]
+ftp> epsv
+EPSV/EPRT on IPv4 off.
+ftp> ls
+227 Entering passive mode (192,168,100,1,209,129).
+421 Service not available, remote server has closed connection.
+
+Kernel logs:
+Missing nfct_seqadj_ext_add() setup call
+WARNING: CPU: 1 PID: 0 at net/netfilter/nf_conntrack_seqadj.c:41
+[..]
+ __nf_nat_mangle_tcp_packet+0x100/0x160 [nf_nat]
+ nf_nat_ftp+0x142/0x280 [nf_nat_ftp]
+ help+0x4d1/0x880 [nf_conntrack_ftp]
+ nf_confirm+0x122/0x2e0 [nf_conntrack]
+ nf_hook_slow+0x3c/0xb0
+ ..
+
+Fix this by adding the required extension when a conntrack helper is assigned
+to a connection that has a nat binding.
+
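+In sketch form (mirroring the nft_ct_helper_obj_eval() hunk below;
+IPS_NAT_MASK covers both IPS_SRC_NAT and IPS_DST_NAT, so either snat or
+dnat bindings qualify):
+
+  if ((ct->status & IPS_NAT_MASK) && !nfct_seqadj(ct))
+          if (!nfct_seqadj_ext_add(ct))
+                  regs->verdict.code = NF_DROP;
+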
+Fixes: 1a64edf54f55 ("netfilter: nft_ct: add helper set support")
+Signed-off-by: Andrii Melnychenko <a.melnychenko@vyos.io>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+[Harshit: Clean cherry-pick, apply it to stable-6.12.y]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nft_ct.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -22,6 +22,7 @@
+ #include <net/netfilter/nf_conntrack_timeout.h>
+ #include <net/netfilter/nf_conntrack_l4proto.h>
+ #include <net/netfilter/nf_conntrack_expect.h>
++#include <net/netfilter/nf_conntrack_seqadj.h>
+ struct nft_ct_helper_obj  {
+       struct nf_conntrack_helper *helper4;
+@@ -1173,6 +1174,10 @@ static void nft_ct_helper_obj_eval(struc
+       if (help) {
+               rcu_assign_pointer(help->helper, to_assign);
+               set_bit(IPS_HELPER_BIT, &ct->status);
++
++              if ((ct->status & IPS_NAT_MASK) && !nfct_seqadj(ct))
++                      if (!nfct_seqadj_ext_add(ct))
++                              regs->verdict.code = NF_DROP;
+       }
+ }
diff --git a/queue-6.12/sched-eevdf-fix-min_vruntime-vs-avg_vruntime.patch b/queue-6.12/sched-eevdf-fix-min_vruntime-vs-avg_vruntime.patch
new file mode 100644 (file)
index 0000000..5781215
--- /dev/null
@@ -0,0 +1,321 @@
+From stable+bounces-204132-greg=kroah.com@vger.kernel.org Mon Dec 29 21:23:56 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Dec 2025 15:23:50 -0500
+Subject: sched/eevdf: Fix min_vruntime vs avg_vruntime
+To: stable@vger.kernel.org
+Cc: Peter Zijlstra <peterz@infradead.org>, Zicheng Qu <quzicheng@huawei.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251229202350.1683612-1-sashal@kernel.org>
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 79f3f9bedd149ea438aaeb0fb6a083637affe205 ]
+
+Basically, from the constraint that the sum of lag is zero, you can
+infer that the 0-lag point is the weighted average of the individual
+vruntime, which is what we're trying to compute:
+
+        \Sum w_i * v_i
+  avg = --------------
+           \Sum w_i
+
+Now, since vruntime takes the whole u64 (worse, it wraps), this
+multiplication term in the numerator is not something we can compute;
+instead we do the min_vruntime (v0 henceforth) thing like:
+
+  v_i = (v_i - v0) + v0
+
+This does two things:
+ - it keeps the key: (v_i - v0) 'small';
+ - it creates a relative 0-point in the modular space.
+
+If you do that substitution and work it all out, you end up with:
+
+        \Sum w_i * (v_i - v0)
+  avg = --------------------- + v0
+              \Sum w_i
+
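+(Spelling out the substitution: \Sum w_i * v_i
+                              = \Sum w_i * ((v_i - v0) + v0)
+                              = \Sum w_i * (v_i - v0) + v0 * \Sum w_i,
+and dividing by \Sum w_i gives the expression above.)
+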
+Since you cannot very well track a ratio like that (and not suffer
+terrible numerical problems), we simply track the numerator and
+denominator individually and only perform the division when strictly
+needed.
+
+Notably, the numerator lives in cfs_rq->avg_vruntime and the denominator
+lives in cfs_rq->avg_load.
+
+The one extra 'funny' is that these numbers track the entities in the
+tree, and current is typically outside of the tree, so avg_vruntime()
+adds current when needed before doing the division.
+
+(vruntime_eligible() elides the division by cross-wise multiplication)
+
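+(That is, rather than testing v <= v0 + avg / load, it tests the
+equivalent avg >= (v - v0) * load, as in the vruntime_eligible() hunk
+below.)
+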
+Anyway, as mentioned above, we currently use the CFS era min_vruntime
+for this purpose. However, this thing can only move forward, while the
+above avg can in fact move backward (when a non-eligible task leaves,
+the average becomes smaller); this can cause trouble when, through
+happenstance (or construction), these values drift far enough apart to
+wreck the game.
+
+Replace cfs_rq::min_vruntime with cfs_rq::zero_vruntime which is kept
+near/at avg_vruntime, following its motion.
+
+The down-side is that this requires computing the avg more often.
+
+Fixes: 147f3efaa241 ("sched/fair: Implement an EEVDF-like scheduling policy")
+Reported-by: Zicheng Qu <quzicheng@huawei.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://patch.msgid.link/20251106111741.GC4068168@noisy.programming.kicks-ass.net
+Cc: stable@vger.kernel.org
+[ Adjust context in comments + init_cfs_rq ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/debug.c |    8 ++--
+ kernel/sched/fair.c  |   84 +++++++++++----------------------------------------
+ kernel/sched/sched.h |    4 +-
+ 3 files changed, 25 insertions(+), 71 deletions(-)
+
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -804,7 +804,7 @@ static void print_rq(struct seq_file *m,
+ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+ {
+-      s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, left_deadline = -1, spread;
++      s64 left_vruntime = -1, zero_vruntime, right_vruntime = -1, left_deadline = -1, spread;
+       struct sched_entity *last, *first, *root;
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long flags;
+@@ -827,15 +827,15 @@ void print_cfs_rq(struct seq_file *m, in
+       last = __pick_last_entity(cfs_rq);
+       if (last)
+               right_vruntime = last->vruntime;
+-      min_vruntime = cfs_rq->min_vruntime;
++      zero_vruntime = cfs_rq->zero_vruntime;
+       raw_spin_rq_unlock_irqrestore(rq, flags);
+       SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_deadline",
+                       SPLIT_NS(left_deadline));
+       SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_vruntime",
+                       SPLIT_NS(left_vruntime));
+-      SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
+-                      SPLIT_NS(min_vruntime));
++      SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "zero_vruntime",
++                      SPLIT_NS(zero_vruntime));
+       SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "avg_vruntime",
+                       SPLIT_NS(avg_vruntime(cfs_rq)));
+       SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "right_vruntime",
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -553,7 +553,7 @@ static inline bool entity_before(const s
+ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+-      return (s64)(se->vruntime - cfs_rq->min_vruntime);
++      return (s64)(se->vruntime - cfs_rq->zero_vruntime);
+ }
+ #define __node_2_se(node) \
+@@ -605,13 +605,13 @@ static inline s64 entity_key(struct cfs_
+  *
+  * Which we track using:
+  *
+- *                    v0 := cfs_rq->min_vruntime
++ *                    v0 := cfs_rq->zero_vruntime
+  * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
+  *              \Sum w_i := cfs_rq->avg_load
+  *
+- * Since min_vruntime is a monotonic increasing variable that closely tracks
+- * the per-task service, these deltas: (v_i - v), will be in the order of the
+- * maximal (virtual) lag induced in the system due to quantisation.
++ * Since zero_vruntime closely tracks the per-task service, these
++ * deltas: (v_i - v), will be in the order of the maximal (virtual) lag
++ * induced in the system due to quantisation.
+  *
+  * Also, we use scale_load_down() to reduce the size.
+  *
+@@ -670,7 +670,7 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
+               avg = div_s64(avg, load);
+       }
+-      return cfs_rq->min_vruntime + avg;
++      return cfs_rq->zero_vruntime + avg;
+ }
+ /*
+@@ -736,7 +736,7 @@ static int vruntime_eligible(struct cfs_
+               load += weight;
+       }
+-      return avg >= (s64)(vruntime - cfs_rq->min_vruntime) * load;
++      return avg >= (s64)(vruntime - cfs_rq->zero_vruntime) * load;
+ }
+ int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -744,42 +744,14 @@ int entity_eligible(struct cfs_rq *cfs_r
+       return vruntime_eligible(cfs_rq, se->vruntime);
+ }
+-static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime)
++static void update_zero_vruntime(struct cfs_rq *cfs_rq)
+ {
+-      u64 min_vruntime = cfs_rq->min_vruntime;
+-      /*
+-       * open coded max_vruntime() to allow updating avg_vruntime
+-       */
+-      s64 delta = (s64)(vruntime - min_vruntime);
+-      if (delta > 0) {
+-              avg_vruntime_update(cfs_rq, delta);
+-              min_vruntime = vruntime;
+-      }
+-      return min_vruntime;
+-}
+-
+-static void update_min_vruntime(struct cfs_rq *cfs_rq)
+-{
+-      struct sched_entity *se = __pick_root_entity(cfs_rq);
+-      struct sched_entity *curr = cfs_rq->curr;
+-      u64 vruntime = cfs_rq->min_vruntime;
++      u64 vruntime = avg_vruntime(cfs_rq);
++      s64 delta = (s64)(vruntime - cfs_rq->zero_vruntime);
+-      if (curr) {
+-              if (curr->on_rq)
+-                      vruntime = curr->vruntime;
+-              else
+-                      curr = NULL;
+-      }
++      avg_vruntime_update(cfs_rq, delta);
+-      if (se) {
+-              if (!curr)
+-                      vruntime = se->min_vruntime;
+-              else
+-                      vruntime = min_vruntime(vruntime, se->min_vruntime);
+-      }
+-
+-      /* ensure we never gain time by being placed backwards. */
+-      cfs_rq->min_vruntime = __update_min_vruntime(cfs_rq, vruntime);
++      cfs_rq->zero_vruntime = vruntime;
+ }
+ static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq)
+@@ -852,6 +824,7 @@ RB_DECLARE_CALLBACKS(static, min_vruntim
+ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+       avg_vruntime_add(cfs_rq, se);
++      update_zero_vruntime(cfs_rq);
+       se->min_vruntime = se->vruntime;
+       se->min_slice = se->slice;
+       rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
+@@ -863,6 +836,7 @@ static void __dequeue_entity(struct cfs_
+       rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
+                                 &min_vruntime_cb);
+       avg_vruntime_sub(cfs_rq, se);
++      update_zero_vruntime(cfs_rq);
+ }
+ struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq)
+@@ -1243,7 +1217,6 @@ static void update_curr(struct cfs_rq *c
+       curr->vruntime += calc_delta_fair(delta_exec, curr);
+       resched = update_deadline(cfs_rq, curr);
+-      update_min_vruntime(cfs_rq);
+       if (entity_is_task(curr)) {
+               struct task_struct *p = task_of(curr);
+@@ -3937,15 +3910,6 @@ static void reweight_entity(struct cfs_r
+               update_load_add(&cfs_rq->load, se->load.weight);
+               if (!curr)
+                       __enqueue_entity(cfs_rq, se);
+-
+-              /*
+-               * The entity's vruntime has been adjusted, so let's check
+-               * whether the rq-wide min_vruntime needs updated too. Since
+-               * the calculations above require stable min_vruntime rather
+-               * than up-to-date one, we do the update at the end of the
+-               * reweight process.
+-               */
+-              update_min_vruntime(cfs_rq);
+       }
+ }
+@@ -5614,15 +5578,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
+       update_cfs_group(se);
+-      /*
+-       * Now advance min_vruntime if @se was the entity holding it back,
+-       * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
+-       * put back on, and if we advance min_vruntime, we'll be placed back
+-       * further than we started -- i.e. we'll be penalized.
+-       */
+-      if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
+-              update_min_vruntime(cfs_rq);
+-
+       if (flags & DEQUEUE_DELAYED)
+               finish_delayed_dequeue_entity(se);
+@@ -9165,7 +9120,6 @@ static void yield_task_fair(struct rq *r
+       if (entity_eligible(cfs_rq, se)) {
+               se->vruntime = se->deadline;
+               se->deadline += calc_delta_fair(se->slice, se);
+-              update_min_vruntime(cfs_rq);
+       }
+ }
+@@ -13093,7 +13047,7 @@ static inline void task_tick_core(struct
+ }
+ /*
+- * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
++ * se_fi_update - Update the cfs_rq->zero_vruntime_fi in a CFS hierarchy if needed.
+  */
+ static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
+                        bool forceidle)
+@@ -13107,7 +13061,7 @@ static void se_fi_update(const struct sc
+                       cfs_rq->forceidle_seq = fi_seq;
+               }
+-              cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime;
++              cfs_rq->zero_vruntime_fi = cfs_rq->zero_vruntime;
+       }
+ }
+@@ -13160,11 +13114,11 @@ bool cfs_prio_less(const struct task_str
+       /*
+        * Find delta after normalizing se's vruntime with its cfs_rq's
+-       * min_vruntime_fi, which would have been updated in prior calls
++       * zero_vruntime_fi, which would have been updated in prior calls
+        * to se_fi_update().
+        */
+       delta = (s64)(sea->vruntime - seb->vruntime) +
+-              (s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi);
++              (s64)(cfs_rqb->zero_vruntime_fi - cfs_rqa->zero_vruntime_fi);
+       return delta > 0;
+ }
+@@ -13402,7 +13356,7 @@ static void set_next_task_fair(struct rq
+ void init_cfs_rq(struct cfs_rq *cfs_rq)
+ {
+       cfs_rq->tasks_timeline = RB_ROOT_CACHED;
+-      cfs_rq->min_vruntime = (u64)(-(1LL << 20));
++      cfs_rq->zero_vruntime = (u64)(-(1LL << 20));
+ #ifdef CONFIG_SMP
+       raw_spin_lock_init(&cfs_rq->removed.lock);
+ #endif
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -660,10 +660,10 @@ struct cfs_rq {
+       s64                     avg_vruntime;
+       u64                     avg_load;
+-      u64                     min_vruntime;
++      u64                     zero_vruntime;
+ #ifdef CONFIG_SCHED_CORE
+       unsigned int            forceidle_seq;
+-      u64                     min_vruntime_fi;
++      u64                     zero_vruntime_fi;
+ #endif
+       struct rb_root_cached   tasks_timeline;
diff --git a/queue-6.12/sched_ext-factor-out-local_dsq_post_enq-from-dispatch_enqueue.patch b/queue-6.12/sched_ext-factor-out-local_dsq_post_enq-from-dispatch_enqueue.patch
new file mode 100644 (file)
index 0000000..fffd176
--- /dev/null
@@ -0,0 +1,79 @@
+From stable+bounces-204422-greg=kroah.com@vger.kernel.org Fri Jan  2 03:01:23 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu,  1 Jan 2026 21:01:16 -0500
+Subject: sched_ext: Factor out local_dsq_post_enq() from dispatch_enqueue()
+To: stable@vger.kernel.org
+Cc: Tejun Heo <tj@kernel.org>, Andrea Righi <arighi@nvidia.com>, Emil Tsalapatis <emil@etsalapatis.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260102020117.100465-1-sashal@kernel.org>
+
+From: Tejun Heo <tj@kernel.org>
+
+[ Upstream commit 530b6637c79e728d58f1d9b66bd4acf4b735b86d ]
+
+Factor out local_dsq_post_enq() which performs post-enqueue handling for
+local DSQs - triggering resched_curr() if SCX_ENQ_PREEMPT is specified or if
+the current CPU is idle. No functional change.
+
+This will be used by the next patch to fix move_local_task_to_local_dsq().
+
+Cc: stable@vger.kernel.org # v6.12+
+Reviewed-by: Andrea Righi <arighi@nvidia.com>
+Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c |   34 +++++++++++++++++++---------------
+ 1 file changed, 19 insertions(+), 15 deletions(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -1676,6 +1676,22 @@ static void dsq_mod_nr(struct scx_dispat
+       WRITE_ONCE(dsq->nr, dsq->nr + delta);
+ }
++static void local_dsq_post_enq(struct scx_dispatch_q *dsq, struct task_struct *p,
++                             u64 enq_flags)
++{
++      struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
++      bool preempt = false;
++
++      if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
++          rq->curr->sched_class == &ext_sched_class) {
++              rq->curr->scx.slice = 0;
++              preempt = true;
++      }
++
++      if (preempt || sched_class_above(&ext_sched_class, rq->curr->sched_class))
++              resched_curr(rq);
++}
++
+ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
+                            u64 enq_flags)
+ {
+@@ -1773,22 +1789,10 @@ static void dispatch_enqueue(struct scx_
+       if (enq_flags & SCX_ENQ_CLEAR_OPSS)
+               atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
+-      if (is_local) {
+-              struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
+-              bool preempt = false;
+-
+-              if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
+-                  rq->curr->sched_class == &ext_sched_class) {
+-                      rq->curr->scx.slice = 0;
+-                      preempt = true;
+-              }
+-
+-              if (preempt || sched_class_above(&ext_sched_class,
+-                                               rq->curr->sched_class))
+-                      resched_curr(rq);
+-      } else {
++      if (is_local)
++              local_dsq_post_enq(dsq, p, enq_flags);
++      else
+               raw_spin_unlock(&dsq->lock);
+-      }
+ }
+ static void task_unlink_from_dsq(struct task_struct *p,
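[ Editor's note: a compile-only user-space sketch of the resched decision
  that local_dsq_post_enq() centralizes. The flag value and struct below
  are illustrative stand-ins, not kernel API. ]

    #include <stdbool.h>

    #define SCX_ENQ_PREEMPT (1ULL << 0)     /* illustrative value */

    struct fake_curr {
            int class_prio;         /* higher == higher sched class */
            unsigned long slice;
    };

    /* Resched if the enqueue explicitly preempts a running SCX task,
     * or if whatever is current sits below the SCX class (e.g. idle). */
    static bool post_enq_needs_resched(struct fake_curr *curr,
                                       int ext_class_prio,
                                       unsigned long long enq_flags)
    {
            bool preempt = false;

            if ((enq_flags & SCX_ENQ_PREEMPT) &&
                curr->class_prio == ext_class_prio) {
                    curr->slice = 0;        /* force slice expiry */
                    preempt = true;
            }
            return preempt || ext_class_prio > curr->class_prio;
    }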
diff --git a/queue-6.12/sched_ext-fix-incorrect-sched_class-settings-for-per-cpu-migration-tasks.patch b/queue-6.12/sched_ext-fix-incorrect-sched_class-settings-for-per-cpu-migration-tasks.patch
new file mode 100644 (file)
index 0000000..01699a9
--- /dev/null
@@ -0,0 +1,105 @@
+From stable+bounces-204128-greg=kroah.com@vger.kernel.org Mon Dec 29 20:39:26 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 29 Dec 2025 14:39:20 -0500
+Subject: sched_ext: Fix incorrect sched_class settings for per-cpu migration tasks
+To: stable@vger.kernel.org
+Cc: Zqiang <qiang.zhang@linux.dev>, Andrea Righi <arighi@nvidia.com>, Tejun Heo <tj@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251229193920.1643627-1-sashal@kernel.org>
+
+From: Zqiang <qiang.zhang@linux.dev>
+
+[ Upstream commit 1dd6c84f1c544e552848a8968599220bd464e338 ]
+
+When loading the eBPF scheduler, the tasks in the scx_tasks list are
+traversed and __setscheduler_class() is invoked to get the new
+sched_class. However, this also incorrectly sets the per-CPU migration
+tasks' sched_class to rt_sched_class, and even after unload the
+migration tasks' sched_class remains rt_sched_class.
+
+The log for this issue is as follows:
+
+./scx_rustland --stats 1
+[  199.245639][  T630] sched_ext: "rustland" does not implement cgroup cpu.weight
+[  199.269213][  T630] sched_ext: BPF scheduler "rustland" enabled
+04:25:09 [INFO] RustLand scheduler attached
+
+bpftrace -e 'iter:task /strcontains(ctx->task->comm, "migration")/
+{ printf("%s:%d->%pS\n", ctx->task->comm, ctx->task->pid, ctx->task->sched_class); }'
+Attaching 1 probe...
+migration/0:24->rt_sched_class+0x0/0xe0
+migration/1:27->rt_sched_class+0x0/0xe0
+migration/2:33->rt_sched_class+0x0/0xe0
+migration/3:39->rt_sched_class+0x0/0xe0
+migration/4:45->rt_sched_class+0x0/0xe0
+migration/5:52->rt_sched_class+0x0/0xe0
+migration/6:58->rt_sched_class+0x0/0xe0
+migration/7:64->rt_sched_class+0x0/0xe0
+
+sched_ext: BPF scheduler "rustland" disabled (unregistered from user space)
+EXIT: unregistered from user space
+04:25:21 [INFO] Unregister RustLand scheduler
+
+bpftrace -e 'iter:task /strcontains(ctx->task->comm, "migration")/
+{ printf("%s:%d->%pS\n", ctx->task->comm, ctx->task->pid, ctx->task->sched_class); }'
+Attaching 1 probe...
+migration/0:24->rt_sched_class+0x0/0xe0
+migration/1:27->rt_sched_class+0x0/0xe0
+migration/2:33->rt_sched_class+0x0/0xe0
+migration/3:39->rt_sched_class+0x0/0xe0
+migration/4:45->rt_sched_class+0x0/0xe0
+migration/5:52->rt_sched_class+0x0/0xe0
+migration/6:58->rt_sched_class+0x0/0xe0
+migration/7:64->rt_sched_class+0x0/0xe0
+
+This commit therefore introduces a new scx_setscheduler_class() helper
+that adds a check for stop_sched_class, replacing __setscheduler_class().
+
+Fixes: f0e1a0643a59 ("sched_ext: Implement BPF extensible scheduler class")
+Cc: stable@vger.kernel.org # v6.12+
+Signed-off-by: Zqiang <qiang.zhang@linux.dev>
+Reviewed-by: Andrea Righi <arighi@nvidia.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+[ Adjust context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c |   14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -1057,6 +1057,14 @@ static struct scx_dispatch_q *find_user_
+       return rhashtable_lookup_fast(&dsq_hash, &dsq_id, dsq_hash_params);
+ }
++static const struct sched_class *scx_setscheduler_class(struct task_struct *p)
++{
++      if (p->sched_class == &stop_sched_class)
++              return &stop_sched_class;
++
++      return __setscheduler_class(p->policy, p->prio);
++}
++
+ /*
+  * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
+  * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
+@@ -4653,8 +4661,7 @@ static void scx_ops_disable_workfn(struc
+       scx_task_iter_start(&sti);
+       while ((p = scx_task_iter_next_locked(&sti))) {
+               const struct sched_class *old_class = p->sched_class;
+-              const struct sched_class *new_class =
+-                      __setscheduler_class(p->policy, p->prio);
++              const struct sched_class *new_class = scx_setscheduler_class(p);
+               struct sched_enq_and_set_ctx ctx;
+               if (old_class != new_class && p->se.sched_delayed)
+@@ -5368,8 +5375,7 @@ static int scx_ops_enable(struct sched_e
+       scx_task_iter_start(&sti);
+       while ((p = scx_task_iter_next_locked(&sti))) {
+               const struct sched_class *old_class = p->sched_class;
+-              const struct sched_class *new_class =
+-                      __setscheduler_class(p->policy, p->prio);
++              const struct sched_class *new_class = scx_setscheduler_class(p);
+               struct sched_enq_and_set_ctx ctx;
+               if (!tryget_task_struct(p))
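[ Editor's note: the shape of the fix as a compile-only sketch: when
  bulk-recomputing a per-task property, exempt tasks pinned to a special
  value (here, the stop class used by the per-CPU migration threads).
  The enum and the policy mapping are stand-ins for illustration only. ]

    enum klass { KLASS_FAIR, KLASS_RT, KLASS_EXT, KLASS_STOP };

    struct fake_task { enum klass klass; int policy, prio; };

    static enum klass class_from_policy(int policy, int prio)
    {
            return policy ? KLASS_RT : KLASS_FAIR;  /* stand-in mapping */
    }

    /* Never demote migration tasks off the stop class. */
    static enum klass pick_class(const struct fake_task *t)
    {
            if (t->klass == KLASS_STOP)
                    return KLASS_STOP;
            return class_from_policy(t->policy, t->prio);
    }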
diff --git a/queue-6.12/sched_ext-fix-missing-post-enqueue-handling-in-move_local_task_to_local_dsq.patch b/queue-6.12/sched_ext-fix-missing-post-enqueue-handling-in-move_local_task_to_local_dsq.patch
new file mode 100644 (file)
index 0000000..bff4c19
--- /dev/null
@@ -0,0 +1,64 @@
+From stable+bounces-204423-greg=kroah.com@vger.kernel.org Fri Jan  2 03:01:25 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu,  1 Jan 2026 21:01:17 -0500
+Subject: sched_ext: Fix missing post-enqueue handling in move_local_task_to_local_dsq()
+To: stable@vger.kernel.org
+Cc: Tejun Heo <tj@kernel.org>, Andrea Righi <arighi@nvidia.com>, Emil Tsalapatis <emil@etsalapatis.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260102020117.100465-2-sashal@kernel.org>
+
+From: Tejun Heo <tj@kernel.org>
+
+[ Upstream commit f5e1e5ec204da11fa87fdf006d451d80ce06e118 ]
+
+move_local_task_to_local_dsq() is used when moving a task from a non-local
+DSQ to a local DSQ on the same CPU. It directly manipulates the local DSQ
+without going through dispatch_enqueue() and was missing the post-enqueue
+handling that triggers preemption when SCX_ENQ_PREEMPT is set or the idle
+task is running.
+
+The function is used by move_task_between_dsqs() which backs
+scx_bpf_dsq_move() and may be called while the CPU is busy.
+
+Add local_dsq_post_enq() call to move_local_task_to_local_dsq(). As the
+dispatch path doesn't need post-enqueue handling, add SCX_RQ_IN_BALANCE
+early exit to keep consume_dispatch_q() behavior unchanged and avoid
+triggering unnecessary resched when scx_bpf_dsq_move() is used from the
+dispatch path.
+
+Fixes: 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()")
+Cc: stable@vger.kernel.org # v6.12+
+Reviewed-by: Andrea Righi <arighi@nvidia.com>
+Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -1682,6 +1682,14 @@ static void local_dsq_post_enq(struct sc
+       struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
+       bool preempt = false;
++      /*
++       * If @rq is in balance, the CPU is already vacant and looking for the
++       * next task to run. No need to preempt or trigger resched after moving
++       * @p into its local DSQ.
++       */
++      if (rq->scx.flags & SCX_RQ_IN_BALANCE)
++              return;
++
+       if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
+           rq->curr->sched_class == &ext_sched_class) {
+               rq->curr->scx.slice = 0;
+@@ -2259,6 +2267,8 @@ static void move_local_task_to_local_dsq
+       dsq_mod_nr(dst_dsq, 1);
+       p->scx.dsq = dst_dsq;
++
++      local_dsq_post_enq(dst_dsq, p, enq_flags);
+ }
+ #ifdef CONFIG_SMP
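[ Editor's note: the early exit above, restated as a compile-only
  sketch; the flag value and struct are illustrative stand-ins. ]

    #define SCX_RQ_IN_BALANCE (1U << 0)     /* illustrative value */

    struct fake_rq { unsigned int scx_flags; };

    /* In the balance path the CPU is already vacant and picking its
     * next task, so moving a task into its local DSQ needs no
     * preempt/resched. */
    static bool post_enq_should_bail(const struct fake_rq *rq)
    {
            return rq->scx_flags & SCX_RQ_IN_BALANCE;
    }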
diff --git a/queue-6.12/serial-core-fix-of-node-leak.patch b/queue-6.12/serial-core-fix-of-node-leak.patch
new file mode 100644 (file)
index 0000000..0bb79ed
--- /dev/null
@@ -0,0 +1,53 @@
+From stable+bounces-204221-greg=kroah.com@vger.kernel.org Tue Dec 30 15:00:42 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 08:59:17 -0500
+Subject: serial: core: fix OF node leak
+To: stable@vger.kernel.org
+Cc: Johan Hovold <johan@kernel.org>, Aidan Stewart <astewart@tektelic.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230135918.2221627-1-sashal@kernel.org>
+
+From: Johan Hovold <johan@kernel.org>
+
+[ Upstream commit 273cc3406c8d4e830ed45967c70d08d20ca1380e ]
+
+Make sure that the OF node reference taken when initialising the
+control and port devices is dropped when the devices are later
+released.
+
+Fixes: d36f0e9a0002 ("serial: core: restore of_node information in sysfs")
+Cc: Aidan Stewart <astewart@tektelic.com>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://lore.kernel.org/r/20250708085817.16070-1-johan@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 24ec03cc5512 ("serial: core: Restore sysfs fwnode information")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/serial_base_bus.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/tty/serial/serial_base_bus.c
++++ b/drivers/tty/serial/serial_base_bus.c
+@@ -13,6 +13,7 @@
+ #include <linux/device.h>
+ #include <linux/idr.h>
+ #include <linux/module.h>
++#include <linux/of.h>
+ #include <linux/serial_core.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+@@ -93,6 +94,7 @@ static void serial_base_ctrl_release(str
+ {
+       struct serial_ctrl_device *ctrl_dev = to_serial_base_ctrl_device(dev);
++      of_node_put(dev->of_node);
+       kfree(ctrl_dev);
+ }
+@@ -140,6 +142,7 @@ static void serial_base_port_release(str
+ {
+       struct serial_port_device *port_dev = to_serial_base_port_device(dev);
++      of_node_put(dev->of_node);
+       kfree(port_dev);
+ }
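[ Editor's note: the general rule this fix applies: a reference taken at
  device-init time must be dropped in the matching release callback.
  Kernel-style sketch; struct example_dev and to_example_dev() are
  hypothetical names. ]

    /* The init path took the reference:
     *     device_set_of_node_from_dev(dev, parent);
     */
    static void example_release(struct device *dev)
    {
            struct example_dev *edev = to_example_dev(dev);

            of_node_put(dev->of_node);      /* balance the init-time get */
            kfree(edev);
    }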
diff --git a/queue-6.12/serial-core-fix-serial-device-initialization.patch b/queue-6.12/serial-core-fix-serial-device-initialization.patch
new file mode 100644 (file)
index 0000000..ee7028e
--- /dev/null
@@ -0,0 +1,36 @@
+From f54151148b969fb4b62bec8093d255306d20df30 Mon Sep 17 00:00:00 2001
+From: Alexander Stein <alexander.stein@ew.tq-group.com>
+Date: Fri, 19 Dec 2025 16:28:12 +0100
+Subject: serial: core: Fix serial device initialization
+
+From: Alexander Stein <alexander.stein@ew.tq-group.com>
+
+commit f54151148b969fb4b62bec8093d255306d20df30 upstream.
+
+While restoring the sysfs fwnode information, the of_node_reused flag
+was dropped. It was previously set by device_set_of_node_from_dev().
+Add it back manually.
+
+Fixes: 24ec03cc5512 ("serial: core: Restore sysfs fwnode information")
+Cc: stable <stable@kernel.org>
+Suggested-by: Cosmin Tanislav <demonsingur@gmail.com>
+Signed-off-by: Alexander Stein <alexander.stein@ew.tq-group.com>
+Tested-by: Michael Walle <mwalle@kernel.org>
+Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Tested-by: Cosmin Tanislav <demonsingur@gmail.com>
+Link: https://patch.msgid.link/20251219152813.1893982-1-alexander.stein@ew.tq-group.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/serial_base_bus.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/tty/serial/serial_base_bus.c
++++ b/drivers/tty/serial/serial_base_bus.c
+@@ -74,6 +74,7 @@ static int serial_base_device_init(struc
+       dev->parent = parent_dev;
+       dev->bus = &serial_base_bus_type;
+       dev->release = release;
++      dev->of_node_reused = true;
+       device_set_node(dev, fwnode_handle_get(dev_fwnode(parent_dev)));
diff --git a/queue-6.12/serial-core-restore-sysfs-fwnode-information.patch b/queue-6.12/serial-core-restore-sysfs-fwnode-information.patch
new file mode 100644 (file)
index 0000000..ff5b864
--- /dev/null
@@ -0,0 +1,73 @@
+From stable+bounces-204222-greg=kroah.com@vger.kernel.org Tue Dec 30 14:59:31 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 08:59:18 -0500
+Subject: serial: core: Restore sysfs fwnode information
+To: stable@vger.kernel.org
+Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>, stable <stable@kernel.org>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230135918.2221627-2-sashal@kernel.org>
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 24ec03cc55126b7b3adf102f4b3d9f716532b329 ]
+
+The change that restores sysfs fwnode information does so only for OF
+cases. Update the fix to cover all possible types of fwnodes.
+
+Fixes: d36f0e9a0002 ("serial: core: restore of_node information in sysfs")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://patch.msgid.link/20251127163650.2942075-1-andriy.shevchenko@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/serial_base_bus.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/tty/serial/serial_base_bus.c
++++ b/drivers/tty/serial/serial_base_bus.c
+@@ -13,7 +13,7 @@
+ #include <linux/device.h>
+ #include <linux/idr.h>
+ #include <linux/module.h>
+-#include <linux/of.h>
++#include <linux/property.h>
+ #include <linux/serial_core.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+@@ -60,6 +60,7 @@ void serial_base_driver_unregister(struc
+       driver_unregister(driver);
+ }
++/* On failure the caller must put device @dev with put_device() */
+ static int serial_base_device_init(struct uart_port *port,
+                                  struct device *dev,
+                                  struct device *parent_dev,
+@@ -73,7 +74,8 @@ static int serial_base_device_init(struc
+       dev->parent = parent_dev;
+       dev->bus = &serial_base_bus_type;
+       dev->release = release;
+-      device_set_of_node_from_dev(dev, parent_dev);
++
++      device_set_node(dev, fwnode_handle_get(dev_fwnode(parent_dev)));
+       if (!serial_base_initialized) {
+               dev_dbg(port->dev, "uart_add_one_port() called before arch_initcall()?\n");
+@@ -94,7 +96,7 @@ static void serial_base_ctrl_release(str
+ {
+       struct serial_ctrl_device *ctrl_dev = to_serial_base_ctrl_device(dev);
+-      of_node_put(dev->of_node);
++      fwnode_handle_put(dev_fwnode(dev));
+       kfree(ctrl_dev);
+ }
+@@ -142,7 +144,7 @@ static void serial_base_port_release(str
+ {
+       struct serial_port_device *port_dev = to_serial_base_port_device(dev);
+-      of_node_put(dev->of_node);
++      fwnode_handle_put(dev_fwnode(dev));
+       kfree(port_dev);
+ }
diff --git a/queue-6.12/serial-xilinx_uartps-fix-rs485-delay_rts_after_send.patch b/queue-6.12/serial-xilinx_uartps-fix-rs485-delay_rts_after_send.patch
new file mode 100644 (file)
index 0000000..d75f79a
--- /dev/null
@@ -0,0 +1,68 @@
+From stable+bounces-204237-greg=kroah.com@vger.kernel.org Tue Dec 30 16:56:06 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 10:55:56 -0500
+Subject: serial: xilinx_uartps: fix rs485 delay_rts_after_send
+To: stable@vger.kernel.org
+Cc: "j.turek" <jakub.turek@elsta.tech>, stable <stable@kernel.org>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230155556.2289800-3-sashal@kernel.org>
+
+From: "j.turek" <jakub.turek@elsta.tech>
+
+[ Upstream commit 267ee93c417e685d9f8e079e41c70ba6ee4df5a5 ]
+
+The delayed RTS line change should be triggered when there are no more
+bytes in the kfifo and the hardware buffer is empty. Without this
+patch, RTS control is scheduled right after feeding the hardware
+buffer, which is too early: the RTS line may change state before the
+hardware buffer is empty.
+
+With this patch, the delayed RTS state change is triggered when
+cdns_uart_handle_tx() is called from cdns_uart_isr() on
+CDNS_UART_IXR_TXEMPTY, i.e. exactly when the hardware has completed
+transmission.
+
+Fixes: fccc9d9233f9 ("tty: serial: uartps: Add rs485 support to uartps driver")
+Cc: stable <stable@kernel.org>
+Link: https://patch.msgid.link/20251221103221.1971125-1-jakub.turek@elsta.tech
+Signed-off-by: Jakub Turek  <jakub.turek@elsta.tech>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/xilinx_uartps.c |   14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -431,10 +431,17 @@ static void cdns_uart_handle_tx(void *de
+       struct tty_port *tport = &port->state->port;
+       unsigned int numbytes;
+       unsigned char ch;
++      ktime_t rts_delay;
+       if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) {
+               /* Disable the TX Empty interrupt */
+               writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR);
++              /* Set RTS line after delay */
++              if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED) {
++                      cdns_uart->tx_timer.function = &cdns_rs485_rx_callback;
++                      rts_delay = ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart));
++                      hrtimer_start(&cdns_uart->tx_timer, rts_delay, HRTIMER_MODE_REL);
++              }
+               return;
+       }
+@@ -451,13 +458,6 @@ static void cdns_uart_handle_tx(void *de
+       /* Enable the TX Empty interrupt */
+       writel(CDNS_UART_IXR_TXEMPTY, cdns_uart->port->membase + CDNS_UART_IER);
+-
+-      if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED &&
+-          (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port))) {
+-              hrtimer_update_function(&cdns_uart->tx_timer, cdns_rs485_rx_callback);
+-              hrtimer_start(&cdns_uart->tx_timer,
+-                            ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart)), HRTIMER_MODE_REL);
+-      }
+ }
+ /**
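[ Editor's note: a condensed kernel-style sketch of the new flow: the
  delayed RTS change is armed only from the TX-empty interrupt path,
  once the hardware has actually drained. Function names follow the
  driver; the wrapper itself is illustrative. ]

    static void on_tx_empty(struct cdns_uart *cdns_uart)
    {
            if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED) {
                    cdns_uart->tx_timer.function = &cdns_rs485_rx_callback;
                    hrtimer_start(&cdns_uart->tx_timer,
                                  ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart)),
                                  HRTIMER_MODE_REL);
            }
    }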
diff --git a/queue-6.12/serial-xilinx_uartps-use-helper-function-hrtimer_update_function.patch b/queue-6.12/serial-xilinx_uartps-use-helper-function-hrtimer_update_function.patch
new file mode 100644 (file)
index 0000000..5622f2f
--- /dev/null
@@ -0,0 +1,49 @@
+From stable+bounces-204236-greg=kroah.com@vger.kernel.org Tue Dec 30 16:56:04 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 10:55:55 -0500
+Subject: serial: xilinx_uartps: Use helper function hrtimer_update_function()
+To: stable@vger.kernel.org
+Cc: Nam Cao <namcao@linutronix.de>, Thomas Gleixner <tglx@linutronix.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251230155556.2289800-2-sashal@kernel.org>
+
+From: Nam Cao <namcao@linutronix.de>
+
+[ Upstream commit eee00df8e1f1f5648ed8f9e40e2bb54c2877344a ]
+
+The field 'function' of struct hrtimer should not be changed directly, as
+the write is lockless and a concurrent timer expiry might end up using the
+wrong function pointer.
+
+Switch to hrtimer_update_function(), which also performs runtime checks
+that it is safe to modify the callback.
+
+Signed-off-by: Nam Cao <namcao@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/all/af7823518fb060c6c97105a2513cfc61adbdf38f.1738746927.git.namcao@linutronix.de
+Stable-dep-of: 267ee93c417e ("serial: xilinx_uartps: fix rs485 delay_rts_after_send")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/xilinx_uartps.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -454,7 +454,7 @@ static void cdns_uart_handle_tx(void *de
+       if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED &&
+           (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port))) {
+-              cdns_uart->tx_timer.function = &cdns_rs485_rx_callback;
++              hrtimer_update_function(&cdns_uart->tx_timer, cdns_rs485_rx_callback);
+               hrtimer_start(&cdns_uart->tx_timer,
+                             ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart)), HRTIMER_MODE_REL);
+       }
+@@ -734,7 +734,7 @@ static void cdns_uart_start_tx(struct ua
+       if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED) {
+               if (!cdns_uart->rs485_tx_started) {
+-                      cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
++                      hrtimer_update_function(&cdns_uart->tx_timer, cdns_rs485_tx_callback);
+                       cdns_rs485_tx_setup(cdns_uart);
+                       return hrtimer_start(&cdns_uart->tx_timer,
+                                            ms_to_ktime(port->rs485.delay_rts_before_send),
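[ Editor's note: the point of the helper in two lines; a sketch, not
  driver code. ]

    /* Racy: a lockless write that a concurrently expiring timer may
     * observe stale:
     *         timer.function = new_callback;
     *
     * Safe: the helper additionally checks at runtime that the timer
     * is in a state where the callback may be changed:
     *         hrtimer_update_function(&timer, new_callback);
     */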
index 5c2a53b27337831c5c1223d0a517e8ee870ab6d9..b7ce9717f3135b98013c4f1ae11f11a31c419162 100644 (file)
@@ -483,3 +483,64 @@ drm-msm-dpu-add-missing-null-pointer-check-for-pingpong-interface.patch
 drm-i915-gem-zero-initialize-the-eb.vma-array-in-i915_gem_do_execbuffer.patch
 drm-nouveau-dispnv50-don-t-call-drm_atomic_get_crtc_state-in-prepare_fb.patch
 drm-imagination-disallow-exporting-of-pm-fw-protected-objects.patch
+lib-crypto-riscv-chacha-avoid-s0-fp-register.patch
+gfs2-fix-freeze-error-handling.patch
+btrfs-don-t-rewrite-ret-from-inode_permission.patch
+sched-eevdf-fix-min_vruntime-vs-avg_vruntime.patch
+erofs-fix-unexpected-eio-under-memory-pressure.patch
+sched_ext-fix-incorrect-sched_class-settings-for-per-cpu-migration-tasks.patch
+jbd2-fix-the-inconsistency-between-checksum-and-data-in-memory-for-journal-sb.patch
+tty-introduce-and-use-tty_port_tty_vhangup-helper.patch
+xhci-dbgtty-fix-device-unregister-fixup.patch
+f2fs-fix-to-detect-recoverable-inode-during-dryrun-of-find_fsync_dnodes.patch
+f2fs-use-global-inline_xattr_slab-instead-of-per-sb-slab-cache.patch
+f2fs-drop-inode-from-the-donation-list-when-the-last-file-is-closed.patch
+f2fs-fix-to-avoid-updating-compression-context-during-writeback.patch
+serial-core-fix-of-node-leak.patch
+serial-core-restore-sysfs-fwnode-information.patch
+mptcp-pm-ignore-unknown-endpoint-flags.patch
+mm-ksm-fix-exec-fork-inheritance-support-for-prctl.patch
+svcrdma-bound-check-rq_pages-index-in-inline-path.patch
+arm-dts-microchip-sama7g5-fix-uart-fifo-size-to-32.patch
+block-freeze-queue-when-updating-zone-resources.patch
+tpm2-sessions-fix-tpm2_read_public-range-checks.patch
+sched_ext-factor-out-local_dsq_post_enq-from-dispatch_enqueue.patch
+sched_ext-fix-missing-post-enqueue-handling-in-move_local_task_to_local_dsq.patch
+drm-displayid-add-quirk-to-ignore-displayid-checksum-errors.patch
+hrtimers-introduce-hrtimer_update_function.patch
+serial-xilinx_uartps-use-helper-function-hrtimer_update_function.patch
+serial-xilinx_uartps-fix-rs485-delay_rts_after_send.patch
+f2fs-clear-sbi_por_doing-before-initing-inmem-curseg.patch
+f2fs-add-timeout-in-f2fs_enable_checkpoint.patch
+f2fs-dump-more-information-for-f2fs_-enable-disable-_checkpoint.patch
+f2fs-fix-to-propagate-error-from-f2fs_enable_checkpoint.patch
+gpiolib-acpi-switch-to-use-enum-in-acpi_gpio_in_ignore_list.patch
+gpiolib-acpi-handle-deferred-list-via-new-api.patch
+gpiolib-acpi-add-acpi_gpio_need_run_edge_events_on_boot-getter.patch
+gpiolib-acpi-move-quirks-to-a-separate-file.patch
+gpiolib-acpi-add-a-quirk-for-acer-nitro-v15.patch
+gpiolib-acpi-add-quirk-for-asus-proart-px13.patch
+gpiolib-acpi-add-quirk-for-dell-precision-7780.patch
+netfilter-nft_ct-add-seqadj-extension-for-natted-connections.patch
+md-raid10-wait-barrier-before-returning-discard-request-with-req_nowait.patch
+drm-panthor-flush-shmem-writes-before-mapping-buffers-cpu-uncached.patch
+net-ipv6-ioam6-use-consistent-dst-names.patch
+ipv6-adopt-dst_dev-helper.patch
+net-use-dst_dev_rcu-in-sk_setup_caps.patch
+usbnet-fix-using-smp_processor_id-in-preemptible-code-warnings.patch
+serial-core-fix-serial-device-initialization.patch
+tty-fix-tty_port_tty_-hangup-kernel-doc.patch
+x86-microcode-amd-select-which-microcode-patch-to-load.patch
+media-i2c-imx219-fix-1920x1080-mode-to-use-1-1-pixel-aspect-ratio.patch
+wifi-mt76-mt7925-fix-the-unfinished-command-of-regd_notifier-before-suspend.patch
+wifi-mt76-mt7925-fix-clc-command-timeout-when-suspend-resume.patch
+wifi-mt76-mt7925-add-handler-to-hif-suspend-resume-event.patch
+idpf-add-support-for-sw-triggered-interrupts.patch
+idpf-trigger-sw-interrupt-when-exiting-wb_on_itr-mode.patch
+idpf-add-support-for-tx-refillqs-in-flow-scheduling-mode.patch
+idpf-improve-when-to-set-re-bit-logic.patch
+idpf-simplify-and-fix-splitq-tx-packet-rollback-error-path.patch
+idpf-replace-flow-scheduling-buffer-ring-with-buffer-pool.patch
+idpf-stop-tx-if-there-are-insufficient-buffer-resources.patch
+idpf-remove-obsolete-stashing-code.patch
+hrtimers-make-hrtimer_update_function-less-expensive.patch
diff --git a/queue-6.12/svcrdma-bound-check-rq_pages-index-in-inline-path.patch b/queue-6.12/svcrdma-bound-check-rq_pages-index-in-inline-path.patch
new file mode 100644 (file)
index 0000000..e7daef3
--- /dev/null
@@ -0,0 +1,39 @@
+From stable+bounces-204365-greg=kroah.com@vger.kernel.org Wed Dec 31 16:25:17 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Dec 2025 10:25:11 -0500
+Subject: svcrdma: bound check rq_pages index in inline path
+To: stable@vger.kernel.org
+Cc: Joshua Rogers <linux@joshua.hu>, Chuck Lever <chuck.lever@oracle.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20251231152511.3166711-1-sashal@kernel.org>
+
+From: Joshua Rogers <linux@joshua.hu>
+
+[ Upstream commit d1bea0ce35b6095544ee82bb54156fc62c067e58 ]
+
+svc_rdma_copy_inline_range() indexed rqstp->rq_pages[rc_curpage] without
+verifying that rc_curpage stays within the allocated page array. Add
+guards before the first use and after advancing to a new page.
+
+Fixes: d7cc73972661 ("svcrdma: support multiple Read chunks per RPC")
+Cc: stable@vger.kernel.org
+Signed-off-by: Joshua Rogers <linux@joshua.hu>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+[ replaced rqstp->rq_maxpages with ARRAY_SIZE(rqstp->rq_pages) ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sunrpc/xprtrdma/svc_rdma_rw.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
+@@ -841,6 +841,9 @@ static int svc_rdma_copy_inline_range(st
+       for (page_no = 0; page_no < numpages; page_no++) {
+               unsigned int page_len;
++              if (head->rc_curpage >= ARRAY_SIZE(rqstp->rq_pages))
++                      return -EINVAL;
++
+               page_len = min_t(unsigned int, remaining,
+                                PAGE_SIZE - head->rc_pageoff);
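[ Editor's note: the bound-check idiom in isolation, as a compile-only
  user-space sketch. ARRAY_SIZE() only works because rq_pages is a true
  array member rather than a decayed pointer; names and sizes are
  simplified stand-ins. ]

    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
    #define FAKE_PAGE_SIZE 4096

    struct fake_rqst { char *rq_pages[16]; };

    static int copy_inline(struct fake_rqst *rq, unsigned int curpage,
                           unsigned int pageoff, const char *src, size_t len)
    {
            while (len) {
                    size_t chunk = FAKE_PAGE_SIZE - pageoff;

                    /* Check the index before every dereference. */
                    if (curpage >= ARRAY_SIZE(rq->rq_pages))
                            return -1;      /* would run off the array */
                    if (chunk > len)
                            chunk = len;
                    memcpy(rq->rq_pages[curpage] + pageoff, src, chunk);
                    src += chunk;
                    len -= chunk;
                    pageoff += chunk;
                    if (pageoff == FAKE_PAGE_SIZE) {
                            curpage++;
                            pageoff = 0;
                    }
            }
            return 0;
    }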
diff --git a/queue-6.12/tpm2-sessions-fix-tpm2_read_public-range-checks.patch b/queue-6.12/tpm2-sessions-fix-tpm2_read_public-range-checks.patch
new file mode 100644 (file)
index 0000000..dd2e4e2
--- /dev/null
@@ -0,0 +1,171 @@
+From stable+bounces-204425-greg=kroah.com@vger.kernel.org Fri Jan  2 03:45:47 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu,  1 Jan 2026 21:45:19 -0500
+Subject: tpm2-sessions: Fix tpm2_read_public range checks
+To: stable@vger.kernel.org
+Cc: Jarkko Sakkinen <jarkko@kernel.org>, Jonathan McDowell <noodles@meta.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260102024519.115144-1-sashal@kernel.org>
+
+From: Jarkko Sakkinen <jarkko@kernel.org>
+
+[ Upstream commit bda1cbf73c6e241267c286427f2ed52b5735d872 ]
+
+tpm2_read_public() has some rudimentary range checks but the function does
+not ensure that the response buffer has enough bytes for the full TPMT_HA
+payload.
+
+Re-implement the function with the necessary checks and validation, and
+return the name and name size for all handle types back to the caller.
+
+Cc: stable@vger.kernel.org # v6.10+
+Fixes: d0a25bb961e6 ("tpm: Add HMAC session name/handle append")
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Reviewed-by: Jonathan McDowell <noodles@meta.com>
+[ different semantics around u8 name_size() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/tpm/tpm2-cmd.c      |    3 +
+ drivers/char/tpm/tpm2-sessions.c |   85 ++++++++++++++++++++++++---------------
+ 2 files changed, 56 insertions(+), 32 deletions(-)
+
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -11,8 +11,11 @@
+  * used by the kernel internally.
+  */
++#include "linux/dev_printk.h"
++#include "linux/tpm.h"
+ #include "tpm.h"
+ #include <crypto/hash_info.h>
++#include <linux/unaligned.h>
+ static bool disable_pcr_integrity;
+ module_param(disable_pcr_integrity, bool, 0444);
+--- a/drivers/char/tpm/tpm2-sessions.c
++++ b/drivers/char/tpm/tpm2-sessions.c
+@@ -156,47 +156,60 @@ static u8 name_size(const u8 *name)
+       return size_map[alg] + 2;
+ }
+-static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
++static int tpm2_read_public(struct tpm_chip *chip, u32 handle, void *name)
+ {
+-      struct tpm_header *head = (struct tpm_header *)buf->data;
++      u32 mso = tpm2_handle_mso(handle);
+       off_t offset = TPM_HEADER_SIZE;
+-      u32 tot_len = be32_to_cpu(head->length);
+-      u32 val;
+-
+-      /* we're starting after the header so adjust the length */
+-      tot_len -= TPM_HEADER_SIZE;
+-
+-      /* skip public */
+-      val = tpm_buf_read_u16(buf, &offset);
+-      if (val > tot_len)
+-              return -EINVAL;
+-      offset += val;
+-      /* name */
+-      val = tpm_buf_read_u16(buf, &offset);
+-      if (val != name_size(&buf->data[offset]))
+-              return -EINVAL;
+-      memcpy(name, &buf->data[offset], val);
+-      /* forget the rest */
+-      return 0;
+-}
+-
+-static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
+-{
+-      struct tpm_buf buf;
+       int rc;
++      u8 name_size_alg;
++      struct tpm_buf buf;
++
++      if (mso != TPM2_MSO_PERSISTENT && mso != TPM2_MSO_VOLATILE &&
++          mso != TPM2_MSO_NVRAM) {
++              memcpy(name, &handle, sizeof(u32));
++              return sizeof(u32);
++      }
+       rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
+       if (rc)
+               return rc;
+       tpm_buf_append_u32(&buf, handle);
+-      rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
+-      if (rc == TPM2_RC_SUCCESS)
+-              rc = tpm2_parse_read_public(name, &buf);
+-      tpm_buf_destroy(&buf);
++      rc = tpm_transmit_cmd(chip, &buf, 0, "TPM2_ReadPublic");
++      if (rc) {
++              tpm_buf_destroy(&buf);
++              return tpm_ret_to_err(rc);
++      }
+-      return rc;
++      /* Skip TPMT_PUBLIC: */
++      offset += tpm_buf_read_u16(&buf, &offset);
++
++      /*
++       * Ensure space for the length field of TPM2B_NAME and hashAlg field of
++       * TPMT_HA (the extra four bytes).
++       */
++      if (offset + 4 > tpm_buf_length(&buf)) {
++              tpm_buf_destroy(&buf);
++              return -EIO;
++      }
++
++      rc = tpm_buf_read_u16(&buf, &offset);
++      name_size_alg = name_size(&buf.data[offset]);
++
++      if (rc != name_size_alg) {
++              tpm_buf_destroy(&buf);
++              return -EIO;
++      }
++
++      if (offset + rc > tpm_buf_length(&buf)) {
++              tpm_buf_destroy(&buf);
++              return -EIO;
++      }
++
++      memcpy(name, &buf.data[offset], rc);
++      tpm_buf_destroy(&buf);
++      return name_size_alg;
+ }
+ #endif /* CONFIG_TCG_TPM2_HMAC */
+@@ -229,6 +242,7 @@ void tpm_buf_append_name(struct tpm_chip
+       enum tpm2_mso_type mso = tpm2_handle_mso(handle);
+       struct tpm2_auth *auth;
+       int slot;
++      int ret;
+ #endif
+       if (!tpm2_chip_auth(chip)) {
+@@ -251,8 +265,11 @@ void tpm_buf_append_name(struct tpm_chip
+       if (mso == TPM2_MSO_PERSISTENT ||
+           mso == TPM2_MSO_VOLATILE ||
+           mso == TPM2_MSO_NVRAM) {
+-              if (!name)
+-                      tpm2_read_public(chip, handle, auth->name[slot]);
++              if (!name) {
++                      ret = tpm2_read_public(chip, handle, auth->name[slot]);
++                      if (ret < 0)
++                              goto err;
++              }
+       } else {
+               if (name)
+                       dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
+@@ -261,6 +278,10 @@ void tpm_buf_append_name(struct tpm_chip
+       auth->name_h[slot] = handle;
+       if (name)
+               memcpy(auth->name[slot], name, name_size(name));
++      return;
++
++err:
++      tpm2_end_auth_session(chip);
+ #endif
+ }
+ EXPORT_SYMBOL_GPL(tpm_buf_append_name);
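[ Editor's note: the validation idiom used above, in isolation: before
  consuming bytes at an offset, prove they fit in the buffer. The check
  is written as a subtraction so it cannot itself overflow. Compile-only
  sketch with stand-in types. ]

    #include <stddef.h>
    #include <stdint.h>

    struct fake_buf { const uint8_t *data; size_t len; };

    /* Nonzero iff @need bytes starting at @offset fit inside @b. */
    static int buf_has(const struct fake_buf *b, size_t offset, size_t need)
    {
            return offset <= b->len && need <= b->len - offset;
    }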
diff --git a/queue-6.12/tty-fix-tty_port_tty_-hangup-kernel-doc.patch b/queue-6.12/tty-fix-tty_port_tty_-hangup-kernel-doc.patch
new file mode 100644 (file)
index 0000000..309bc74
--- /dev/null
@@ -0,0 +1,81 @@
+From 6241b49540a65a6d5274fa938fd3eb4cbfe2e076 Mon Sep 17 00:00:00 2001
+From: "Jiri Slaby (SUSE)" <jirislaby@kernel.org>
+Date: Tue, 24 Jun 2025 10:06:41 +0200
+Subject: tty: fix tty_port_tty_*hangup() kernel-doc
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jiri Slaby (SUSE) <jirislaby@kernel.org>
+
+commit 6241b49540a65a6d5274fa938fd3eb4cbfe2e076 upstream.
+
+The commit below added a new helper, but omitted to move (and add) the
+corresponding kernel-doc. Do it now.
+
+Signed-off-by: "Jiri Slaby (SUSE)" <jirislaby@kernel.org>
+Fixes: 2b5eac0f8c6e ("tty: introduce and use tty_port_tty_vhangup() helper")
+Link: https://lore.kernel.org/all/b23d566c-09dc-7374-cc87-0ad4660e8b2e@linux.intel.com/
+Reported-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: linux-doc@vger.kernel.org
+Link: https://lore.kernel.org/r/20250624080641.509959-6-jirislaby@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/driver-api/tty/tty_port.rst |    5 +++--
+ drivers/tty/tty_port.c                    |    5 -----
+ include/linux/tty_port.h                  |    9 +++++++++
+ 3 files changed, 12 insertions(+), 7 deletions(-)
+
+--- a/Documentation/driver-api/tty/tty_port.rst
++++ b/Documentation/driver-api/tty/tty_port.rst
+@@ -42,9 +42,10 @@ TTY Refcounting
+ TTY Helpers
+ -----------
++.. kernel-doc::  include/linux/tty_port.h
++   :identifiers: tty_port_tty_hangup tty_port_tty_vhangup
+ .. kernel-doc::  drivers/tty/tty_port.c
+-   :identifiers: tty_port_tty_hangup tty_port_tty_wakeup
+-
++   :identifiers: tty_port_tty_wakeup
+ Modem Signals
+ -------------
+--- a/drivers/tty/tty_port.c
++++ b/drivers/tty/tty_port.c
+@@ -411,11 +411,6 @@ void tty_port_hangup(struct tty_port *po
+ }
+ EXPORT_SYMBOL(tty_port_hangup);
+-/**
+- * tty_port_tty_hangup - helper to hang up a tty
+- * @port: tty port
+- * @check_clocal: hang only ttys with %CLOCAL unset?
+- */
+ void __tty_port_tty_hangup(struct tty_port *port, bool check_clocal, bool async)
+ {
+       struct tty_struct *tty = tty_port_tty_get(port);
+--- a/include/linux/tty_port.h
++++ b/include/linux/tty_port.h
+@@ -254,11 +254,20 @@ static inline int tty_port_users(struct
+       return port->count + port->blocked_open;
+ }
++/**
++ * tty_port_tty_hangup - helper to hang up a tty asynchronously
++ * @port: tty port
++ * @check_clocal: hang only ttys with %CLOCAL unset?
++ */
+ static inline void tty_port_tty_hangup(struct tty_port *port, bool check_clocal)
+ {
+       __tty_port_tty_hangup(port, check_clocal, true);
+ }
++/**
++ * tty_port_tty_vhangup - helper to hang up a tty synchronously
++ * @port: tty port
++ */
+ static inline void tty_port_tty_vhangup(struct tty_port *port)
+ {
+       __tty_port_tty_hangup(port, false, false);
diff --git a/queue-6.12/tty-introduce-and-use-tty_port_tty_vhangup-helper.patch b/queue-6.12/tty-introduce-and-use-tty_port_tty_vhangup-helper.patch
new file mode 100644 (file)
index 0000000..8d62df6
--- /dev/null
@@ -0,0 +1,233 @@
+From stable+bounces-204273-greg=kroah.com@vger.kernel.org Tue Dec 30 19:32:43 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 13:32:36 -0500
+Subject: tty: introduce and use tty_port_tty_vhangup() helper
+To: stable@vger.kernel.org
+Cc: "Jiri Slaby (SUSE)" <jirislaby@kernel.org>, "Karsten Keil" <isdn@linux-pingi.de>, "David Lin" <dtwlin@gmail.com>, "Johan Hovold" <johan@kernel.org>, "Alex Elder" <elder@kernel.org>, "Oliver Neukum" <oneukum@suse.com>, "Marcel Holtmann" <marcel@holtmann.org>, "Johan Hedberg" <johan.hedberg@gmail.com>, "Luiz Augusto von Dentz" <luiz.dentz@gmail.com>, "Ilpo Järvinen" <ilpo.jarvinen@linux.intel.com>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251230183237.2393657-1-sashal@kernel.org>
+
+From: "Jiri Slaby (SUSE)" <jirislaby@kernel.org>
+
+[ Upstream commit 2b5eac0f8c6e79bc152c8804f9f88d16717013ab ]
+
+This code (tty_get -> vhangup -> tty_put) is repeated in a few places.
+Introduce a helper, similar to the asynchronous tty_port_tty_hangup(),
+to handle the synchronous vhangup as well, and use it in those places.
+
+In fact, reuse tty_port_tty_hangup()'s code and call tty_vhangup()
+depending on a new bool parameter.
+
+Signed-off-by: "Jiri Slaby (SUSE)" <jirislaby@kernel.org>
+Cc: Karsten Keil <isdn@linux-pingi.de>
+Cc: David Lin <dtwlin@gmail.com>
+Cc: Johan Hovold <johan@kernel.org>
+Cc: Alex Elder <elder@kernel.org>
+Cc: Oliver Neukum <oneukum@suse.com>
+Cc: Marcel Holtmann <marcel@holtmann.org>
+Cc: Johan Hedberg <johan.hedberg@gmail.com>
+Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com>
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Link: https://lore.kernel.org/r/20250611100319.186924-2-jirislaby@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: 74098cc06e75 ("xhci: dbgtty: fix device unregister: fixup")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/isdn/capi/capi.c         |    8 +-------
+ drivers/staging/greybus/uart.c   |    7 +------
+ drivers/tty/serial/serial_core.c |    7 +------
+ drivers/tty/tty_port.c           |   12 ++++++++----
+ drivers/usb/class/cdc-acm.c      |    7 +------
+ drivers/usb/serial/usb-serial.c  |    7 +------
+ include/linux/tty_port.h         |   12 +++++++++++-
+ net/bluetooth/rfcomm/tty.c       |    7 +------
+ 8 files changed, 25 insertions(+), 42 deletions(-)
+
+--- a/drivers/isdn/capi/capi.c
++++ b/drivers/isdn/capi/capi.c
+@@ -306,15 +306,9 @@ static void capincci_alloc_minor(struct
+ static void capincci_free_minor(struct capincci *np)
+ {
+       struct capiminor *mp = np->minorp;
+-      struct tty_struct *tty;
+       if (mp) {
+-              tty = tty_port_tty_get(&mp->port);
+-              if (tty) {
+-                      tty_vhangup(tty);
+-                      tty_kref_put(tty);
+-              }
+-
++              tty_port_tty_vhangup(&mp->port);
+               capiminor_free(mp);
+       }
+ }
+--- a/drivers/staging/greybus/uart.c
++++ b/drivers/staging/greybus/uart.c
+@@ -914,7 +914,6 @@ static void gb_uart_remove(struct gbphy_
+ {
+       struct gb_tty *gb_tty = gb_gbphy_get_data(gbphy_dev);
+       struct gb_connection *connection = gb_tty->connection;
+-      struct tty_struct *tty;
+       int ret;
+       ret = gbphy_runtime_get_sync(gbphy_dev);
+@@ -927,11 +926,7 @@ static void gb_uart_remove(struct gbphy_
+       wake_up_all(&gb_tty->wioctl);
+       mutex_unlock(&gb_tty->mutex);
+-      tty = tty_port_tty_get(&gb_tty->port);
+-      if (tty) {
+-              tty_vhangup(tty);
+-              tty_kref_put(tty);
+-      }
++      tty_port_tty_vhangup(&gb_tty->port);
+       gb_connection_disable_rx(connection);
+       tty_unregister_device(gb_tty_driver, gb_tty->minor);
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -3238,7 +3238,6 @@ static void serial_core_remove_one_port(
+       struct uart_state *state = drv->state + uport->line;
+       struct tty_port *port = &state->port;
+       struct uart_port *uart_port;
+-      struct tty_struct *tty;
+       mutex_lock(&port->mutex);
+       uart_port = uart_port_check(state);
+@@ -3257,11 +3256,7 @@ static void serial_core_remove_one_port(
+        */
+       tty_port_unregister_device(port, drv->tty_driver, uport->line);
+-      tty = tty_port_tty_get(port);
+-      if (tty) {
+-              tty_vhangup(port->tty);
+-              tty_kref_put(tty);
+-      }
++      tty_port_tty_vhangup(port);
+       /*
+        * If the port is used as a console, unregister it
+--- a/drivers/tty/tty_port.c
++++ b/drivers/tty/tty_port.c
+@@ -416,15 +416,19 @@ EXPORT_SYMBOL(tty_port_hangup);
+  * @port: tty port
+  * @check_clocal: hang only ttys with %CLOCAL unset?
+  */
+-void tty_port_tty_hangup(struct tty_port *port, bool check_clocal)
++void __tty_port_tty_hangup(struct tty_port *port, bool check_clocal, bool async)
+ {
+       struct tty_struct *tty = tty_port_tty_get(port);
+-      if (tty && (!check_clocal || !C_CLOCAL(tty)))
+-              tty_hangup(tty);
++      if (tty && (!check_clocal || !C_CLOCAL(tty))) {
++              if (async)
++                      tty_hangup(tty);
++              else
++                      tty_vhangup(tty);
++      }
+       tty_kref_put(tty);
+ }
+-EXPORT_SYMBOL_GPL(tty_port_tty_hangup);
++EXPORT_SYMBOL_GPL(__tty_port_tty_hangup);
+ /**
+  * tty_port_tty_wakeup - helper to wake up a tty
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1572,7 +1572,6 @@ err_put_port:
+ static void acm_disconnect(struct usb_interface *intf)
+ {
+       struct acm *acm = usb_get_intfdata(intf);
+-      struct tty_struct *tty;
+       int i;
+       /* sibling interface is already cleaning up */
+@@ -1599,11 +1598,7 @@ static void acm_disconnect(struct usb_in
+       usb_set_intfdata(acm->data, NULL);
+       mutex_unlock(&acm->mutex);
+-      tty = tty_port_tty_get(&acm->port);
+-      if (tty) {
+-              tty_vhangup(tty);
+-              tty_kref_put(tty);
+-      }
++      tty_port_tty_vhangup(&acm->port);
+       cancel_delayed_work_sync(&acm->dwork);
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -1178,7 +1178,6 @@ static void usb_serial_disconnect(struct
+       struct usb_serial *serial = usb_get_intfdata(interface);
+       struct device *dev = &interface->dev;
+       struct usb_serial_port *port;
+-      struct tty_struct *tty;
+       /* sibling interface is cleaning up */
+       if (!serial)
+@@ -1193,11 +1192,7 @@ static void usb_serial_disconnect(struct
+       for (i = 0; i < serial->num_ports; ++i) {
+               port = serial->port[i];
+-              tty = tty_port_tty_get(&port->port);
+-              if (tty) {
+-                      tty_vhangup(tty);
+-                      tty_kref_put(tty);
+-              }
++              tty_port_tty_vhangup(&port->port);
+               usb_serial_port_poison_urbs(port);
+               wake_up_interruptible(&port->port.delta_msr_wait);
+               cancel_work_sync(&port->work);
+--- a/include/linux/tty_port.h
++++ b/include/linux/tty_port.h
+@@ -235,7 +235,7 @@ bool tty_port_carrier_raised(struct tty_
+ void tty_port_raise_dtr_rts(struct tty_port *port);
+ void tty_port_lower_dtr_rts(struct tty_port *port);
+ void tty_port_hangup(struct tty_port *port);
+-void tty_port_tty_hangup(struct tty_port *port, bool check_clocal);
++void __tty_port_tty_hangup(struct tty_port *port, bool check_clocal, bool async);
+ void tty_port_tty_wakeup(struct tty_port *port);
+ int tty_port_block_til_ready(struct tty_port *port, struct tty_struct *tty,
+               struct file *filp);
+@@ -254,4 +254,14 @@ static inline int tty_port_users(struct
+       return port->count + port->blocked_open;
+ }
++static inline void tty_port_tty_hangup(struct tty_port *port, bool check_clocal)
++{
++      __tty_port_tty_hangup(port, check_clocal, true);
++}
++
++static inline void tty_port_tty_vhangup(struct tty_port *port)
++{
++      __tty_port_tty_hangup(port, false, false);
++}
++
+ #endif
+--- a/net/bluetooth/rfcomm/tty.c
++++ b/net/bluetooth/rfcomm/tty.c
+@@ -438,7 +438,6 @@ static int __rfcomm_release_dev(void __u
+ {
+       struct rfcomm_dev_req req;
+       struct rfcomm_dev *dev;
+-      struct tty_struct *tty;
+       if (copy_from_user(&req, arg, sizeof(req)))
+               return -EFAULT;
+@@ -464,11 +463,7 @@ static int __rfcomm_release_dev(void __u
+               rfcomm_dlc_close(dev->dlc, 0);
+       /* Shut down TTY synchronously before freeing rfcomm_dev */
+-      tty = tty_port_tty_get(&dev->port);
+-      if (tty) {
+-              tty_vhangup(tty);
+-              tty_kref_put(tty);
+-      }
++      tty_port_tty_vhangup(&dev->port);
+       if (!test_bit(RFCOMM_TTY_OWNED, &dev->status))
+               tty_port_put(&dev->port);
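[ Editor's note: the consolidated pattern, restated. tty_port_tty_get()
  may return NULL and tty_kref_put() tolerates NULL, which is why the
  helper can drop the reference unconditionally. Kernel-style sketch
  mirroring the new helper. ]

    struct tty_struct *tty = tty_port_tty_get(port);

    if (tty)
            tty_vhangup(tty);
    tty_kref_put(tty);      /* NULL-safe */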
diff --git a/queue-6.12/usbnet-fix-using-smp_processor_id-in-preemptible-code-warnings.patch b/queue-6.12/usbnet-fix-using-smp_processor_id-in-preemptible-code-warnings.patch
new file mode 100644 (file)
index 0000000..d4eb411
--- /dev/null
@@ -0,0 +1,78 @@
+From stable+bounces-204514-greg=kroah.com@vger.kernel.org Fri Jan  2 21:38:09 2026
+From: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Date: Fri,  2 Jan 2026 12:37:27 -0800
+Subject: usbnet: Fix using smp_processor_id() in preemptible code warnings
+To: stable@vger.kernel.org
+Cc: Zqiang <qiang.zhang@linux.dev>, Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>, Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Message-ID: <20260102203727.1455662-8-harshit.m.mogalapalli@oracle.com>
+
+From: Zqiang <qiang.zhang@linux.dev>
+
+[ Upstream commit 327cd4b68b4398b6c24f10eb2b2533ffbfc10185 ]
+
+Syzbot reported the following warning:
+
+BUG: using smp_processor_id() in preemptible [00000000] code: dhcpcd/2879
+caller is usbnet_skb_return+0x74/0x490 drivers/net/usb/usbnet.c:331
+CPU: 1 UID: 0 PID: 2879 Comm: dhcpcd Not tainted 6.15.0-rc4-syzkaller-00098-g615dca38c2ea #0 PREEMPT(voluntary)
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:94 [inline]
+ dump_stack_lvl+0x16c/0x1f0 lib/dump_stack.c:120
+ check_preemption_disabled+0xd0/0xe0 lib/smp_processor_id.c:49
+ usbnet_skb_return+0x74/0x490 drivers/net/usb/usbnet.c:331
+ usbnet_resume_rx+0x4b/0x170 drivers/net/usb/usbnet.c:708
+ usbnet_change_mtu+0x1be/0x220 drivers/net/usb/usbnet.c:417
+ __dev_set_mtu net/core/dev.c:9443 [inline]
+ netif_set_mtu_ext+0x369/0x5c0 net/core/dev.c:9496
+ netif_set_mtu+0xb0/0x160 net/core/dev.c:9520
+ dev_set_mtu+0xae/0x170 net/core/dev_api.c:247
+ dev_ifsioc+0xa31/0x18d0 net/core/dev_ioctl.c:572
+ dev_ioctl+0x223/0x10e0 net/core/dev_ioctl.c:821
+ sock_do_ioctl+0x19d/0x280 net/socket.c:1204
+ sock_ioctl+0x42f/0x6a0 net/socket.c:1311
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:906 [inline]
+ __se_sys_ioctl fs/ioctl.c:892 [inline]
+ __x64_sys_ioctl+0x190/0x200 fs/ioctl.c:892
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xcd/0x260 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+For historical and portability reasons, netif_rx() is usually run in
+softirq or interrupt context; this commit therefore adds
+local_bh_disable/enable() protection in usbnet_resume_rx().
+
+Fixes: 43daa96b166c ("usbnet: Stop RX Q on MTU change")
+Link: https://syzkaller.appspot.com/bug?id=81f55dfa587ee544baaaa5a359a060512228c1e1
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Zqiang <qiang.zhang@linux.dev>
+Link: https://patch.msgid.link/20251011070518.7095-1-qiang.zhang@linux.dev
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+[Harshit: Resolved conflicts due to missing commit: 2c04d279e857 ("net:
+usb: Convert tasklet API to new bottom half workqueue mechanism") in
+6.12.y]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/usbnet.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -702,6 +702,7 @@ void usbnet_resume_rx(struct usbnet *dev
+       struct sk_buff *skb;
+       int num = 0;
++      local_bh_disable();
+       clear_bit(EVENT_RX_PAUSED, &dev->flags);
+       while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
+@@ -710,6 +711,7 @@ void usbnet_resume_rx(struct usbnet *dev
+       }
+       tasklet_schedule(&dev->bh);
++      local_bh_enable();
+       netif_dbg(dev, rx_status, dev->net,
+                 "paused rx queue disabled, %d skbs requeued\n", num);
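[ Editor's note: the fix pattern in isolation: code that assumes BHs
  are disabled, when reached from process context, is bracketed with
  local_bh_disable()/local_bh_enable(). Kernel-style sketch; the loop
  body is elided. ]

    local_bh_disable();
    /* ... requeue paused skbs via usbnet_skb_return(), which the
     * commit message notes is usually run in softirq context ... */
    tasklet_schedule(&dev->bh);
    local_bh_enable();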
diff --git a/queue-6.12/wifi-mt76-mt7925-add-handler-to-hif-suspend-resume-event.patch b/queue-6.12/wifi-mt76-mt7925-add-handler-to-hif-suspend-resume-event.patch
new file mode 100644 (file)
index 0000000..823e459
--- /dev/null
@@ -0,0 +1,480 @@
+From fm-294854-202601051116303c39c6d4f200020724-VdAqR_@rts-flowmailer.siemens.com Mon Jan  5 12:16:41 2026
+From: Jan Kiszka <jan.kiszka@siemens.com>
+Date: Mon,  5 Jan 2026 12:16:28 +0100
+Subject: wifi: mt76: mt7925: add handler to hif suspend/resume event
+To: stable@vger.kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Felix Fietkau <nbd@nbd.name>, Quan Zhou <quan.zhou@mediatek.com>
+Cc: Sasha Levin <sashal@kernel.org>, linux-kernel@vger.kernel.org, linux-wireless@vger.kernel.org
+Message-ID: <2976de5866957f422963bda026179b8a2958139d.1767611788.git.jan.kiszka@siemens.com>
+
+From: Quan Zhou <quan.zhou@mediatek.com>
+
+[ Upstream commit 8f6571ad470feb242dcef36e53f7cf1bba03780f ]
+
+When the system suspends or resumes, the WiFi driver sends an hif_ctrl
+command to the firmware and waits for an event. The event format
+reported by the chip has changed, and the current mt7925 driver does
+not account for this, resulting in a command timeout. Add a flow to
+handle the hif_ctrl event to avoid the timeout. We also extend the
+mt76_connac_mcu_set_hif_suspend() API for connac3 this time.
+
+Signed-off-by: Quan Zhou <quan.zhou@mediatek.com>
+Link: https://patch.msgid.link/3a0844ff5162142c4a9f3cf7104f75076ddd3b87.1735910562.git.quan.zhou@mediatek.com
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7615/main.c     |    4 -
+ drivers/net/wireless/mediatek/mt76/mt7615/pci.c      |    6 +-
+ drivers/net/wireless/mediatek/mt76/mt7615/sdio.c     |    4 -
+ drivers/net/wireless/mediatek/mt76/mt7615/usb.c      |    4 -
+ drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c |    4 -
+ drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h |    3 -
+ drivers/net/wireless/mediatek/mt76/mt7921/pci.c      |    6 +-
+ drivers/net/wireless/mediatek/mt76/mt7921/sdio.c     |    6 +-
+ drivers/net/wireless/mediatek/mt76/mt7921/usb.c      |    4 -
+ drivers/net/wireless/mediatek/mt76/mt7925/mcu.c      |   49 ++++++++++++++++++-
+ drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h   |   20 +++++++
+ drivers/net/wireless/mediatek/mt76/mt7925/pci.c      |   29 ++++++++---
+ drivers/net/wireless/mediatek/mt76/mt7925/usb.c      |   20 +++++--
+ drivers/net/wireless/mediatek/mt76/mt792x.h          |    2 
+ 14 files changed, 127 insertions(+), 34 deletions(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -1249,7 +1249,7 @@ static int mt7615_suspend(struct ieee802
+                                           phy->mt76);
+       if (!mt7615_dev_running(dev))
+-              err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
++              err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, true);
+       mt7615_mutex_release(dev);
+@@ -1271,7 +1271,7 @@ static int mt7615_resume(struct ieee8021
+       if (!running) {
+               int err;
+-              err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
++              err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, true);
+               if (err < 0) {
+                       mt7615_mutex_release(dev);
+                       return err;
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+@@ -83,7 +83,7 @@ static int mt7615_pci_suspend(struct pci
+       hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
+                     mt7615_firmware_offload(dev);
+       if (hif_suspend) {
+-              err = mt76_connac_mcu_set_hif_suspend(mdev, true);
++              err = mt76_connac_mcu_set_hif_suspend(mdev, true, true);
+               if (err)
+                       return err;
+       }
+@@ -131,7 +131,7 @@ restore:
+       }
+       napi_enable(&mdev->tx_napi);
+       if (hif_suspend)
+-              mt76_connac_mcu_set_hif_suspend(mdev, false);
++              mt76_connac_mcu_set_hif_suspend(mdev, false, true);
+       return err;
+ }
+@@ -175,7 +175,7 @@ static int mt7615_pci_resume(struct pci_
+       if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
+           mt7615_firmware_offload(dev))
+-              err = mt76_connac_mcu_set_hif_suspend(mdev, false);
++              err = mt76_connac_mcu_set_hif_suspend(mdev, false, true);
+       return err;
+ }
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+@@ -191,7 +191,7 @@ static int mt7663s_suspend(struct device
+           mt7615_firmware_offload(mdev)) {
+               int err;
+-              err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, true);
++              err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, true, true);
+               if (err < 0)
+                       return err;
+       }
+@@ -230,7 +230,7 @@ static int mt7663s_resume(struct device
+       if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) &&
+           mt7615_firmware_offload(mdev))
+-              err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, false);
++              err = mt76_connac_mcu_set_hif_suspend(&mdev->mt76, false, true);
+       return err;
+ }
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+@@ -225,7 +225,7 @@ static int mt7663u_suspend(struct usb_in
+           mt7615_firmware_offload(dev)) {
+               int err;
+-              err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
++              err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, true);
+               if (err < 0)
+                       return err;
+       }
+@@ -253,7 +253,7 @@ static int mt7663u_resume(struct usb_int
+       if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
+           mt7615_firmware_offload(dev))
+-              err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
++              err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, true);
+       return err;
+ }
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -2527,7 +2527,7 @@ mt76_connac_mcu_set_wow_ctrl(struct mt76
+ }
+ EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_wow_ctrl);
+-int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
++int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend, bool wait_resp)
+ {
+       struct {
+               struct {
+@@ -2559,7 +2559,7 @@ int mt76_connac_mcu_set_hif_suspend(stru
+               req.hdr.hif_type = 0;
+       return mt76_mcu_send_msg(dev, MCU_UNI_CMD(HIF_CTRL), &req,
+-                               sizeof(req), true);
++                               sizeof(req), wait_resp);
+ }
+ EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_hif_suspend);
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+@@ -1049,6 +1049,7 @@ enum {
+ /* unified event table */
+ enum {
+       MCU_UNI_EVENT_RESULT = 0x01,
++      MCU_UNI_EVENT_HIF_CTRL = 0x03,
+       MCU_UNI_EVENT_FW_LOG_2_HOST = 0x04,
+       MCU_UNI_EVENT_ACCESS_REG = 0x6,
+       MCU_UNI_EVENT_IE_COUNTDOWN = 0x09,
+@@ -1989,7 +1990,7 @@ int mt76_connac_mcu_set_suspend_mode(str
+                                    struct ieee80211_vif *vif,
+                                    bool enable, u8 mdtim,
+                                    bool wow_suspend);
+-int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend);
++int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend, bool wait_resp);
+ void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
+                                     struct ieee80211_vif *vif);
+ int mt76_connac_sta_state_dp(struct mt76_dev *dev,
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+@@ -435,7 +435,7 @@ static int mt7921_pci_suspend(struct dev
+       if (err < 0)
+               goto restore_suspend;
+-      err = mt76_connac_mcu_set_hif_suspend(mdev, true);
++      err = mt76_connac_mcu_set_hif_suspend(mdev, true, true);
+       if (err)
+               goto restore_suspend;
+@@ -481,7 +481,7 @@ restore_napi:
+       if (!pm->ds_enable)
+               mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
+-      mt76_connac_mcu_set_hif_suspend(mdev, false);
++      mt76_connac_mcu_set_hif_suspend(mdev, false, true);
+ restore_suspend:
+       pm->suspended = false;
+@@ -532,7 +532,7 @@ static int mt7921_pci_resume(struct devi
+       if (!pm->ds_enable)
+               mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
+-      err = mt76_connac_mcu_set_hif_suspend(mdev, false);
++      err = mt76_connac_mcu_set_hif_suspend(mdev, false, true);
+       if (err < 0)
+               goto failed;
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+@@ -240,7 +240,7 @@ static int mt7921s_suspend(struct device
+                          mt76s_txqs_empty(&dev->mt76), 5 * HZ);
+       /* It is supposed that SDIO bus is idle at the point */
+-      err = mt76_connac_mcu_set_hif_suspend(mdev, true);
++      err = mt76_connac_mcu_set_hif_suspend(mdev, true, true);
+       if (err)
+               goto restore_worker;
+@@ -258,7 +258,7 @@ static int mt7921s_suspend(struct device
+ restore_txrx_worker:
+       mt76_worker_enable(&mdev->sdio.net_worker);
+       mt76_worker_enable(&mdev->sdio.txrx_worker);
+-      mt76_connac_mcu_set_hif_suspend(mdev, false);
++      mt76_connac_mcu_set_hif_suspend(mdev, false, true);
+ restore_worker:
+       mt76_worker_enable(&mdev->tx_worker);
+@@ -302,7 +302,7 @@ static int mt7921s_resume(struct device
+       if (!pm->ds_enable)
+               mt76_connac_mcu_set_deep_sleep(mdev, false);
+-      err = mt76_connac_mcu_set_hif_suspend(mdev, false);
++      err = mt76_connac_mcu_set_hif_suspend(mdev, false, true);
+ failed:
+       pm->suspended = false;
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+@@ -263,7 +263,7 @@ static int mt7921u_suspend(struct usb_in
+       pm->suspended = true;
+       flush_work(&dev->reset_work);
+-      err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
++      err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, true);
+       if (err)
+               goto failed;
+@@ -313,7 +313,7 @@ static int mt7921u_resume(struct usb_int
+       if (err < 0)
+               goto failed;
+-      err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
++      err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, true);
+ failed:
+       pm->suspended = false;
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+@@ -39,7 +39,6 @@ int mt7925_mcu_parse_response(struct mt7
+       } else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
+                  cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
+                  cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
+-                 cmd == MCU_UNI_CMD(HIF_CTRL) ||
+                  cmd == MCU_UNI_CMD(OFFLOAD) ||
+                  cmd == MCU_UNI_CMD(SUSPEND)) {
+               struct mt7925_mcu_uni_event *event;
+@@ -342,6 +341,51 @@ static void mt7925_mcu_roc_handle_grant(
+ }
+ static void
++mt7925_mcu_handle_hif_ctrl_basic(struct mt792x_dev *dev, struct tlv *tlv)
++{
++      struct mt7925_mcu_hif_ctrl_basic_tlv *basic;
++
++      basic = (struct mt7925_mcu_hif_ctrl_basic_tlv *)tlv;
++
++      if (basic->hifsuspend) {
++              if (basic->hif_tx_traffic_status == HIF_TRAFFIC_IDLE &&
++                  basic->hif_rx_traffic_status == HIF_TRAFFIC_IDLE)
++                      /* success */
++                      dev->hif_idle = true;
++              else
++                      /* busy */
++                      /* invalid */
++                      dev->hif_idle = false;
++      } else {
++              dev->hif_resumed = true;
++      }
++      wake_up(&dev->wait);
++}
++
++static void
++mt7925_mcu_uni_hif_ctrl_event(struct mt792x_dev *dev, struct sk_buff *skb)
++{
++      struct tlv *tlv;
++      u32 tlv_len;
++
++      skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4);
++      tlv = (struct tlv *)skb->data;
++      tlv_len = skb->len;
++
++      while (tlv_len > 0 && le16_to_cpu(tlv->len) <= tlv_len) {
++              switch (le16_to_cpu(tlv->tag)) {
++              case UNI_EVENT_HIF_CTRL_BASIC:
++                      mt7925_mcu_handle_hif_ctrl_basic(dev, tlv);
++                      break;
++              default:
++                      break;
++              }
++              tlv_len -= le16_to_cpu(tlv->len);
++              tlv = (struct tlv *)((char *)(tlv) + le16_to_cpu(tlv->len));
++      }
++}
++
++static void
+ mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb)
+ {
+       struct tlv *tlv;
+@@ -487,6 +531,9 @@ mt7925_mcu_uni_rx_unsolicited_event(stru
+       rxd = (struct mt7925_mcu_rxd *)skb->data;
+       switch (rxd->eid) {
++      case MCU_UNI_EVENT_HIF_CTRL:
++              mt7925_mcu_uni_hif_ctrl_event(dev, skb);
++              break;
+       case MCU_UNI_EVENT_FW_LOG_2_HOST:
+               mt7925_mcu_uni_debug_msg_event(dev, skb);
+               break;
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+@@ -27,6 +27,26 @@
+ #define MCU_UNI_EVENT_ROC  0x27
++#define HIF_TRAFFIC_IDLE 0x2
++
++enum {
++      UNI_EVENT_HIF_CTRL_BASIC = 0,
++      UNI_EVENT_HIF_CTRL_TAG_NUM
++};
++
++struct mt7925_mcu_hif_ctrl_basic_tlv {
++      __le16 tag;
++      __le16 len;
++      u8 cid;
++      u8 pad[3];
++      u32 status;
++      u8 hif_type;
++      u8 hif_tx_traffic_status;
++      u8 hif_rx_traffic_status;
++      u8 hifsuspend;
++      u8 rsv[4];
++} __packed;
++
+ enum {
+       UNI_ROC_ACQUIRE,
+       UNI_ROC_ABORT,
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+@@ -442,9 +442,10 @@ static int mt7925_pci_suspend(struct dev
+       struct mt76_dev *mdev = pci_get_drvdata(pdev);
+       struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+       struct mt76_connac_pm *pm = &dev->pm;
+-      int i, err;
++      int i, err, ret;
+       pm->suspended = true;
++      dev->hif_resumed = false;
+       flush_work(&dev->reset_work);
+       cancel_delayed_work_sync(&pm->ps_work);
+       cancel_work_sync(&pm->wake_work);
+@@ -463,9 +464,13 @@ static int mt7925_pci_suspend(struct dev
+        */
+       mt7925_mcu_set_deep_sleep(dev, true);
+-      err = mt76_connac_mcu_set_hif_suspend(mdev, true);
+-      if (err)
++      mt76_connac_mcu_set_hif_suspend(mdev, true, false);
++      ret = wait_event_timeout(dev->wait,
++                               dev->hif_idle, 3 * HZ);
++      if (!ret) {
++              err = -ETIMEDOUT;
+               goto restore_suspend;
++      }
+       napi_disable(&mdev->tx_napi);
+       mt76_worker_disable(&mdev->tx_worker);
+@@ -506,8 +511,11 @@ restore_napi:
+       if (!pm->ds_enable)
+               mt7925_mcu_set_deep_sleep(dev, false);
+-      mt76_connac_mcu_set_hif_suspend(mdev, false);
+-
++      mt76_connac_mcu_set_hif_suspend(mdev, false, false);
++      ret = wait_event_timeout(dev->wait,
++                               dev->hif_resumed, 3 * HZ);
++      if (!ret)
++              err = -ETIMEDOUT;
+ restore_suspend:
+       pm->suspended = false;
+@@ -523,8 +531,9 @@ static int mt7925_pci_resume(struct devi
+       struct mt76_dev *mdev = pci_get_drvdata(pdev);
+       struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+       struct mt76_connac_pm *pm = &dev->pm;
+-      int i, err;
++      int i, err, ret;
++      dev->hif_idle = false;
+       err = mt792x_mcu_drv_pmctrl(dev);
+       if (err < 0)
+               goto failed;
+@@ -553,9 +562,13 @@ static int mt7925_pci_resume(struct devi
+       napi_schedule(&mdev->tx_napi);
+       local_bh_enable();
+-      err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+-      if (err < 0)
++      mt76_connac_mcu_set_hif_suspend(mdev, false, false);
++      ret = wait_event_timeout(dev->wait,
++                               dev->hif_resumed, 3 * HZ);
++      if (!ret) {
++              err = -ETIMEDOUT;
+               goto failed;
++      }
+       /* restore previous ds setting */
+       if (!pm->ds_enable)
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+@@ -246,14 +246,19 @@ static int mt7925u_suspend(struct usb_in
+ {
+       struct mt792x_dev *dev = usb_get_intfdata(intf);
+       struct mt76_connac_pm *pm = &dev->pm;
+-      int err;
++      int err, ret;
+       pm->suspended = true;
++      dev->hif_resumed = false;
+       flush_work(&dev->reset_work);
+-      err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
+-      if (err)
++      mt76_connac_mcu_set_hif_suspend(&dev->mt76, true, false);
++      ret = wait_event_timeout(dev->wait,
++                               dev->hif_idle, 3 * HZ);
++      if (!ret) {
++              err = -ETIMEDOUT;
+               goto failed;
++      }
+       mt76u_stop_rx(&dev->mt76);
+       mt76u_stop_tx(&dev->mt76);
+@@ -274,8 +279,9 @@ static int mt7925u_resume(struct usb_int
+       struct mt792x_dev *dev = usb_get_intfdata(intf);
+       struct mt76_connac_pm *pm = &dev->pm;
+       bool reinit = true;
+-      int err, i;
++      int err, i, ret;
++      dev->hif_idle = false;
+       for (i = 0; i < 10; i++) {
+               u32 val = mt76_rr(dev, MT_WF_SW_DEF_CR_USB_MCU_EVENT);
+@@ -301,7 +307,11 @@ static int mt7925u_resume(struct usb_int
+       if (err < 0)
+               goto failed;
+-      err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
++      mt76_connac_mcu_set_hif_suspend(&dev->mt76, false, false);
++      ret = wait_event_timeout(dev->wait,
++                               dev->hif_resumed, 3 * HZ);
++      if (!ret)
++              err = -ETIMEDOUT;
+ failed:
+       pm->suspended = false;
+--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
++++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
+@@ -216,6 +216,8 @@ struct mt792x_dev {
+       bool has_eht:1;
+       bool regd_in_progress:1;
+       bool aspm_supported:1;
++      bool hif_idle:1;
++      bool hif_resumed:1;
+       wait_queue_head_t wait;
+       struct work_struct init_work;
diff --git a/queue-6.12/wifi-mt76-mt7925-fix-clc-command-timeout-when-suspend-resume.patch b/queue-6.12/wifi-mt76-mt7925-fix-clc-command-timeout-when-suspend-resume.patch
new file mode 100644 (file)
index 0000000..34c3dfd
--- /dev/null
@@ -0,0 +1,103 @@
+From fm-294854-2026010511163001e6bc2d2e00020755-pP_Jh5@rts-flowmailer.siemens.com Mon Jan  5 12:16:31 2026
+From: Jan Kiszka <jan.kiszka@siemens.com>
+Date: Mon,  5 Jan 2026 12:16:27 +0100
+Subject: wifi: mt76: mt7925: fix CLC command timeout when suspend/resume
+To: stable@vger.kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Felix Fietkau <nbd@nbd.name>, Quan Zhou <quan.zhou@mediatek.com>
+Cc: Sasha Levin <sashal@kernel.org>, linux-kernel@vger.kernel.org, linux-wireless@vger.kernel.org
+Message-ID: <95428f78ab30c93a150999e430f07e7047d95adb.1767611788.git.jan.kiszka@siemens.com>
+
+From: Quan Zhou <quan.zhou@mediatek.com>
+
+[ Upstream commit a0f721b8d986b62b4de316444f2b2e356d17e3b5 ]
+
+When entering suspend/resume while in a connected state, the upper
+layer triggers disconnection before entering suspend and, at the same
+time, triggers regd_notifier() to update the CLC. Because the device
+is already suspending, the CLC event is never received, resulting in a
+command timeout.
+
+Therefore, postpone the CLC update until resume, to ensure data
+consistency and avoid the command timeout.
+
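+A condensed sketch of the deferral this patch applies; the function and
+field names are the ones in the diff below, and the surrounding driver
+context, locking and error handling are elided:
+
+	/* regd_notifier(): while suspended, only record the regulatory
+	 * state and let the resume path replay the update
+	 */
+	if (pm->suspended)
+		return;
+
+	mt7925_regd_update(dev);	/* CLC + channel domain + SAR */
+
+	/* mt7925_pci_resume(): replay the postponed regulatory update */
+	mt7925_regd_update(dev);
+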
+Signed-off-by: Quan Zhou <quan.zhou@mediatek.com>
+Link: https://patch.msgid.link/bab00a2805d0533fd8beaa059222659858a9dcb5.1735910455.git.quan.zhou@mediatek.com
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7925/init.c   |   20 +++++++++++++++++---
+ drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h |    1 +
+ drivers/net/wireless/mediatek/mt76/mt7925/pci.c    |    3 +++
+ 3 files changed, 21 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+@@ -59,6 +59,18 @@ static int mt7925_thermal_init(struct mt
+                                                      mt7925_hwmon_groups);
+       return PTR_ERR_OR_ZERO(hwmon);
+ }
++
++void mt7925_regd_update(struct mt792x_dev *dev)
++{
++      struct mt76_dev *mdev = &dev->mt76;
++      struct ieee80211_hw *hw = mdev->hw;
++
++      mt7925_mcu_set_clc(dev, mdev->alpha2, dev->country_ie_env);
++      mt7925_mcu_set_channel_domain(hw->priv);
++      mt7925_set_tx_sar_pwr(hw, NULL);
++}
++EXPORT_SYMBOL_GPL(mt7925_regd_update);
++
+ static void
+ mt7925_regd_notifier(struct wiphy *wiphy,
+                    struct regulatory_request *req)
+@@ -66,6 +78,7 @@ mt7925_regd_notifier(struct wiphy *wiphy
+       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+       struct mt792x_dev *dev = mt792x_hw_dev(hw);
+       struct mt76_dev *mdev = &dev->mt76;
++      struct mt76_connac_pm *pm = &dev->pm;
+       /* allow world regdom at the first boot only */
+       if (!memcmp(req->alpha2, "00", 2) &&
+@@ -81,11 +94,12 @@ mt7925_regd_notifier(struct wiphy *wiphy
+       mdev->region = req->dfs_region;
+       dev->country_ie_env = req->country_ie_env;
++      if (pm->suspended)
++              return;
++
+       dev->regd_in_progress = true;
+       mt792x_mutex_acquire(dev);
+-      mt7925_mcu_set_clc(dev, req->alpha2, req->country_ie_env);
+-      mt7925_mcu_set_channel_domain(hw->priv);
+-      mt7925_set_tx_sar_pwr(hw, NULL);
++      mt7925_regd_update(dev);
+       mt792x_mutex_release(dev);
+       dev->regd_in_progress = false;
+       wake_up(&dev->wait);
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+@@ -218,6 +218,7 @@ int mt7925_mcu_chip_config(struct mt792x
+ int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
+                           u8 bit_op, u32 bit_map);
++void mt7925_regd_update(struct mt792x_dev *dev);
+ int mt7925_mac_init(struct mt792x_dev *dev);
+ int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta);
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+@@ -554,11 +554,14 @@ static int mt7925_pci_resume(struct devi
+       local_bh_enable();
+       err = mt76_connac_mcu_set_hif_suspend(mdev, false);
++      if (err < 0)
++              goto failed;
+       /* restore previous ds setting */
+       if (!pm->ds_enable)
+               mt7925_mcu_set_deep_sleep(dev, false);
++      mt7925_regd_update(dev);
+ failed:
+       pm->suspended = false;
diff --git a/queue-6.12/wifi-mt76-mt7925-fix-the-unfinished-command-of-regd_notifier-before-suspend.patch b/queue-6.12/wifi-mt76-mt7925-fix-the-unfinished-command-of-regd_notifier-before-suspend.patch
new file mode 100644 (file)
index 0000000..c05a032
--- /dev/null
@@ -0,0 +1,63 @@
+From fm-294854-20260105111630c51a7006be000207bb-Uu0BLy@rts-flowmailer.siemens.com Mon Jan  5 12:16:31 2026
+From: Jan Kiszka <jan.kiszka@siemens.com>
+Date: Mon,  5 Jan 2026 12:16:26 +0100
+Subject: wifi: mt76: mt7925: fix the unfinished command of regd_notifier before suspend
+To: stable@vger.kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Felix Fietkau <nbd@nbd.name>, Quan Zhou <quan.zhou@mediatek.com>
+Cc: Sasha Levin <sashal@kernel.org>, linux-kernel@vger.kernel.org, linux-wireless@vger.kernel.org
+Message-ID: <9b06a0703d4a825b1db17f3213814ddc42644a5b.1767611788.git.jan.kiszka@siemens.com>
+
+From: Quan Zhou <quan.zhou@mediatek.com>
+
+[ Upstream commit 1b97fc8443aea01922560de9f24a6383e6eb6ae8 ]
+
+Before entering suspend, we need to ensure that all MCU commands have
+completed. In some cases, such as with regd_notifier(), CLC commands
+may still be in flight when suspend begins.
+
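+The mechanism is a plain flag-plus-waitqueue handshake; a condensed
+sketch using the names from the diff below (waitqueue initialization
+and the MCU calls themselves are elided):
+
+	/* regd_notifier(): mark the CLC exchange as in flight */
+	dev->regd_in_progress = true;
+	/* ... mt7925_mcu_set_clc() and friends ... */
+	dev->regd_in_progress = false;
+	wake_up(&dev->wait);
+
+	/* suspend path: give an in-flight exchange up to 5s to finish */
+	wait_event_timeout(dev->wait, !dev->regd_in_progress, 5 * HZ);
+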
+Signed-off-by: Quan Zhou <quan.zhou@mediatek.com>
+Link: https://patch.msgid.link/3af7b4e5bf7437832b016e32743657d1d55b1f9d.1735910288.git.quan.zhou@mediatek.com
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7925/init.c |    4 ++++
+ drivers/net/wireless/mediatek/mt76/mt7925/pci.c  |    3 +++
+ 2 files changed, 7 insertions(+)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+@@ -81,11 +81,14 @@ mt7925_regd_notifier(struct wiphy *wiphy
+       mdev->region = req->dfs_region;
+       dev->country_ie_env = req->country_ie_env;
++      dev->regd_in_progress = true;
+       mt792x_mutex_acquire(dev);
+       mt7925_mcu_set_clc(dev, req->alpha2, req->country_ie_env);
+       mt7925_mcu_set_channel_domain(hw->priv);
+       mt7925_set_tx_sar_pwr(hw, NULL);
+       mt792x_mutex_release(dev);
++      dev->regd_in_progress = false;
++      wake_up(&dev->wait);
+ }
+ static void mt7925_mac_init_basic_rates(struct mt792x_dev *dev)
+@@ -235,6 +238,7 @@ int mt7925_register_device(struct mt792x
+       spin_lock_init(&dev->pm.wake.lock);
+       mutex_init(&dev->pm.mutex);
+       init_waitqueue_head(&dev->pm.wait);
++      init_waitqueue_head(&dev->wait);
+       spin_lock_init(&dev->pm.txq_lock);
+       INIT_DELAYED_WORK(&dev->mphy.mac_work, mt792x_mac_work);
+       INIT_DELAYED_WORK(&dev->phy.scan_work, mt7925_scan_work);
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+@@ -455,6 +455,9 @@ static int mt7925_pci_suspend(struct dev
+       if (err < 0)
+               goto restore_suspend;
++      wait_event_timeout(dev->wait,
++                         !dev->regd_in_progress, 5 * HZ);
++
+       /* always enable deep sleep during suspend to reduce
+        * power consumption
+        */
diff --git a/queue-6.12/x86-microcode-amd-select-which-microcode-patch-to-load.patch b/queue-6.12/x86-microcode-amd-select-which-microcode-patch-to-load.patch
new file mode 100644 (file)
index 0000000..06b3804
--- /dev/null
@@ -0,0 +1,164 @@
+From 8d171045069c804e5ffaa18be590c42c6af0cf3f Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Thu, 25 Sep 2025 13:46:00 +0200
+Subject: x86/microcode/AMD: Select which microcode patch to load
+
+From: Borislav Petkov (AMD) <bp@alien8.de>
+
+commit 8d171045069c804e5ffaa18be590c42c6af0cf3f upstream.
+
+All microcode patches up to the proper BIOS Entrysign fix are loaded
+only after the sha256 signature carried in the driver has been verified.
+
+Microcode patches released after the Entrysign fix do not need that
+signature verification anymore.
+
+In order to not abandon machines which haven't received the BIOS update
+yet, add the capability to select which microcode patch to load.
+
+The corresponding microcode container supplied through firmware-linux
+has been modified to carry two patches per CPU type
+(family/model/stepping) so that the proper one gets selected.
+
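+The check added to verify_patch() below reduces to "the current
+revision and the candidate patch revision must sit on the same side of
+the per-family cutoff"; an equivalent, condensed sketch (return value
+1 means "skip this patch", as elsewhere in verify_patch()):
+
+	u32 cutoff = get_cutoff_revision(cur_rev);
+
+	if (!cutoff)		/* family unaffected by Entrysign */
+		return 0;
+
+	/* both pre-fix (sha256-verified) or both post-fix: acceptable */
+	if ((cur_rev <= cutoff) == (patch_rev <= cutoff))
+		return 0;
+
+	return 1;		/* mixed: not the right patch */
+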
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: Waiman Long <longman@redhat.com>
+Link: https://patch.msgid.link/20251027133818.4363-1-bp@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/microcode/amd.c |  104 +++++++++++++++++++++++-------------
+ 1 file changed, 67 insertions(+), 37 deletions(-)
+
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -174,50 +174,61 @@ static u32 cpuid_to_ucode_rev(unsigned i
+       return p.ucode_rev;
+ }
++static u32 get_cutoff_revision(u32 rev)
++{
++      switch (rev >> 8) {
++      case 0x80012: return 0x8001277; break;
++      case 0x80082: return 0x800820f; break;
++      case 0x83010: return 0x830107c; break;
++      case 0x86001: return 0x860010e; break;
++      case 0x86081: return 0x8608108; break;
++      case 0x87010: return 0x8701034; break;
++      case 0x8a000: return 0x8a0000a; break;
++      case 0xa0010: return 0xa00107a; break;
++      case 0xa0011: return 0xa0011da; break;
++      case 0xa0012: return 0xa001243; break;
++      case 0xa0082: return 0xa00820e; break;
++      case 0xa1011: return 0xa101153; break;
++      case 0xa1012: return 0xa10124e; break;
++      case 0xa1081: return 0xa108109; break;
++      case 0xa2010: return 0xa20102f; break;
++      case 0xa2012: return 0xa201212; break;
++      case 0xa4041: return 0xa404109; break;
++      case 0xa5000: return 0xa500013; break;
++      case 0xa6012: return 0xa60120a; break;
++      case 0xa7041: return 0xa704109; break;
++      case 0xa7052: return 0xa705208; break;
++      case 0xa7080: return 0xa708009; break;
++      case 0xa70c0: return 0xa70C009; break;
++      case 0xaa001: return 0xaa00116; break;
++      case 0xaa002: return 0xaa00218; break;
++      case 0xb0021: return 0xb002146; break;
++      case 0xb0081: return 0xb008111; break;
++      case 0xb1010: return 0xb101046; break;
++      case 0xb2040: return 0xb204031; break;
++      case 0xb4040: return 0xb404031; break;
++      case 0xb4041: return 0xb404101; break;
++      case 0xb6000: return 0xb600031; break;
++      case 0xb6080: return 0xb608031; break;
++      case 0xb7000: return 0xb700031; break;
++      default: break;
++
++      }
++      return 0;
++}
++
+ static bool need_sha_check(u32 cur_rev)
+ {
++      u32 cutoff;
++
+       if (!cur_rev) {
+               cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);
+               pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev);
+       }
+-      switch (cur_rev >> 8) {
+-      case 0x80012: return cur_rev <= 0x8001277; break;
+-      case 0x80082: return cur_rev <= 0x800820f; break;
+-      case 0x83010: return cur_rev <= 0x830107c; break;
+-      case 0x86001: return cur_rev <= 0x860010e; break;
+-      case 0x86081: return cur_rev <= 0x8608108; break;
+-      case 0x87010: return cur_rev <= 0x8701034; break;
+-      case 0x8a000: return cur_rev <= 0x8a0000a; break;
+-      case 0xa0010: return cur_rev <= 0xa00107a; break;
+-      case 0xa0011: return cur_rev <= 0xa0011da; break;
+-      case 0xa0012: return cur_rev <= 0xa001243; break;
+-      case 0xa0082: return cur_rev <= 0xa00820e; break;
+-      case 0xa1011: return cur_rev <= 0xa101153; break;
+-      case 0xa1012: return cur_rev <= 0xa10124e; break;
+-      case 0xa1081: return cur_rev <= 0xa108109; break;
+-      case 0xa2010: return cur_rev <= 0xa20102f; break;
+-      case 0xa2012: return cur_rev <= 0xa201212; break;
+-      case 0xa4041: return cur_rev <= 0xa404109; break;
+-      case 0xa5000: return cur_rev <= 0xa500013; break;
+-      case 0xa6012: return cur_rev <= 0xa60120a; break;
+-      case 0xa7041: return cur_rev <= 0xa704109; break;
+-      case 0xa7052: return cur_rev <= 0xa705208; break;
+-      case 0xa7080: return cur_rev <= 0xa708009; break;
+-      case 0xa70c0: return cur_rev <= 0xa70C009; break;
+-      case 0xaa001: return cur_rev <= 0xaa00116; break;
+-      case 0xaa002: return cur_rev <= 0xaa00218; break;
+-      case 0xb0021: return cur_rev <= 0xb002146; break;
+-      case 0xb0081: return cur_rev <= 0xb008111; break;
+-      case 0xb1010: return cur_rev <= 0xb101046; break;
+-      case 0xb2040: return cur_rev <= 0xb204031; break;
+-      case 0xb4040: return cur_rev <= 0xb404031; break;
+-      case 0xb4041: return cur_rev <= 0xb404101; break;
+-      case 0xb6000: return cur_rev <= 0xb600031; break;
+-      case 0xb6080: return cur_rev <= 0xb608031; break;
+-      case 0xb7000: return cur_rev <= 0xb700031; break;
+-      default: break;
+-      }
++      cutoff = get_cutoff_revision(cur_rev);
++      if (cutoff)
++              return cur_rev <= cutoff;
+       pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
+       pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
+@@ -468,6 +479,7 @@ static int verify_patch(const u8 *buf, s
+ {
+       u8 family = x86_family(bsp_cpuid_1_eax);
+       struct microcode_header_amd *mc_hdr;
++      u32 cur_rev, cutoff, patch_rev;
+       unsigned int ret;
+       u32 sh_psize;
+       u16 proc_id;
+@@ -511,6 +523,24 @@ static int verify_patch(const u8 *buf, s
+       if (patch_fam != family)
+               return 1;
++      cur_rev = get_patch_level();
++
++      /* No cutoff revision means old/unaffected by signing algorithm weakness => matches */
++      cutoff = get_cutoff_revision(cur_rev);
++      if (!cutoff)
++              goto ok;
++
++      patch_rev = mc_hdr->patch_id;
++
++      if (cur_rev <= cutoff && patch_rev <= cutoff)
++              goto ok;
++
++      if (cur_rev > cutoff && patch_rev > cutoff)
++              goto ok;
++
++      return 1;
++ok:
++
+       return 0;
+ }
diff --git a/queue-6.12/xhci-dbgtty-fix-device-unregister-fixup.patch b/queue-6.12/xhci-dbgtty-fix-device-unregister-fixup.patch
new file mode 100644 (file)
index 0000000..1e82c20
--- /dev/null
@@ -0,0 +1,40 @@
+From stable+bounces-204274-greg=kroah.com@vger.kernel.org Tue Dec 30 19:32:47 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Dec 2025 13:32:37 -0500
+Subject: xhci: dbgtty: fix device unregister: fixup
+To: stable@vger.kernel.org
+Cc: "Łukasz Bartosik" <ukaszb@chromium.org>, stable <stable@kernel.org>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20251230183237.2393657-2-sashal@kernel.org>
+
+From: Łukasz Bartosik <ukaszb@chromium.org>
+
+[ Upstream commit 74098cc06e753d3ffd8398b040a3a1dfb65260c0 ]
+
+This fixup replaces the tty_vhangup() call with a call to
+tty_port_tty_vhangup(). Both calls hang up the tty device
+synchronously; however, tty_port_tty_vhangup() additionally takes a
+reference on the tty for the duration of the hangup operation, using
+scoped_guard(tty_port_tty).
+
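+For reference, the effect of the helper can be sketched with the
+long-standing tty_port refcounting API; this is an illustration only,
+the real helper is implemented via scoped_guard(tty_port_tty):
+
+	struct tty_struct *tty = tty_port_tty_get(&port->port);
+
+	if (tty) {
+		tty_vhangup(tty);	/* tty pinned during the hangup */
+		tty_kref_put(tty);
+	}
+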
+Cc: stable <stable@kernel.org>
+Fixes: 1f73b8b56cf3 ("xhci: dbgtty: fix device unregister")
+Signed-off-by: Łukasz Bartosik <ukaszb@chromium.org>
+Link: https://patch.msgid.link/20251127111644.3161386-1-ukaszb@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-dbgtty.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/host/xhci-dbgtty.c
++++ b/drivers/usb/host/xhci-dbgtty.c
+@@ -522,7 +522,7 @@ static void xhci_dbc_tty_unregister_devi
+        * Hang up the TTY. This wakes up any blocked
+        * writers and causes subsequent writes to fail.
+        */
+-      tty_vhangup(port->port.tty);
++      tty_port_tty_vhangup(&port->port);
+       tty_unregister_device(dbc_tty_driver, port->minor);
+       xhci_dbc_tty_exit_port(port);