6.6-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 10 Dec 2024 09:35:53 +0000 (10:35 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 10 Dec 2024 09:35:53 +0000 (10:35 +0100)
added patches:
bpf-fix-oob-devmap-writes-when-deleting-elements.patch
cacheinfo-allocate-memory-during-cpu-hotplug-if-not-done-from-the-primary-cpu.patch
dma-buf-fix-dma_fence_array_signaled-v4.patch
dma-fence-fix-reference-leak-on-fence-merge-failure-path.patch
dma-fence-use-kernel-s-sort-for-merging-fences.patch
drm-amdgpu-hdp5.2-do-a-posting-read-when-flushing-hdp.patch
drm-dp_mst-fix-mst-sideband-message-body-length-check.patch
drm-dp_mst-fix-resetting-msg-rx-state-after-topology-removal.patch
drm-dp_mst-verify-request-type-in-the-corresponding-down-message-reply.patch
mmc-core-further-prevent-card-detect-during-shutdown.patch
mmc-sdhci-pci-add-dmi-quirk-for-missing-cd-gpio-on-vexia-edu-atla-10-tablet.patch
modpost-add-.irqentry.text-to-other_sections.patch
regmap-detach-regmap-from-dev-on-regmap_exit.patch
x86-kexec-restore-gdt-on-return-from-preserve_context-kexec.patch
xsk-fix-oob-map-writes-when-deleting-elements.patch

16 files changed:
queue-6.6/bpf-fix-oob-devmap-writes-when-deleting-elements.patch [new file with mode: 0644]
queue-6.6/cacheinfo-allocate-memory-during-cpu-hotplug-if-not-done-from-the-primary-cpu.patch [new file with mode: 0644]
queue-6.6/dma-buf-fix-dma_fence_array_signaled-v4.patch [new file with mode: 0644]
queue-6.6/dma-fence-fix-reference-leak-on-fence-merge-failure-path.patch [new file with mode: 0644]
queue-6.6/dma-fence-use-kernel-s-sort-for-merging-fences.patch [new file with mode: 0644]
queue-6.6/drm-amdgpu-hdp5.2-do-a-posting-read-when-flushing-hdp.patch [new file with mode: 0644]
queue-6.6/drm-dp_mst-fix-mst-sideband-message-body-length-check.patch [new file with mode: 0644]
queue-6.6/drm-dp_mst-fix-resetting-msg-rx-state-after-topology-removal.patch [new file with mode: 0644]
queue-6.6/drm-dp_mst-verify-request-type-in-the-corresponding-down-message-reply.patch [new file with mode: 0644]
queue-6.6/mmc-core-further-prevent-card-detect-during-shutdown.patch [new file with mode: 0644]
queue-6.6/mmc-sdhci-pci-add-dmi-quirk-for-missing-cd-gpio-on-vexia-edu-atla-10-tablet.patch [new file with mode: 0644]
queue-6.6/modpost-add-.irqentry.text-to-other_sections.patch [new file with mode: 0644]
queue-6.6/regmap-detach-regmap-from-dev-on-regmap_exit.patch [new file with mode: 0644]
queue-6.6/series
queue-6.6/x86-kexec-restore-gdt-on-return-from-preserve_context-kexec.patch [new file with mode: 0644]
queue-6.6/xsk-fix-oob-map-writes-when-deleting-elements.patch [new file with mode: 0644]

diff --git a/queue-6.6/bpf-fix-oob-devmap-writes-when-deleting-elements.patch b/queue-6.6/bpf-fix-oob-devmap-writes-when-deleting-elements.patch
new file mode 100644 (file)
index 0000000..314e1c6
--- /dev/null
@@ -0,0 +1,110 @@
+From ab244dd7cf4c291f82faacdc50b45cc0f55b674d Mon Sep 17 00:00:00 2001
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Date: Fri, 22 Nov 2024 13:10:30 +0100
+Subject: bpf: fix OOB devmap writes when deleting elements
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+commit ab244dd7cf4c291f82faacdc50b45cc0f55b674d upstream.
+
+Jordy reported an issue against XSKMAP which also applies to DEVMAP - the
+index used for accessing a map entry, being a signed integer, causes OOB
+writes. The fix is as simple as changing the type from int to u32;
+however, compared to the XSKMAP case, one more thing needs to be
+addressed.
+
+When the map is released from the system via dev_map_free(), we iterate
+through all of the entries, and the iterator variable is also an int,
+which implies OOB accesses. Again, change it to u32.
+
+Example splat below:
+
+[  160.724676] BUG: unable to handle page fault for address: ffffc8fc2c001000
+[  160.731662] #PF: supervisor read access in kernel mode
+[  160.736876] #PF: error_code(0x0000) - not-present page
+[  160.742095] PGD 0 P4D 0
+[  160.744678] Oops: Oops: 0000 [#1] PREEMPT SMP
+[  160.749106] CPU: 1 UID: 0 PID: 520 Comm: kworker/u145:12 Not tainted 6.12.0-rc1+ #487
+[  160.757050] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0008.031920191559 03/19/2019
+[  160.767642] Workqueue: events_unbound bpf_map_free_deferred
+[  160.773308] RIP: 0010:dev_map_free+0x77/0x170
+[  160.777735] Code: 00 e8 fd 91 ed ff e8 b8 73 ed ff 41 83 7d 18 19 74 6e 41 8b 45 24 49 8b bd f8 00 00 00 31 db 85 c0 74 48 48 63 c3 48 8d 04 c7 <48> 8b 28 48 85 ed 74 30 48 8b 7d 18 48 85 ff 74 05 e8 b3 52 fa ff
+[  160.796777] RSP: 0018:ffffc9000ee1fe38 EFLAGS: 00010202
+[  160.802086] RAX: ffffc8fc2c001000 RBX: 0000000080000000 RCX: 0000000000000024
+[  160.809331] RDX: 0000000000000000 RSI: 0000000000000024 RDI: ffffc9002c001000
+[  160.816576] RBP: 0000000000000000 R08: 0000000000000023 R09: 0000000000000001
+[  160.823823] R10: 0000000000000001 R11: 00000000000ee6b2 R12: dead000000000122
+[  160.831066] R13: ffff88810c928e00 R14: ffff8881002df405 R15: 0000000000000000
+[  160.838310] FS:  0000000000000000(0000) GS:ffff8897e0c40000(0000) knlGS:0000000000000000
+[  160.846528] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[  160.852357] CR2: ffffc8fc2c001000 CR3: 0000000005c32006 CR4: 00000000007726f0
+[  160.859604] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[  160.866847] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[  160.874092] PKRU: 55555554
+[  160.876847] Call Trace:
+[  160.879338]  <TASK>
+[  160.881477]  ? __die+0x20/0x60
+[  160.884586]  ? page_fault_oops+0x15a/0x450
+[  160.888746]  ? search_extable+0x22/0x30
+[  160.892647]  ? search_bpf_extables+0x5f/0x80
+[  160.896988]  ? exc_page_fault+0xa9/0x140
+[  160.900973]  ? asm_exc_page_fault+0x22/0x30
+[  160.905232]  ? dev_map_free+0x77/0x170
+[  160.909043]  ? dev_map_free+0x58/0x170
+[  160.912857]  bpf_map_free_deferred+0x51/0x90
+[  160.917196]  process_one_work+0x142/0x370
+[  160.921272]  worker_thread+0x29e/0x3b0
+[  160.925082]  ? rescuer_thread+0x4b0/0x4b0
+[  160.929157]  kthread+0xd4/0x110
+[  160.932355]  ? kthread_park+0x80/0x80
+[  160.936079]  ret_from_fork+0x2d/0x50
+[  160.943396]  ? kthread_park+0x80/0x80
+[  160.950803]  ret_from_fork_asm+0x11/0x20
+[  160.958482]  </TASK>
+
+Fixes: 546ac1ffb70d ("bpf: add devmap, a map for storing net device references")
+CC: stable@vger.kernel.org
+Reported-by: Jordy Zomer <jordyzomer@google.com>
+Suggested-by: Jordy Zomer <jordyzomer@google.com>
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Link: https://lore.kernel.org/r/20241122121030.716788-3-maciej.fijalkowski@intel.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/devmap.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -180,7 +180,7 @@ static struct bpf_map *dev_map_alloc(uni
+ static void dev_map_free(struct bpf_map *map)
+ {
+       struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+-      int i;
++      u32 i;
+       /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
+        * so the programs (can be more than one that used this map) were
+@@ -813,7 +813,7 @@ static long dev_map_delete_elem(struct b
+ {
+       struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+       struct bpf_dtab_netdev *old_dev;
+-      int k = *(u32 *)key;
++      u32 k = *(u32 *)key;
+       if (k >= map->max_entries)
+               return -EINVAL;
+@@ -830,7 +830,7 @@ static long dev_map_hash_delete_elem(str
+ {
+       struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+       struct bpf_dtab_netdev *old_dev;
+-      int k = *(u32 *)key;
++      u32 k = *(u32 *)key;
+       unsigned long flags;
+       int ret = -ENOENT;
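
A minimal userspace sketch of the bug class described in the message above
(hypothetical values, not kernel code): once a map holds more than INT_MAX
entries, a key such as 0x80000000 still passes the "k >= max_entries" bounds
check, but stored in an int it becomes a negative array index.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t max_entries = 0x80000002u;	/* > INT_MAX, as the report requires */
	uint32_t key = 0x80000000u;		/* user-controlled key */

	int k_signed = (int)key;	/* old pattern: int k = *(u32 *)key;
					 * wraps to INT_MIN on typical targets */
	uint32_t k_unsigned = key;	/* fixed pattern: u32 k = *(u32 *)key; */

	/* Both forms pass the bounds check ... */
	printf("signed passes check:   %d\n", (uint32_t)k_signed < max_entries);
	printf("unsigned passes check: %d\n", k_unsigned < max_entries);

	/* ... but only the signed one turns into a negative array index. */
	printf("signed index:   %lld\n", (long long)k_signed);		/* -2147483648 */
	printf("unsigned index: %lld\n", (long long)k_unsigned);	/*  2147483648 */
	return 0;
}
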
diff --git a/queue-6.6/cacheinfo-allocate-memory-during-cpu-hotplug-if-not-done-from-the-primary-cpu.patch b/queue-6.6/cacheinfo-allocate-memory-during-cpu-hotplug-if-not-done-from-the-primary-cpu.patch
new file mode 100644 (file)
index 0000000..dbafb9d
--- /dev/null
@@ -0,0 +1,103 @@
+From b3fce429a1e030b50c1c91351d69b8667eef627b Mon Sep 17 00:00:00 2001
+From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Date: Wed, 27 Nov 2024 16:22:46 -0800
+Subject: cacheinfo: Allocate memory during CPU hotplug if not done from the primary CPU
+
+From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+
+commit b3fce429a1e030b50c1c91351d69b8667eef627b upstream.
+
+Commit
+
+  5944ce092b97 ("arch_topology: Build cacheinfo from primary CPU")
+
+adds functionality that architectures can use to optionally allocate and
+build cacheinfo early during boot. Commit
+
+  6539cffa9495 ("cacheinfo: Add arch specific early level initializer")
+
+lets secondary CPUs correct (and reallocate memory) cacheinfo data if
+needed.
+
+If the early build functionality is not used and cacheinfo does not need
+correction, memory for cacheinfo is never allocated. x86 does not use
+the early build functionality. Consequently, during the cacheinfo CPU
+hotplug callback, last_level_cache_is_valid() attempts to dereference
+a NULL pointer:
+
+  BUG: kernel NULL pointer dereference, address: 0000000000000100
+  #PF: supervisor read access in kernel mode
+  #PF: error_code(0x0000) - not present page
+  PGD 0 P4D 0
+  Oops: 0000 [#1] PREEPMT SMP NOPTI
+  CPU: 0 PID 19 Comm: cpuhp/0 Not tainted 6.4.0-rc2 #1
+  RIP: 0010: last_level_cache_is_valid+0x95/0xe0a
+
+Allocate memory for cacheinfo during the cacheinfo CPU hotplug callback
+if not done earlier.
+
+Moreover, before determining the validity of the last-level cache info,
+ensure that it has been allocated. Simply checking for non-zero
+cache_leaves() is not sufficient, as some architectures (e.g., Intel
+processors) have non-zero cache_leaves() before allocation.
+
+Dereferencing NULL cacheinfo can occur in update_per_cpu_data_slice_size().
+This function iterates over all online CPUs. However, a CPU may have come
+online recently, but its cacheinfo may not have been allocated yet.
+
+While here, remove an unnecessary indentation in allocate_cache_info().
+
+  [ bp: Massage. ]
+
+Fixes: 6539cffa9495 ("cacheinfo: Add arch specific early level initializer")
+Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Radu Rendec <rrendec@redhat.com>
+Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
+Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
+Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
+Cc: stable@vger.kernel.org # 6.3+
+Link: https://lore.kernel.org/r/20241128002247.26726-2-ricardo.neri-calderon@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/cacheinfo.c |   14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -58,7 +58,7 @@ bool last_level_cache_is_valid(unsigned
+ {
+       struct cacheinfo *llc;
+-      if (!cache_leaves(cpu))
++      if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
+               return false;
+       llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
+@@ -478,11 +478,9 @@ int __weak populate_cache_leaves(unsigne
+       return -ENOENT;
+ }
+-static inline
+-int allocate_cache_info(int cpu)
++static inline int allocate_cache_info(int cpu)
+ {
+-      per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
+-                                       sizeof(struct cacheinfo), GFP_ATOMIC);
++      per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC);
+       if (!per_cpu_cacheinfo(cpu)) {
+               cache_leaves(cpu) = 0;
+               return -ENOMEM;
+@@ -554,7 +552,11 @@ static inline int init_level_allocate_ci
+        */
+       ci_cacheinfo(cpu)->early_ci_levels = false;
+-      if (cache_leaves(cpu) <= early_leaves)
++      /*
++       * Some architectures (e.g., x86) do not use early initialization.
++       * Allocate memory now in such case.
++       */
++      if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
+               return 0;
+       kfree(per_cpu_cacheinfo(cpu));
diff --git a/queue-6.6/dma-buf-fix-dma_fence_array_signaled-v4.patch b/queue-6.6/dma-buf-fix-dma_fence_array_signaled-v4.patch
new file mode 100644 (file)
index 0000000..c84e446
--- /dev/null
@@ -0,0 +1,75 @@
+From 78ac1c3558810486d90aa533b0039aa70487a3da Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 8 Nov 2024 09:29:48 +0100
+Subject: dma-buf: fix dma_fence_array_signaled v4
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christian König <christian.koenig@amd.com>
+
+commit 78ac1c3558810486d90aa533b0039aa70487a3da upstream.
+
+The function silently assumed that signaling was already enabled for the
+dma_fence_array. This meant that without enabling signaling first we would
+never see forward progress.
+
+Fix that by falling back to testing each individual fence when signaling
+isn't enabled yet.
+
+v2: add the comment suggested by Boris why this is done this way
+v3: fix the underflow pointed out by Tvrtko
+v4: atomic_read_acquire() as suggested by Tvrtko
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
+Tested-by: Chia-I Wu <olvaffe@gmail.com>
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/12094
+Cc: <stable@vger.kernel.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241112121925.18464-1-christian.koenig@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma-buf/dma-fence-array.c |   28 +++++++++++++++++++++++++++-
+ 1 file changed, 27 insertions(+), 1 deletion(-)
+
+--- a/drivers/dma-buf/dma-fence-array.c
++++ b/drivers/dma-buf/dma-fence-array.c
+@@ -103,10 +103,36 @@ static bool dma_fence_array_enable_signa
+ static bool dma_fence_array_signaled(struct dma_fence *fence)
+ {
+       struct dma_fence_array *array = to_dma_fence_array(fence);
++      int num_pending;
++      unsigned int i;
+-      if (atomic_read(&array->num_pending) > 0)
++      /*
++       * We need to read num_pending before checking the enable_signal bit
++       * to avoid racing with the enable_signaling() implementation, which
++       * might decrement the counter, and cause a partial check.
++       * atomic_read_acquire() pairs with atomic_dec_and_test() in
++       * dma_fence_array_enable_signaling()
++       *
++       * The !--num_pending check is here to account for the any_signaled case
++       * if we race with enable_signaling(), that means the !num_pending check
++       * in the is_signalling_enabled branch might be outdated (num_pending
++       * might have been decremented), but that's fine. The user will get the
++       * right value when testing again later.
++       */
++      num_pending = atomic_read_acquire(&array->num_pending);
++      if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) {
++              if (num_pending <= 0)
++                      goto signal;
+               return false;
++      }
++      for (i = 0; i < array->num_fences; ++i) {
++              if (dma_fence_is_signaled(array->fences[i]) && !--num_pending)
++                      goto signal;
++      }
++      return false;
++
++signal:
+       dma_fence_array_clear_pending_error(array);
+       return true;
+ }
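
A minimal C11 sketch of the check structure this patch introduces (toy types,
not the kernel dma_fence API): num_pending is loaded with acquire semantics
before the signaling-enabled flag is tested, pairing with a release decrement
on the signaling path; when signaling has not been enabled yet, each member
fence is tested directly.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fence_array {
	atomic_int num_pending;
	atomic_bool signaling_enabled;
	unsigned int num_fences;
	bool signaled[4];		/* stand-in for the member fences */
};

static bool array_signaled(struct fence_array *a)
{
	/* Read num_pending before the flag; pairs with the release below. */
	int num_pending = atomic_load_explicit(&a->num_pending, memory_order_acquire);
	unsigned int i;

	if (atomic_load(&a->signaling_enabled))
		return num_pending <= 0;

	/* Signaling not enabled yet: test each fence directly. */
	for (i = 0; i < a->num_fences; i++)
		if (a->signaled[i] && !--num_pending)
			return true;
	return false;
}

static void member_fence_signaled(struct fence_array *a, unsigned int i)
{
	a->signaled[i] = true;
	/* Release: makes the fence state visible to the acquire load above. */
	atomic_fetch_sub_explicit(&a->num_pending, 1, memory_order_release);
}

int main(void)
{
	struct fence_array a = { .num_pending = 2, .num_fences = 2 };

	printf("%d\n", array_signaled(&a));	/* 0: nothing signaled yet */
	a.signaled[0] = a.signaled[1] = true;
	printf("%d\n", array_signaled(&a));	/* 1: fallback walk finds both */
	atomic_store(&a.signaling_enabled, true);
	member_fence_signaled(&a, 0);		/* callbacks decrement num_pending */
	member_fence_signaled(&a, 1);
	printf("%d\n", array_signaled(&a));	/* 1: num_pending reached 0 */
	return 0;
}
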
diff --git a/queue-6.6/dma-fence-fix-reference-leak-on-fence-merge-failure-path.patch b/queue-6.6/dma-fence-fix-reference-leak-on-fence-merge-failure-path.patch
new file mode 100644 (file)
index 0000000..d49561b
--- /dev/null
@@ -0,0 +1,45 @@
+From 949291c5314009b4f6e252391edbb40fdd5d5414 Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Fri, 15 Nov 2024 10:21:49 +0000
+Subject: dma-fence: Fix reference leak on fence merge failure path
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit 949291c5314009b4f6e252391edbb40fdd5d5414 upstream.
+
+Release all fence references if the output dma-fence-array could not be
+allocated.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: 245a4a7b531c ("dma-buf: generalize dma_fence unwrap & merging v3")
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Sumit Semwal <sumit.semwal@linaro.org>
+Cc: Gustavo Padovan <gustavo@padovan.org>
+Cc: Friedrich Vock <friedrich.vock@gmx.de>
+Cc: linux-media@vger.kernel.org
+Cc: dri-devel@lists.freedesktop.org
+Cc: linaro-mm-sig@lists.linaro.org
+Cc: <stable@vger.kernel.org> # v6.0+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241115102153.1980-2-tursulin@igalia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma-buf/dma-fence-unwrap.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/dma-buf/dma-fence-unwrap.c
++++ b/drivers/dma-buf/dma-fence-unwrap.c
+@@ -164,6 +164,8 @@ restart:
+                                       dma_fence_context_alloc(1),
+                                       1, false);
+       if (!result) {
++              for (i = 0; i < count; i++)
++                      dma_fence_put(array[i]);
+               tmp = NULL;
+               goto return_tmp;
+       }
diff --git a/queue-6.6/dma-fence-use-kernel-s-sort-for-merging-fences.patch b/queue-6.6/dma-fence-use-kernel-s-sort-for-merging-fences.patch
new file mode 100644 (file)
index 0000000..94f11c4
--- /dev/null
@@ -0,0 +1,271 @@
+From fe52c649438b8489c9456681d93a9b3de3d38263 Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Fri, 15 Nov 2024 10:21:50 +0000
+Subject: dma-fence: Use kernel's sort for merging fences
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit fe52c649438b8489c9456681d93a9b3de3d38263 upstream.
+
+One alternative to the fix Christian proposed in
+https://lore.kernel.org/dri-devel/20241024124159.4519-3-christian.koenig@amd.com/
+is to replace the rather complex open-coded sorting loops with the kernel's
+standard sort followed by a context squashing pass.
+
+The proposed advantage of this would be readability, but one concern
+Christian raised was that there could be many fences, that they are
+typically mostly sorted, and so the kernel's heap sort used by the proposed
+algorithm would be much worse than the current approach.
+
+I had a look running some games and vkcube to see what the typical numbers
+of input fences are. Tested scenarios:
+
+1) Hogwarts Legacy under Gamescope
+
+450 calls per second to __dma_fence_unwrap_merge.
+
+Percentages per number of fences buckets, before and after checking for
+signalled status, sorting and flattening:
+
+   N       Before      After
+   0       0.91%
+   1      69.40%
+  2-3     28.72%       9.4%  (90.6% resolved to one fence)
+  4-5      0.93%
+  6-9      0.03%
+  10+
+
+2) Cyberpunk 2077 under Gamescope
+
+1050 calls per second, amounting to 0.01% CPU time according to perf top.
+
+   N       Before      After
+   0       1.13%
+   1      52.30%
+  2-3     40.34%       55.57%
+  4-5      1.46%        0.50%
+  6-9      2.44%
+  10+      2.34%
+
+3) vkcube under Plasma
+
+90 calls per second.
+
+   N       Before      After
+   0
+   1
+  2-3      100%         0%   (Ie. all resolved to a single fence)
+  4-5
+  6-9
+  10+
+
+In the case of vkcube all invocations in the 2-3 bucket were actually
+just two input fences.
+
+From these numbers it looks like the heap sort should not be a
+disadvantage, given how the dominant case is <= 2 input fences which heap
+sort solves with just one compare and swap. (And for the case of one input
+fence we have a fast path in the previous patch.)
+
+A complementary possibility is to implement a different sorting algorithm
+under the same API as the kernel's sort() and so keep the simplicity,
+potentially moving the new sort under lib/ if it would be found more
+widely useful.
+
+v2:
+ * Hold on to fence references and reduce commentary. (Christian)
+ * Record and use latest signaled timestamp in the 2nd loop too.
+ * Consolidate zero or one fences fast paths.
+
+v3:
+ * Reverse the seqno sort order for a simpler squashing pass. (Christian)
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: 245a4a7b531c ("dma-buf: generalize dma_fence unwrap & merging v3")
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3617
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: Sumit Semwal <sumit.semwal@linaro.org>
+Cc: Gustavo Padovan <gustavo@padovan.org>
+Cc: Friedrich Vock <friedrich.vock@gmx.de>
+Cc: linux-media@vger.kernel.org
+Cc: dri-devel@lists.freedesktop.org
+Cc: linaro-mm-sig@lists.linaro.org
+Cc: <stable@vger.kernel.org> # v6.0+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241115102153.1980-3-tursulin@igalia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma-buf/dma-fence-unwrap.c |  126 +++++++++++++++++--------------------
+ 1 file changed, 60 insertions(+), 66 deletions(-)
+
+--- a/drivers/dma-buf/dma-fence-unwrap.c
++++ b/drivers/dma-buf/dma-fence-unwrap.c
+@@ -12,6 +12,7 @@
+ #include <linux/dma-fence-chain.h>
+ #include <linux/dma-fence-unwrap.h>
+ #include <linux/slab.h>
++#include <linux/sort.h>
+ /* Internal helper to start new array iteration, don't use directly */
+ static struct dma_fence *
+@@ -59,6 +60,25 @@ struct dma_fence *dma_fence_unwrap_next(
+ }
+ EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
++
++static int fence_cmp(const void *_a, const void *_b)
++{
++      struct dma_fence *a = *(struct dma_fence **)_a;
++      struct dma_fence *b = *(struct dma_fence **)_b;
++
++      if (a->context < b->context)
++              return -1;
++      else if (a->context > b->context)
++              return 1;
++
++      if (dma_fence_is_later(b, a))
++              return 1;
++      else if (dma_fence_is_later(a, b))
++              return -1;
++
++      return 0;
++}
++
+ /* Implementation for the dma_fence_merge() marco, don't use directly */
+ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+                                          struct dma_fence **fences,
+@@ -67,8 +87,7 @@ struct dma_fence *__dma_fence_unwrap_mer
+       struct dma_fence_array *result;
+       struct dma_fence *tmp, **array;
+       ktime_t timestamp;
+-      unsigned int i;
+-      size_t count;
++      int i, j, count;
+       count = 0;
+       timestamp = ns_to_ktime(0);
+@@ -96,80 +115,55 @@ struct dma_fence *__dma_fence_unwrap_mer
+       if (!array)
+               return NULL;
+-      /*
+-       * This trashes the input fence array and uses it as position for the
+-       * following merge loop. This works because the dma_fence_merge()
+-       * wrapper macro is creating this temporary array on the stack together
+-       * with the iterators.
+-       */
+-      for (i = 0; i < num_fences; ++i)
+-              fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);
+-
+       count = 0;
+-      do {
+-              unsigned int sel;
+-
+-restart:
+-              tmp = NULL;
+-              for (i = 0; i < num_fences; ++i) {
+-                      struct dma_fence *next;
+-
+-                      while (fences[i] && dma_fence_is_signaled(fences[i]))
+-                              fences[i] = dma_fence_unwrap_next(&iter[i]);
+-
+-                      next = fences[i];
+-                      if (!next)
+-                              continue;
+-
+-                      /*
+-                       * We can't guarantee that inpute fences are ordered by
+-                       * context, but it is still quite likely when this
+-                       * function is used multiple times. So attempt to order
+-                       * the fences by context as we pass over them and merge
+-                       * fences with the same context.
+-                       */
+-                      if (!tmp || tmp->context > next->context) {
+-                              tmp = next;
+-                              sel = i;
+-
+-                      } else if (tmp->context < next->context) {
+-                              continue;
+-
+-                      } else if (dma_fence_is_later(tmp, next)) {
+-                              fences[i] = dma_fence_unwrap_next(&iter[i]);
+-                              goto restart;
++      for (i = 0; i < num_fences; ++i) {
++              dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
++                      if (!dma_fence_is_signaled(tmp)) {
++                              array[count++] = dma_fence_get(tmp);
+                       } else {
+-                              fences[sel] = dma_fence_unwrap_next(&iter[sel]);
+-                              goto restart;
++                              ktime_t t = dma_fence_timestamp(tmp);
++
++                              if (ktime_after(t, timestamp))
++                                      timestamp = t;
+                       }
+               }
++      }
+-              if (tmp) {
+-                      array[count++] = dma_fence_get(tmp);
+-                      fences[sel] = dma_fence_unwrap_next(&iter[sel]);
+-              }
+-      } while (tmp);
++      if (count == 0 || count == 1)
++              goto return_fastpath;
+-      if (count == 0) {
+-              tmp = dma_fence_allocate_private_stub(ktime_get());
+-              goto return_tmp;
+-      }
++      sort(array, count, sizeof(*array), fence_cmp, NULL);
+-      if (count == 1) {
+-              tmp = array[0];
+-              goto return_tmp;
++      /*
++       * Only keep the most recent fence for each context.
++       */
++      j = 0;
++      for (i = 1; i < count; i++) {
++              if (array[i]->context == array[j]->context)
++                      dma_fence_put(array[i]);
++              else
++                      array[++j] = array[i];
+       }
++      count = ++j;
+-      result = dma_fence_array_create(count, array,
+-                                      dma_fence_context_alloc(1),
+-                                      1, false);
+-      if (!result) {
+-              for (i = 0; i < count; i++)
+-                      dma_fence_put(array[i]);
+-              tmp = NULL;
+-              goto return_tmp;
++      if (count > 1) {
++              result = dma_fence_array_create(count, array,
++                                              dma_fence_context_alloc(1),
++                                              1, false);
++              if (!result) {
++                      for (i = 0; i < count; i++)
++                              dma_fence_put(array[i]);
++                      tmp = NULL;
++                      goto return_tmp;
++              }
++              return &result->base;
+       }
+-      return &result->base;
++
++return_fastpath:
++      if (count == 0)
++              tmp = dma_fence_allocate_private_stub(timestamp);
++      else
++              tmp = array[0];
+ return_tmp:
+       kfree(array);
diff --git a/queue-6.6/drm-amdgpu-hdp5.2-do-a-posting-read-when-flushing-hdp.patch b/queue-6.6/drm-amdgpu-hdp5.2-do-a-posting-read-when-flushing-hdp.patch
new file mode 100644 (file)
index 0000000..16cd341
--- /dev/null
@@ -0,0 +1,40 @@
+From f756dbac1ce1d5f9a2b35e3b55fa429cf6336437 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 22 Nov 2024 11:24:13 -0500
+Subject: drm/amdgpu/hdp5.2: do a posting read when flushing HDP
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit f756dbac1ce1d5f9a2b35e3b55fa429cf6336437 upstream.
+
+Need to read back to make sure the write goes through.
+
+Cc: David Belanger <david.belanger@amd.com>
+Reviewed-by: Frank Min <frank.min@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c
+@@ -31,13 +31,15 @@
+ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
+                               struct amdgpu_ring *ring)
+ {
+-      if (!ring || !ring->funcs->emit_wreg)
++      if (!ring || !ring->funcs->emit_wreg) {
+               WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
+                       0);
+-      else
++              RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
++      } else {
+               amdgpu_ring_emit_wreg(ring,
+                       (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
+                       0);
++      }
+ }
+ static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
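
A sketch of the posting-read pattern applied here (hypothetical register
block, not the amdgpu register accessors): MMIO writes may be posted by the
bus, so a read back from the same region forces the flush request to actually
reach the device before execution continues.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[16];		/* stand-in for an ioremap()ed BAR */

static void reg_write(unsigned int offset, uint32_t val)
{
	*(volatile uint32_t *)&fake_regs[offset] = val;
}

static uint32_t reg_read(unsigned int offset)
{
	return *(volatile uint32_t *)&fake_regs[offset];
}

static void flush_hdp_like(unsigned int flush_reg)
{
	reg_write(flush_reg, 0);	/* request the flush ... */
	(void)reg_read(flush_reg);	/* ... and post it with a read back */
}

int main(void)
{
	flush_hdp_like(4);
	printf("flush reg now %u\n", (unsigned)fake_regs[4]);
	return 0;
}
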
diff --git a/queue-6.6/drm-dp_mst-fix-mst-sideband-message-body-length-check.patch b/queue-6.6/drm-dp_mst-fix-mst-sideband-message-body-length-check.patch
new file mode 100644 (file)
index 0000000..bc5ab44
--- /dev/null
@@ -0,0 +1,54 @@
+From bd2fccac61b40eaf08d9546acc9fef958bfe4763 Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Mon, 25 Nov 2024 22:53:14 +0200
+Subject: drm/dp_mst: Fix MST sideband message body length check
+
+From: Imre Deak <imre.deak@intel.com>
+
+commit bd2fccac61b40eaf08d9546acc9fef958bfe4763 upstream.
+
+Fix the MST sideband message body length check: the body length must be at
+least 1 byte, accounting for the message body CRC (aka message data CRC) at
+the end of the message.
+
+This fixes a case where an MST branch device returns a header with a
+correct header CRC (indicating a correctly received body length), with
+the body length being incorrectly set to 0. This will later lead to a
+memory corruption in drm_dp_sideband_append_payload() and the following
+errors in dmesg:
+
+   UBSAN: array-index-out-of-bounds in drivers/gpu/drm/display/drm_dp_mst_topology.c:786:25
+   index -1 is out of range for type 'u8 [48]'
+   Call Trace:
+    drm_dp_sideband_append_payload+0x33d/0x350 [drm_display_helper]
+    drm_dp_get_one_sb_msg+0x3ce/0x5f0 [drm_display_helper]
+    drm_dp_mst_hpd_irq_handle_event+0xc8/0x1580 [drm_display_helper]
+
+   memcpy: detected field-spanning write (size 18446744073709551615) of single field "&msg->msg[msg->curlen]" at drivers/gpu/drm/display/drm_dp_mst_topology.c:791 (size 256)
+   Call Trace:
+    drm_dp_sideband_append_payload+0x324/0x350 [drm_display_helper]
+    drm_dp_get_one_sb_msg+0x3ce/0x5f0 [drm_display_helper]
+    drm_dp_mst_hpd_irq_handle_event+0xc8/0x1580 [drm_display_helper]
+
+Cc: <stable@vger.kernel.org>
+Cc: Lyude Paul <lyude@redhat.com>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241125205314.1725887-1-imre.deak@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/display/drm_dp_mst_topology.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -319,6 +319,9 @@ static bool drm_dp_decode_sideband_msg_h
+       hdr->broadcast = (buf[idx] >> 7) & 0x1;
+       hdr->path_msg = (buf[idx] >> 6) & 0x1;
+       hdr->msg_len = buf[idx] & 0x3f;
++      if (hdr->msg_len < 1)           /* min space for body CRC */
++              return false;
++
+       idx++;
+       hdr->somt = (buf[idx] >> 7) & 0x1;
+       hdr->eomt = (buf[idx] >> 6) & 0x1;
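
A short illustration (hypothetical, reconstructed from the splat above) of why
a zero body length must be rejected: subtracting the single trailing CRC byte
from an unsigned zero length wraps around to SIZE_MAX, matching the
"size 18446744073709551615" memcpy reported above.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t msg_len = 0;			/* body length from the corrupt header */
	size_t payload = msg_len - 1;		/* payload bytes excluding the CRC */

	printf("payload size: %zu\n", payload);	/* 18446744073709551615 */
	return 0;
}
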
diff --git a/queue-6.6/drm-dp_mst-fix-resetting-msg-rx-state-after-topology-removal.patch b/queue-6.6/drm-dp_mst-fix-resetting-msg-rx-state-after-topology-removal.patch
new file mode 100644 (file)
index 0000000..5df13db
--- /dev/null
@@ -0,0 +1,108 @@
+From a6fa67d26de385c3c7a23c1e109a0e23bfda4ec7 Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Tue, 3 Dec 2024 18:02:17 +0200
+Subject: drm/dp_mst: Fix resetting msg rx state after topology removal
+
+From: Imre Deak <imre.deak@intel.com>
+
+commit a6fa67d26de385c3c7a23c1e109a0e23bfda4ec7 upstream.
+
+If the MST topology is removed during the reception of an MST down reply
+or MST up request sideband message, the
+drm_dp_mst_topology_mgr::up_req_recv/down_rep_recv states could be reset
+from one thread via drm_dp_mst_topology_mgr_set_mst(false), racing with
+the reading/parsing of the message from another thread via
+drm_dp_mst_handle_down_rep() or drm_dp_mst_handle_up_req(). The race is
+possible since the reader/parser doesn't hold any lock while accessing
+the reception state. This in turn can lead to a memory corruption in the
+reader/parser as described by commit bd2fccac61b4 ("drm/dp_mst: Fix MST
+sideband message body length check").
+
+Fix the above by resetting the message reception state if needed before
+reading/parsing a message. Another solution would be to hold the
+drm_dp_mst_topology_mgr::lock for the whole duration of the message
+reception/parsing in drm_dp_mst_handle_down_rep() and
+drm_dp_mst_handle_up_req(), however this would require a bigger change.
+Since the fix is also needed for stable, opting for the simpler solution
+in this patch.
+
+Cc: Lyude Paul <lyude@redhat.com>
+Cc: <stable@vger.kernel.org>
+Fixes: 1d082618bbf3 ("drm/display/dp_mst: Fix down/up message handling after sink disconnect")
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13056
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241203160223.2926014-2-imre.deak@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/display/drm_dp_mst_topology.c |   21 +++++++++++++++++++--
+ include/drm/display/drm_dp_mst_helper.h       |    7 +++++++
+ 2 files changed, 26 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -3655,8 +3655,7 @@ int drm_dp_mst_topology_mgr_set_mst(stru
+               ret = 0;
+               mgr->payload_id_table_cleared = false;
+-              memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
+-              memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
++              mgr->reset_rx_state = true;
+       }
+ out_unlock:
+@@ -3784,6 +3783,11 @@ out_fail:
+ }
+ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
++static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg)
++{
++      memset(msg, 0, sizeof(*msg));
++}
++
+ static bool
+ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
+                     struct drm_dp_mst_branch **mstb)
+@@ -4097,6 +4101,17 @@ out:
+       return 0;
+ }
++static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr)
++{
++      mutex_lock(&mgr->lock);
++      if (mgr->reset_rx_state) {
++              mgr->reset_rx_state = false;
++              reset_msg_rx_state(&mgr->down_rep_recv);
++              reset_msg_rx_state(&mgr->up_req_recv);
++      }
++      mutex_unlock(&mgr->lock);
++}
++
+ /**
+  * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
+  * @mgr: manager to notify irq for.
+@@ -4131,6 +4146,8 @@ int drm_dp_mst_hpd_irq_handle_event(stru
+               *handled = true;
+       }
++      update_msg_rx_state(mgr);
++
+       if (esi[1] & DP_DOWN_REP_MSG_RDY) {
+               ret = drm_dp_mst_handle_down_rep(mgr);
+               *handled = true;
+--- a/include/drm/display/drm_dp_mst_helper.h
++++ b/include/drm/display/drm_dp_mst_helper.h
+@@ -690,6 +690,13 @@ struct drm_dp_mst_topology_mgr {
+       bool payload_id_table_cleared : 1;
+       /**
++       * @reset_rx_state: The down request's reply and up request message
++       * receiver state must be reset, after the topology manager got
++       * removed. Protected by @lock.
++       */
++      bool reset_rx_state : 1;
++
++      /**
+        * @payload_count: The number of currently active payloads in hardware. This value is only
+        * intended to be used internally by MST helpers for payload tracking, and is only safe to
+        * read/write from the atomic commit (not check) context.
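
A hypothetical pthread sketch of the scheme (not the DRM MST helpers): the
teardown path only sets a flag under the manager lock, and the event path
consumes that flag, also under the lock, right before it starts reading the
reception state, so the state can no longer be cleared mid-parse.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct mgr {
	pthread_mutex_t lock;
	bool reset_rx_state;
	char down_rep_recv[64];		/* stand-ins for the sideband rx buffers */
	char up_req_recv[64];
};

/* Called from the topology-teardown thread. */
static void set_mst_disabled(struct mgr *m)
{
	pthread_mutex_lock(&m->lock);
	m->reset_rx_state = true;	/* defer the reset to the reader */
	pthread_mutex_unlock(&m->lock);
}

/* Called from the event/IRQ thread before parsing a new message. */
static void update_msg_rx_state(struct mgr *m)
{
	pthread_mutex_lock(&m->lock);
	if (m->reset_rx_state) {
		m->reset_rx_state = false;
		memset(m->down_rep_recv, 0, sizeof(m->down_rep_recv));
		memset(m->up_req_recv, 0, sizeof(m->up_req_recv));
	}
	pthread_mutex_unlock(&m->lock);
}

int main(void)
{
	struct mgr m = { .lock = PTHREAD_MUTEX_INITIALIZER };

	set_mst_disabled(&m);		/* topology removed */
	update_msg_rx_state(&m);	/* next event: state reset before parsing */
	printf("reset consumed: %d\n", !m.reset_rx_state);
	return 0;
}
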
diff --git a/queue-6.6/drm-dp_mst-verify-request-type-in-the-corresponding-down-message-reply.patch b/queue-6.6/drm-dp_mst-verify-request-type-in-the-corresponding-down-message-reply.patch
new file mode 100644 (file)
index 0000000..f64808b
--- /dev/null
@@ -0,0 +1,77 @@
+From 4d49e77a973d3b5d1881663c3f122906a0702940 Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Tue, 3 Dec 2024 18:02:18 +0200
+Subject: drm/dp_mst: Verify request type in the corresponding down message reply
+
+From: Imre Deak <imre.deak@intel.com>
+
+commit 4d49e77a973d3b5d1881663c3f122906a0702940 upstream.
+
+After receiving the response for an MST down request message, the
+response should be accepted/parsed only if the response type matches
+that of the request. Ensure this by checking if the request type code
+stored both in the request and the reply match, dropping the reply in
+case of a mismatch.
+
+This fixes the topology detection for an MST hub, as described in the
+Closes link below, where the hub sends an incorrect reply message after
+a CLEAR_PAYLOAD_TABLE -> LINK_ADDRESS down request message sequence.
+
+Cc: Lyude Paul <lyude@redhat.com>
+Cc: <stable@vger.kernel.org>
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12804
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241203160223.2926014-3-imre.deak@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/display/drm_dp_mst_topology.c |   31 ++++++++++++++++++++++++++
+ 1 file changed, 31 insertions(+)
+
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -3862,6 +3862,34 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_
+       return true;
+ }
++static int get_msg_request_type(u8 data)
++{
++      return data & 0x7f;
++}
++
++static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr,
++                                 const struct drm_dp_sideband_msg_tx *txmsg,
++                                 const struct drm_dp_sideband_msg_rx *rxmsg)
++{
++      const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr;
++      const struct drm_dp_mst_branch *mstb = txmsg->dst;
++      int tx_req_type = get_msg_request_type(txmsg->msg[0]);
++      int rx_req_type = get_msg_request_type(rxmsg->msg[0]);
++      char rad_str[64];
++
++      if (tx_req_type == rx_req_type)
++              return true;
++
++      drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str));
++      drm_dbg_kms(mgr->dev,
++                  "Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n",
++                  mstb, hdr->seqno, mstb->lct, rad_str,
++                  drm_dp_mst_req_type_str(rx_req_type), rx_req_type,
++                  drm_dp_mst_req_type_str(tx_req_type), tx_req_type);
++
++      return false;
++}
++
+ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
+ {
+       struct drm_dp_sideband_msg_tx *txmsg;
+@@ -3891,6 +3919,9 @@ static int drm_dp_mst_handle_down_rep(st
+               goto out_clear_reply;
+       }
++      if (!verify_rx_request_type(mgr, txmsg, msg))
++              goto out_clear_reply;
++
+       drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
+       if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
diff --git a/queue-6.6/mmc-core-further-prevent-card-detect-during-shutdown.patch b/queue-6.6/mmc-core-further-prevent-card-detect-during-shutdown.patch
new file mode 100644 (file)
index 0000000..a094310
--- /dev/null
@@ -0,0 +1,56 @@
+From 87a0d90fcd31c0f36da0332428c9e1a1e0f97432 Mon Sep 17 00:00:00 2001
+From: Ulf Hansson <ulf.hansson@linaro.org>
+Date: Mon, 25 Nov 2024 13:24:46 +0100
+Subject: mmc: core: Further prevent card detect during shutdown
+
+From: Ulf Hansson <ulf.hansson@linaro.org>
+
+commit 87a0d90fcd31c0f36da0332428c9e1a1e0f97432 upstream.
+
+Disabling card detect from the host's ->shutdown_pre() callback turned out
+to not be the complete solution. More precisely, beyond the point when the
+mmc_bus->shutdown() has been called, to gracefully power off the card, we
+need to prevent card detect. Otherwise the mmc_rescan work may poll for the
+card with a CMD13, to see if it's still alive, which then will fail and
+hang as the card has already been powered off.
+
+To fix this problem, let's disable mmc_rescan prior to powering off the
+card during shutdown.
+
+Reported-by: Anthony Pighin <anthony.pighin@nokia.com>
+Fixes: 66c915d09b94 ("mmc: core: Disable card detect during shutdown")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Reviewed-by: Adrian Hunter <adrian.hunter@intel.com>
+Closes: https://lore.kernel.org/all/BN0PR08MB695133000AF116F04C3A9FFE83212@BN0PR08MB6951.namprd08.prod.outlook.com/
+Tested-by: Anthony Pighin <anthony.pighin@nokia.com>
+Message-ID: <20241125122446.18684-1-ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/bus.c  |    2 ++
+ drivers/mmc/core/core.c |    3 +++
+ 2 files changed, 5 insertions(+)
+
+--- a/drivers/mmc/core/bus.c
++++ b/drivers/mmc/core/bus.c
+@@ -149,6 +149,8 @@ static void mmc_bus_shutdown(struct devi
+       if (dev->driver && drv->shutdown)
+               drv->shutdown(card);
++      __mmc_stop_host(host);
++
+       if (host->bus_ops->shutdown) {
+               ret = host->bus_ops->shutdown(host);
+               if (ret)
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -2296,6 +2296,9 @@ void mmc_start_host(struct mmc_host *hos
+ void __mmc_stop_host(struct mmc_host *host)
+ {
++      if (host->rescan_disable)
++              return;
++
+       if (host->slot.cd_irq >= 0) {
+               mmc_gpio_set_cd_wake(host, false);
+               disable_irq(host->slot.cd_irq);
diff --git a/queue-6.6/mmc-sdhci-pci-add-dmi-quirk-for-missing-cd-gpio-on-vexia-edu-atla-10-tablet.patch b/queue-6.6/mmc-sdhci-pci-add-dmi-quirk-for-missing-cd-gpio-on-vexia-edu-atla-10-tablet.patch
new file mode 100644 (file)
index 0000000..947961d
--- /dev/null
@@ -0,0 +1,155 @@
+From 7f0fa47ceebcff0e3591bb7e32a71a2cd7846149 Mon Sep 17 00:00:00 2001
+From: Hans de Goede <hdegoede@redhat.com>
+Date: Mon, 18 Nov 2024 22:00:49 +0100
+Subject: mmc: sdhci-pci: Add DMI quirk for missing CD GPIO on Vexia Edu Atla 10 tablet
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+commit 7f0fa47ceebcff0e3591bb7e32a71a2cd7846149 upstream.
+
+The Vexia Edu Atla 10 tablet distributed to schools in the Spanish
+Andalucía region has no ACPI fwnode associated with the SDHCI controller
+for its microsd-slot and thus has no ACPI GPIO resource info.
+
+This causes the following error to be logged and the slot to not work:
+[   10.572113] sdhci-pci 0000:00:12.0: failed to setup card detect gpio
+
+Add a DMI quirk table for providing gpiod_lookup_tables with manually
+provided CD GPIO info and use this DMI table to provide the CD GPIO info
+on this tablet. This fixes the microsd-slot not working.
+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Message-ID: <20241118210049.311079-1-hdegoede@redhat.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/sdhci-pci-core.c |   72 ++++++++++++++++++++++++++++++++++++++
+ drivers/mmc/host/sdhci-pci.h      |    1 
+ 2 files changed, 73 insertions(+)
+
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -21,6 +21,7 @@
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+ #include <linux/gpio.h>
++#include <linux/gpio/machine.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/pm_qos.h>
+ #include <linux/debugfs.h>
+@@ -1234,6 +1235,29 @@ static const struct sdhci_pci_fixes sdhc
+       .priv_size      = sizeof(struct intel_host),
+ };
++/* DMI quirks for devices with missing or broken CD GPIO info */
++static const struct gpiod_lookup_table vexia_edu_atla10_cd_gpios = {
++      .dev_id = "0000:00:12.0",
++      .table = {
++              GPIO_LOOKUP("INT33FC:00", 38, "cd", GPIO_ACTIVE_HIGH),
++              { }
++      },
++};
++
++static const struct dmi_system_id sdhci_intel_byt_cd_gpio_override[] = {
++      {
++              /* Vexia Edu Atla 10 tablet 9V version */
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
++                      DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
++                      /* Above strings are too generic, also match on BIOS date */
++                      DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
++              },
++              .driver_data = (void *)&vexia_edu_atla10_cd_gpios,
++      },
++      { }
++};
++
+ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
+ #ifdef CONFIG_PM_SLEEP
+       .resume         = byt_resume,
+@@ -1252,6 +1276,7 @@ static const struct sdhci_pci_fixes sdhc
+       .add_host       = byt_add_host,
+       .remove_slot    = byt_remove_slot,
+       .ops            = &sdhci_intel_byt_ops,
++      .cd_gpio_override = sdhci_intel_byt_cd_gpio_override,
+       .priv_size      = sizeof(struct intel_host),
+ };
+@@ -2036,6 +2061,42 @@ static const struct dev_pm_ops sdhci_pci
+  *                                                                           *
+ \*****************************************************************************/
++static struct gpiod_lookup_table *sdhci_pci_add_gpio_lookup_table(
++      struct sdhci_pci_chip *chip)
++{
++      struct gpiod_lookup_table *cd_gpio_lookup_table;
++      const struct dmi_system_id *dmi_id = NULL;
++      size_t count;
++
++      if (chip->fixes && chip->fixes->cd_gpio_override)
++              dmi_id = dmi_first_match(chip->fixes->cd_gpio_override);
++
++      if (!dmi_id)
++              return NULL;
++
++      cd_gpio_lookup_table = dmi_id->driver_data;
++      for (count = 0; cd_gpio_lookup_table->table[count].key; count++)
++              ;
++
++      cd_gpio_lookup_table = kmemdup(dmi_id->driver_data,
++                                     /* count + 1 terminating entry */
++                                     struct_size(cd_gpio_lookup_table, table, count + 1),
++                                     GFP_KERNEL);
++      if (!cd_gpio_lookup_table)
++              return ERR_PTR(-ENOMEM);
++
++      gpiod_add_lookup_table(cd_gpio_lookup_table);
++      return cd_gpio_lookup_table;
++}
++
++static void sdhci_pci_remove_gpio_lookup_table(struct gpiod_lookup_table *lookup_table)
++{
++      if (lookup_table) {
++              gpiod_remove_lookup_table(lookup_table);
++              kfree(lookup_table);
++      }
++}
++
+ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
+       struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
+       int slotno)
+@@ -2111,8 +2172,19 @@ static struct sdhci_pci_slot *sdhci_pci_
+               device_init_wakeup(&pdev->dev, true);
+       if (slot->cd_idx >= 0) {
++              struct gpiod_lookup_table *cd_gpio_lookup_table;
++
++              cd_gpio_lookup_table = sdhci_pci_add_gpio_lookup_table(chip);
++              if (IS_ERR(cd_gpio_lookup_table)) {
++                      ret = PTR_ERR(cd_gpio_lookup_table);
++                      goto remove;
++              }
++
+               ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
+                                          slot->cd_override_level, 0);
++
++              sdhci_pci_remove_gpio_lookup_table(cd_gpio_lookup_table);
++
+               if (ret && ret != -EPROBE_DEFER)
+                       ret = mmc_gpiod_request_cd(host->mmc, NULL,
+                                                  slot->cd_idx,
+--- a/drivers/mmc/host/sdhci-pci.h
++++ b/drivers/mmc/host/sdhci-pci.h
+@@ -156,6 +156,7 @@ struct sdhci_pci_fixes {
+ #endif
+       const struct sdhci_ops  *ops;
++      const struct dmi_system_id *cd_gpio_override;
+       size_t                  priv_size;
+ };
diff --git a/queue-6.6/modpost-add-.irqentry.text-to-other_sections.patch b/queue-6.6/modpost-add-.irqentry.text-to-other_sections.patch
new file mode 100644 (file)
index 0000000..10af061
--- /dev/null
@@ -0,0 +1,42 @@
+From 7912405643a14b527cd4a4f33c1d4392da900888 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 1 Dec 2024 12:17:30 +0100
+Subject: modpost: Add .irqentry.text to OTHER_SECTIONS
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 7912405643a14b527cd4a4f33c1d4392da900888 upstream.
+
+The compiler can fully inline the actual handler function of an interrupt
+entry into the .irqentry.text entry point. If such a function contains an
+access which has an exception table entry, modpost complains about a
+section mismatch:
+
+  WARNING: vmlinux.o(__ex_table+0x447c): Section mismatch in reference ...
+
+  The relocation at __ex_table+0x447c references section ".irqentry.text"
+  which is not in the list of authorized sections.
+
+Add .irqentry.text to OTHER_SECTIONS to cure the issue.
+
+Reported-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org # needed for linux-5.4-y
+Link: https://lore.kernel.org/all/20241128111844.GE10431@google.com/
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ scripts/mod/modpost.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -808,7 +808,7 @@ static void check_section(const char *mo
+               ".ltext", ".ltext.*"
+ #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
+               ".fixup", ".entry.text", ".exception.text", \
+-              ".coldtext", ".softirqentry.text"
++              ".coldtext", ".softirqentry.text", ".irqentry.text"
+ #define ALL_TEXT_SECTIONS  ".init.text", ".exit.text", \
+               TEXT_SECTIONS, OTHER_TEXT_SECTIONS
diff --git a/queue-6.6/regmap-detach-regmap-from-dev-on-regmap_exit.patch b/queue-6.6/regmap-detach-regmap-from-dev-on-regmap_exit.patch
new file mode 100644 (file)
index 0000000..8c3d9e0
--- /dev/null
@@ -0,0 +1,59 @@
+From 3061e170381af96d1e66799d34264e6414d428a7 Mon Sep 17 00:00:00 2001
+From: Cosmin Tanislav <demonsingur@gmail.com>
+Date: Thu, 28 Nov 2024 15:16:23 +0200
+Subject: regmap: detach regmap from dev on regmap_exit
+
+From: Cosmin Tanislav <demonsingur@gmail.com>
+
+commit 3061e170381af96d1e66799d34264e6414d428a7 upstream.
+
+At the end of __regmap_init(), if dev is not NULL, regmap_attach_dev()
+is called, which adds a devres reference to the regmap, to be able to
+retrieve a dev's regmap by name using dev_get_regmap().
+
+When calling regmap_exit, the opposite does not happen, and the
+reference is kept until the dev is detached.
+
+Add a regmap_detach_dev() function and call it in regmap_exit() to make
+sure that the devres reference is not kept.
+
+Cc: stable@vger.kernel.org
+Fixes: 72b39f6f2b5a ("regmap: Implement dev_get_regmap()")
+Signed-off-by: Cosmin Tanislav <demonsingur@gmail.com>
+Rule: add
+Link: https://lore.kernel.org/stable/20241128130554.362486-1-demonsingur%40gmail.com
+Link: https://patch.msgid.link/20241128131625.363835-1-demonsingur@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/regmap/regmap.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -598,6 +598,17 @@ int regmap_attach_dev(struct device *dev
+ }
+ EXPORT_SYMBOL_GPL(regmap_attach_dev);
++static int dev_get_regmap_match(struct device *dev, void *res, void *data);
++
++static int regmap_detach_dev(struct device *dev, struct regmap *map)
++{
++      if (!dev)
++              return 0;
++
++      return devres_release(dev, dev_get_regmap_release,
++                            dev_get_regmap_match, (void *)map->name);
++}
++
+ static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
+                                       const struct regmap_config *config)
+ {
+@@ -1442,6 +1453,7 @@ void regmap_exit(struct regmap *map)
+ {
+       struct regmap_async *async;
++      regmap_detach_dev(map->dev, map);
+       regcache_exit(map);
+       regmap_debugfs_exit(map);
+       regmap_range_exit(map);
diff --git a/queue-6.6/series b/queue-6.6/series
index a3776f09d918d296d2a75ca512dce02571f9c39c..7ad2d9f866e6b764dc84f354d2dc2fdd4db378da 100644 (file)
--- a/queue-6.6/series
@@ -174,3 +174,18 @@ fs-smb-client-implement-new-smb3-posix-type.patch
 fs-smb-client-cifs_prime_dcache-for-smb3-posix-reparse-points.patch
 smb3.1.1-fix-posix-mounts-to-older-servers.patch
 bcache-revert-replacing-is_err_or_null-with-is_err-again.patch
+cacheinfo-allocate-memory-during-cpu-hotplug-if-not-done-from-the-primary-cpu.patch
+drm-dp_mst-fix-mst-sideband-message-body-length-check.patch
+drm-dp_mst-verify-request-type-in-the-corresponding-down-message-reply.patch
+drm-dp_mst-fix-resetting-msg-rx-state-after-topology-removal.patch
+drm-amdgpu-hdp5.2-do-a-posting-read-when-flushing-hdp.patch
+modpost-add-.irqentry.text-to-other_sections.patch
+x86-kexec-restore-gdt-on-return-from-preserve_context-kexec.patch
+bpf-fix-oob-devmap-writes-when-deleting-elements.patch
+dma-buf-fix-dma_fence_array_signaled-v4.patch
+dma-fence-fix-reference-leak-on-fence-merge-failure-path.patch
+dma-fence-use-kernel-s-sort-for-merging-fences.patch
+xsk-fix-oob-map-writes-when-deleting-elements.patch
+regmap-detach-regmap-from-dev-on-regmap_exit.patch
+mmc-sdhci-pci-add-dmi-quirk-for-missing-cd-gpio-on-vexia-edu-atla-10-tablet.patch
+mmc-core-further-prevent-card-detect-during-shutdown.patch
diff --git a/queue-6.6/x86-kexec-restore-gdt-on-return-from-preserve_context-kexec.patch b/queue-6.6/x86-kexec-restore-gdt-on-return-from-preserve_context-kexec.patch
new file mode 100644 (file)
index 0000000..85159da
--- /dev/null
@@ -0,0 +1,82 @@
+From 07fa619f2a40c221ea27747a3323cabc59ab25eb Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Thu, 5 Dec 2024 15:05:07 +0000
+Subject: x86/kexec: Restore GDT on return from ::preserve_context kexec
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 07fa619f2a40c221ea27747a3323cabc59ab25eb upstream.
+
+The restore_processor_state() function explicitly states that "the asm code
+that gets us here will have restored a usable GDT". That wasn't true in the
+case of returning from a ::preserve_context kexec. Make it so.
+
+Without this, the kernel was depending on the called function to reload a
+GDT which is appropriate for the kernel before returning.
+
+Test program:
+
+ #include <unistd.h>
+ #include <errno.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <linux/kexec.h>
+ #include <linux/reboot.h>
+ #include <sys/reboot.h>
+ #include <sys/syscall.h>
+
+ int main (void)
+ {
+        struct kexec_segment segment = {};
+       unsigned char purgatory[] = {
+               0x66, 0xba, 0xf8, 0x03, // mov $0x3f8, %dx
+               0xb0, 0x42,             // mov $0x42, %al
+               0xee,                   // outb %al, (%dx)
+               0xc3,                   // ret
+       };
+       int ret;
+
+       segment.buf = &purgatory;
+       segment.bufsz = sizeof(purgatory);
+       segment.mem = (void *)0x400000;
+       segment.memsz = 0x1000;
+       ret = syscall(__NR_kexec_load, 0x400000, 1, &segment, KEXEC_PRESERVE_CONTEXT);
+       if (ret) {
+               perror("kexec_load");
+               exit(1);
+       }
+
+       ret = syscall(__NR_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_KEXEC);
+       if (ret) {
+               perror("kexec reboot");
+               exit(1);
+       }
+       printf("Success\n");
+       return 0;
+ }
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20241205153343.3275139-2-dwmw2@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/relocate_kernel_64.S |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/kernel/relocate_kernel_64.S
++++ b/arch/x86/kernel/relocate_kernel_64.S
+@@ -240,6 +240,13 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_map
+       movq    CR0(%r8), %r8
+       movq    %rax, %cr3
+       movq    %r8, %cr0
++
++#ifdef CONFIG_KEXEC_JUMP
++      /* Saved in save_processor_state. */
++      movq    $saved_context, %rax
++      lgdt    saved_context_gdt_desc(%rax)
++#endif
++
+       movq    %rbp, %rax
+       popf
diff --git a/queue-6.6/xsk-fix-oob-map-writes-when-deleting-elements.patch b/queue-6.6/xsk-fix-oob-map-writes-when-deleting-elements.patch
new file mode 100644 (file)
index 0000000..8b3204a
--- /dev/null
@@ -0,0 +1,113 @@
+From 32cd3db7de97c0c7a018756ce66244342fd583f0 Mon Sep 17 00:00:00 2001
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Date: Fri, 22 Nov 2024 13:10:29 +0100
+Subject: xsk: fix OOB map writes when deleting elements
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+commit 32cd3db7de97c0c7a018756ce66244342fd583f0 upstream.
+
+Jordy says:
+
+"
+In the xsk_map_delete_elem function an unsigned integer
+(map->max_entries) is compared with a user-controlled signed integer
+(k). Due to implicit type conversion, a large unsigned value for
+map->max_entries can bypass the intended bounds check:
+
+       if (k >= map->max_entries)
+               return -EINVAL;
+
+This allows k to hold a negative value (between -2147483648 and -2),
+which is then used as an array index in m->xsk_map[k], which results
+in an out-of-bounds access.
+
+       spin_lock_bh(&m->lock);
+       map_entry = &m->xsk_map[k]; // Out-of-bounds map_entry
+       old_xs = unrcu_pointer(xchg(map_entry, NULL));  // Oob write
+       if (old_xs)
+               xsk_map_sock_delete(old_xs, map_entry);
+       spin_unlock_bh(&m->lock);
+
+The xchg operation can then be used to cause an out-of-bounds write.
+Moreover, the invalid map_entry passed to xsk_map_sock_delete can lead
+to further memory corruption.
+"
+
+It indeed results in the following splat:
+
+[76612.897343] BUG: unable to handle page fault for address: ffffc8fc2e461108
+[76612.904330] #PF: supervisor write access in kernel mode
+[76612.909639] #PF: error_code(0x0002) - not-present page
+[76612.914855] PGD 0 P4D 0
+[76612.917431] Oops: Oops: 0002 [#1] PREEMPT SMP
+[76612.921859] CPU: 11 UID: 0 PID: 10318 Comm: a.out Not tainted 6.12.0-rc1+ #470
+[76612.929189] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0008.031920191559 03/19/2019
+[76612.939781] RIP: 0010:xsk_map_delete_elem+0x2d/0x60
+[76612.944738] Code: 00 00 41 54 55 53 48 63 2e 3b 6f 24 73 38 4c 8d a7 f8 00 00 00 48 89 fb 4c 89 e7 e8 2d bf 05 00 48 8d b4 eb 00 01 00 00 31 ff <48> 87 3e 48 85 ff 74 05 e8 16 ff ff ff 4c 89 e7 e8 3e bc 05 00 31
+[76612.963774] RSP: 0018:ffffc9002e407df8 EFLAGS: 00010246
+[76612.969079] RAX: 0000000000000000 RBX: ffffc9002e461000 RCX: 0000000000000000
+[76612.976323] RDX: 0000000000000001 RSI: ffffc8fc2e461108 RDI: 0000000000000000
+[76612.983569] RBP: ffffffff80000001 R08: 0000000000000000 R09: 0000000000000007
+[76612.990812] R10: ffffc9002e407e18 R11: ffff888108a38858 R12: ffffc9002e4610f8
+[76612.998060] R13: ffff888108a38858 R14: 00007ffd1ae0ac78 R15: ffffc9002e4610c0
+[76613.005303] FS:  00007f80b6f59740(0000) GS:ffff8897e0ec0000(0000) knlGS:0000000000000000
+[76613.013517] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[76613.019349] CR2: ffffc8fc2e461108 CR3: 000000011e3ef001 CR4: 00000000007726f0
+[76613.026595] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[76613.033841] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[76613.041086] PKRU: 55555554
+[76613.043842] Call Trace:
+[76613.046331]  <TASK>
+[76613.048468]  ? __die+0x20/0x60
+[76613.051581]  ? page_fault_oops+0x15a/0x450
+[76613.055747]  ? search_extable+0x22/0x30
+[76613.059649]  ? search_bpf_extables+0x5f/0x80
+[76613.063988]  ? exc_page_fault+0xa9/0x140
+[76613.067975]  ? asm_exc_page_fault+0x22/0x30
+[76613.072229]  ? xsk_map_delete_elem+0x2d/0x60
+[76613.076573]  ? xsk_map_delete_elem+0x23/0x60
+[76613.080914]  __sys_bpf+0x19b7/0x23c0
+[76613.084555]  __x64_sys_bpf+0x1a/0x20
+[76613.088194]  do_syscall_64+0x37/0xb0
+[76613.091832]  entry_SYSCALL_64_after_hwframe+0x4b/0x53
+[76613.096962] RIP: 0033:0x7f80b6d1e88d
+[76613.100592] Code: 5b 41 5c c3 66 0f 1f 84 00 00 00 00 00 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 73 b5 0f 00 f7 d8 64 89 01 48
+[76613.119631] RSP: 002b:00007ffd1ae0ac68 EFLAGS: 00000206 ORIG_RAX: 0000000000000141
+[76613.131330] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f80b6d1e88d
+[76613.142632] RDX: 0000000000000098 RSI: 00007ffd1ae0ad20 RDI: 0000000000000003
+[76613.153967] RBP: 00007ffd1ae0adc0 R08: 0000000000000000 R09: 0000000000000000
+[76613.166030] R10: 00007f80b6f77040 R11: 0000000000000206 R12: 00007ffd1ae0aed8
+[76613.177130] R13: 000055ddf42ce1e9 R14: 000055ddf42d0d98 R15: 00007f80b6fab040
+[76613.188129]  </TASK>
+
+Fix this by simply changing the key type from int to u32.
+
+Fixes: fbfc504a24f5 ("bpf: introduce new bpf AF_XDP map type BPF_MAP_TYPE_XSKMAP")
+CC: stable@vger.kernel.org
+Reported-by: Jordy Zomer <jordyzomer@google.com>
+Suggested-by: Jordy Zomer <jordyzomer@google.com>
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Link: https://lore.kernel.org/r/20241122121030.716788-2-maciej.fijalkowski@intel.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/xdp/xskmap.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/xdp/xskmap.c
++++ b/net/xdp/xskmap.c
+@@ -224,7 +224,7 @@ static long xsk_map_delete_elem(struct b
+       struct xsk_map *m = container_of(map, struct xsk_map, map);
+       struct xdp_sock __rcu **map_entry;
+       struct xdp_sock *old_xs;
+-      int k = *(u32 *)key;
++      u32 k = *(u32 *)key;
+       if (k >= map->max_entries)
+               return -EINVAL;