--- /dev/null
+From 93d0fcdddc9e7be9d4f42acbe57bc90dbb0fe75d Mon Sep 17 00:00:00 2001
+From: Keith Busch <kbusch@kernel.org>
+Date: Thu, 5 Mar 2026 12:40:56 -0800
+Subject: cxl/acpi: Fix CXL_ACPI and CXL_PMEM Kconfig tristate mismatch
+
+From: Keith Busch <kbusch@kernel.org>
+
+commit 93d0fcdddc9e7be9d4f42acbe57bc90dbb0fe75d upstream.
+
+Commit e7e222ad73d9 ("cxl: Move devm_cxl_add_nvdimm_bridge() to
+cxl_pmem.ko") moves devm_cxl_add_nvdimm_bridge() into the cxl_pmem file,
+which has independent config compile options for built-in or module. The
+call from cxl_acpi_probe() is guarded by IS_ENABLED(CONFIG_CXL_PMEM),
+which evaluates to true for both =y and =m.
+
+When CONFIG_CXL_PMEM=m, a built-in cxl_acpi attempts to reference a
+symbol exported by a module, which fails to link. CXL_PMEM cannot simply
+be promoted to =y in this configuration because it depends on LIBNVDIMM,
+which may itself be =m.
+
+Add a Kconfig dependency to prevent CXL_ACPI from being built-in when
+CXL_PMEM is a module. This constrains CXL_ACPI to =m when CXL_PMEM=m,
+while still allowing CXL_ACPI to be freely configured when CXL_PMEM is
+either built-in or disabled.
+
+[ dj: Fix up commit reference formatting. ]
+
+Fixes: e7e222ad73d9 ("cxl: Move devm_cxl_add_nvdimm_bridge() to cxl_pmem.ko")
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
+Reviewed-by: Dan Williams <dan.j.williams@intel.com>
+Link: https://patch.msgid.link/20260305204057.1516948-1-kbusch@meta.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cxl/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/cxl/Kconfig
++++ b/drivers/cxl/Kconfig
+@@ -58,6 +58,7 @@ config CXL_ACPI
+ tristate "CXL ACPI: Platform Support"
+ depends on ACPI
+ depends on ACPI_NUMA
++ depends on CXL_PMEM || !CXL_PMEM
+ default CXL_BUS
+ select ACPI_TABLE_LIB
+ select ACPI_HMAT
--- /dev/null
+From stable+bounces-226032-greg=kroah.com@vger.kernel.org Tue Mar 17 15:36:07 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 10:28:47 -0400
+Subject: drm/gud: fix NULL crtc dereference on display disable
+To: stable@vger.kernel.org
+Cc: Shenghao Yang <me@shenghaoyang.info>, kernel test robot <lkp@intel.com>, Dan Carpenter <dan.carpenter@linaro.org>, Thomas Zimmermann <tzimmermann@suse.de>, Ruben Wauters <rubenru09@aol.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260317142847.165695-2-sashal@kernel.org>
+
+From: Shenghao Yang <me@shenghaoyang.info>
+
+[ Upstream commit 7149be786da012afc6bae293d38f8c1fff1fb90d ]
+
+gud_plane_atomic_update() currently handles both crtc state and
+framebuffer updates - the complexity has led to a few accidental
+NULL pointer dereferences.
+
+Commit dc2d5ddb193e ("drm/gud: fix NULL fb and crtc dereferences
+on USB disconnect") [1] fixed an earlier dereference but planes
+can also be disabled in non-hotplug paths (e.g. display disables
+via the desktop environment). The drm_dev_enter() call would not
+cause an early return in those and subsequently oops on
+dereferencing crtc:
+
+BUG: kernel NULL pointer dereference, address: 00000000000005c8
+CPU: 6 UID: 1000 PID: 3473 Comm: kwin_wayland Not tainted 6.18.2-200.vanilla.gud.fc42.x86_64 #1 PREEMPT(lazy)
+RIP: 0010:gud_plane_atomic_update+0x148/0x470 [gud]
+ <TASK>
+ drm_atomic_helper_commit_planes+0x28e/0x310
+ drm_atomic_helper_commit_tail+0x2a/0x70
+ commit_tail+0xf1/0x150
+ drm_atomic_helper_commit+0x13c/0x180
+ drm_atomic_commit+0xb1/0xe0
+ ? __pfx___drm_printfn_info+0x10/0x10
+ drm_mode_atomic_ioctl+0x70f/0x7c0
+ ? __pfx_drm_mode_atomic_ioctl+0x10/0x10
+ drm_ioctl_kernel+0xae/0x100
+ drm_ioctl+0x2a8/0x550
+ ? __pfx_drm_mode_atomic_ioctl+0x10/0x10
+ __x64_sys_ioctl+0x97/0xe0
+ do_syscall_64+0x7e/0x7f0
+ ? __ct_user_enter+0x56/0xd0
+ ? do_syscall_64+0x158/0x7f0
+ ? __ct_user_enter+0x56/0xd0
+ ? do_syscall_64+0x158/0x7f0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Split out crtc handling from gud_plane_atomic_update() into
+atomic_enable() and atomic_disable() functions to delegate
+crtc state transitioning work to the DRM helpers.
+
+To preserve the gud state commit sequence [2], switch to
+the runtime PM version of drm_atomic_helper_commit_tail() which
+ensures that crtcs are enabled (hence sending the
+GUD_REQ_SET_CONTROLLER_ENABLE and GUD_REQ_SET_DISPLAY_ENABLE
+requests) before a framebuffer update is sent.
+
+[1] https://lore.kernel.org/all/20251231055039.44266-1-me@shenghaoyang.info/
+[2] https://github.com/notro/gud/wiki/GUD-Protocol#display-state
+
+Reported-by: kernel test robot <lkp@intel.com>
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Closes: https://lore.kernel.org/r/202601142159.0v8ilfVs-lkp@intel.com/
+Fixes: 73cfd166e045 ("drm/gud: Replace simple display pipe with DRM atomic helpers")
+Cc: <stable@vger.kernel.org> # 6.19.x
+Cc: <stable@vger.kernel.org> # 6.18.x
+Signed-off-by: Shenghao Yang <me@shenghaoyang.info>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Acked-by: Ruben Wauters <rubenru09@aol.com>
+Signed-off-by: Ruben Wauters <rubenru09@aol.com>
+Link: https://patch.msgid.link/20260222054551.80864-1-me@shenghaoyang.info
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/gud/gud_drv.c | 9 +++++-
+ drivers/gpu/drm/gud/gud_internal.h | 4 ++
+ drivers/gpu/drm/gud/gud_pipe.c | 54 ++++++++++++++++++++++++-------------
+ 3 files changed, 48 insertions(+), 19 deletions(-)
+
+--- a/drivers/gpu/drm/gud/gud_drv.c
++++ b/drivers/gpu/drm/gud/gud_drv.c
+@@ -339,7 +339,9 @@ static int gud_stats_debugfs(struct seq_
+ }
+
+ static const struct drm_crtc_helper_funcs gud_crtc_helper_funcs = {
+- .atomic_check = drm_crtc_helper_atomic_check
++ .atomic_check = drm_crtc_helper_atomic_check,
++ .atomic_enable = gud_crtc_atomic_enable,
++ .atomic_disable = gud_crtc_atomic_disable,
+ };
+
+ static const struct drm_crtc_funcs gud_crtc_funcs = {
+@@ -364,6 +366,10 @@ static const struct drm_plane_funcs gud_
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+ };
+
++static const struct drm_mode_config_helper_funcs gud_mode_config_helpers = {
++ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
++};
++
+ static const struct drm_mode_config_funcs gud_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+@@ -499,6 +505,7 @@ static int gud_probe(struct usb_interfac
+ drm->mode_config.min_height = le32_to_cpu(desc.min_height);
+ drm->mode_config.max_height = le32_to_cpu(desc.max_height);
+ drm->mode_config.funcs = &gud_mode_config_funcs;
++ drm->mode_config.helper_private = &gud_mode_config_helpers;
+
+ /* Format init */
+ formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL);
+--- a/drivers/gpu/drm/gud/gud_internal.h
++++ b/drivers/gpu/drm/gud/gud_internal.h
+@@ -62,6 +62,10 @@ int gud_usb_set_u8(struct gud_device *gd
+
+ void gud_clear_damage(struct gud_device *gdrm);
+ void gud_flush_work(struct work_struct *work);
++void gud_crtc_atomic_enable(struct drm_crtc *crtc,
++ struct drm_atomic_state *state);
++void gud_crtc_atomic_disable(struct drm_crtc *crtc,
++ struct drm_atomic_state *state);
+ int gud_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+ void gud_plane_atomic_update(struct drm_plane *plane,
+--- a/drivers/gpu/drm/gud/gud_pipe.c
++++ b/drivers/gpu/drm/gud/gud_pipe.c
+@@ -580,6 +580,39 @@ out:
+ return ret;
+ }
+
++void gud_crtc_atomic_enable(struct drm_crtc *crtc,
++ struct drm_atomic_state *state)
++{
++ struct drm_device *drm = crtc->dev;
++ struct gud_device *gdrm = to_gud_device(drm);
++ int idx;
++
++ if (!drm_dev_enter(drm, &idx))
++ return;
++
++ gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1);
++ gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0);
++ gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, 1);
++
++ drm_dev_exit(idx);
++}
++
++void gud_crtc_atomic_disable(struct drm_crtc *crtc,
++ struct drm_atomic_state *state)
++{
++ struct drm_device *drm = crtc->dev;
++ struct gud_device *gdrm = to_gud_device(drm);
++ int idx;
++
++ if (!drm_dev_enter(drm, &idx))
++ return;
++
++ gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, 0);
++ gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);
++
++ drm_dev_exit(idx);
++}
++
+ void gud_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *atomic_state)
+ {
+@@ -607,24 +640,12 @@ void gud_plane_atomic_update(struct drm_
+ mutex_unlock(&gdrm->damage_lock);
+ }
+
+- if (!drm_dev_enter(drm, &idx))
++ if (!crtc || !drm_dev_enter(drm, &idx))
+ return;
+
+- if (!old_state->fb)
+- gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1);
+-
+- if (fb && (crtc->state->mode_changed || crtc->state->connectors_changed))
+- gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0);
+-
+- if (crtc->state->active_changed)
+- gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active);
+-
+- if (!fb)
+- goto ctrl_disable;
+-
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+- goto ctrl_disable;
++ goto out;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage)
+@@ -632,9 +653,6 @@ void gud_plane_atomic_update(struct drm_
+
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+
+-ctrl_disable:
+- if (!crtc->state->enable)
+- gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);
+-
++out:
+ drm_dev_exit(idx);
+ }
--- /dev/null
+From stable+bounces-226031-greg=kroah.com@vger.kernel.org Tue Mar 17 15:43:33 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 10:28:46 -0400
+Subject: drm/gud: rearrange gud_probe() to prepare for function splitting
+To: stable@vger.kernel.org
+Cc: Ruben Wauters <rubenru09@aol.com>, Thomas Zimmermann <tzimmermann@suse.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260317142847.165695-1-sashal@kernel.org>
+
+From: Ruben Wauters <rubenru09@aol.com>
+
+[ Upstream commit b9e5e9d2c187b849e050d59823e8c834f78475ab ]
+
+gud_probe() is currently very large and does many things, including
+pipeline setup and feature detection, as well as having USB functions.
+
+This patch re-orders the code in gud_probe() to make it more organised
+and easier to split apart in the future.
+
+Signed-off-by: Ruben Wauters <rubenru09@aol.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://lore.kernel.org/r/20251020140147.5017-1-rubenru09@aol.com/
+Stable-dep-of: 7149be786da0 ("drm/gud: fix NULL crtc dereference on display disable")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/gud/gud_drv.c | 45 ++++++++++++++++++++++--------------------
+ 1 file changed, 24 insertions(+), 21 deletions(-)
+
+--- a/drivers/gpu/drm/gud/gud_drv.c
++++ b/drivers/gpu/drm/gud/gud_drv.c
+@@ -249,7 +249,7 @@ int gud_usb_set_u8(struct gud_device *gd
+ return gud_usb_set(gdrm, request, 0, &val, sizeof(val));
+ }
+
+-static int gud_get_properties(struct gud_device *gdrm)
++static int gud_plane_add_properties(struct gud_device *gdrm)
+ {
+ struct gud_property_req *properties;
+ unsigned int i, num_properties;
+@@ -463,10 +463,6 @@ static int gud_probe(struct usb_interfac
+ return PTR_ERR(gdrm);
+
+ drm = &gdrm->drm;
+- drm->mode_config.funcs = &gud_mode_config_funcs;
+- ret = drmm_mode_config_init(drm);
+- if (ret)
+- return ret;
+
+ gdrm->flags = le32_to_cpu(desc.flags);
+ gdrm->compression = desc.compression & GUD_COMPRESSION_LZ4;
+@@ -483,11 +479,28 @@ static int gud_probe(struct usb_interfac
+ if (ret)
+ return ret;
+
++ usb_set_intfdata(intf, gdrm);
++
++ dma_dev = usb_intf_get_dma_device(intf);
++ if (dma_dev) {
++ drm_dev_set_dma_dev(drm, dma_dev);
++ put_device(dma_dev);
++ } else {
++ dev_warn(dev, "buffer sharing not supported"); /* not an error */
++ }
++
++ /* Mode config init */
++ ret = drmm_mode_config_init(drm);
++ if (ret)
++ return ret;
++
+ drm->mode_config.min_width = le32_to_cpu(desc.min_width);
+ drm->mode_config.max_width = le32_to_cpu(desc.max_width);
+ drm->mode_config.min_height = le32_to_cpu(desc.min_height);
+ drm->mode_config.max_height = le32_to_cpu(desc.max_height);
++ drm->mode_config.funcs = &gud_mode_config_funcs;
+
++ /* Format init */
+ formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL);
+ /* Add room for emulated XRGB8888 */
+ formats = devm_kmalloc_array(dev, GUD_FORMATS_MAX_NUM + 1, sizeof(*formats), GFP_KERNEL);
+@@ -587,6 +600,7 @@ static int gud_probe(struct usb_interfac
+ return -ENOMEM;
+ }
+
++ /* Pipeline init */
+ ret = drm_universal_plane_init(drm, &gdrm->plane, 0,
+ &gud_plane_funcs,
+ formats, num_formats,
+@@ -598,12 +612,9 @@ static int gud_probe(struct usb_interfac
+ drm_plane_helper_add(&gdrm->plane, &gud_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(&gdrm->plane);
+
+- devm_kfree(dev, formats);
+- devm_kfree(dev, formats_dev);
+-
+- ret = gud_get_properties(gdrm);
++ ret = gud_plane_add_properties(gdrm);
+ if (ret) {
+- dev_err(dev, "Failed to get properties (error=%d)\n", ret);
++ dev_err(dev, "Failed to add properties (error=%d)\n", ret);
+ return ret;
+ }
+
+@@ -621,16 +632,7 @@ static int gud_probe(struct usb_interfac
+ }
+
+ drm_mode_config_reset(drm);
+-
+- usb_set_intfdata(intf, gdrm);
+-
+- dma_dev = usb_intf_get_dma_device(intf);
+- if (dma_dev) {
+- drm_dev_set_dma_dev(drm, dma_dev);
+- put_device(dma_dev);
+- } else {
+- dev_warn(dev, "buffer sharing not supported"); /* not an error */
+- }
++ drm_kms_helper_poll_init(drm);
+
+ drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL);
+
+@@ -638,7 +640,8 @@ static int gud_probe(struct usb_interfac
+ if (ret)
+ return ret;
+
+- drm_kms_helper_poll_init(drm);
++ devm_kfree(dev, formats);
++ devm_kfree(dev, formats_dev);
+
+ drm_client_setup(drm, NULL);
+
--- /dev/null
+From f79a6d0889318dce79b1691b2e7e94b8d789fe36 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 9 Mar 2026 14:21:37 -0600
+Subject: io_uring: ensure ctx->rings is stable for task work flags manipulation
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit 96189080265e6bb5dde3a4afbaf947af493e3f82 upstream.
+
+If DEFER_TASKRUN | SETUP_TASKRUN is used and task work is added while
+the ring is being resized, it's possible for the OR'ing of
+IORING_SQ_TASKRUN to happen in the small window of swapping into the
+new rings and the old rings being freed.
+
+Prevent this by adding a 2nd ->rings pointer, ->rings_rcu, which is
+protected by RCU. The task work flags manipulation is inside RCU
+already, and if the resize ring freeing is done post an RCU synchronize,
+then there's no need to add locking to the fast path of task work
+additions.
+
+Note: this is only done for DEFER_TASKRUN, as that's the only setup mode
+that supports ring resizing. If this ever changes, then they too need to
+use the io_ctx_mark_taskrun() helper.
+
+Link: https://lore.kernel.org/io-uring/20260309062759.482210-1-naup96721@gmail.com/
+Cc: stable@vger.kernel.org
+Fixes: 79cfe9e59c2a ("io_uring/register: add IORING_REGISTER_RESIZE_RINGS")
+Reported-by: Hao-Yu Yang <naup96721@gmail.com>
+Suggested-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/io_uring_types.h | 1 +
+ io_uring/io_uring.c | 25 ++++++++++++++++++++++---
+ io_uring/register.c | 11 +++++++++++
+ 3 files changed, 34 insertions(+), 3 deletions(-)
+
+--- a/include/linux/io_uring_types.h
++++ b/include/linux/io_uring_types.h
+@@ -372,6 +372,7 @@ struct io_ring_ctx {
+ * regularly bounce b/w CPUs.
+ */
+ struct {
++ struct io_rings __rcu *rings_rcu;
+ struct llist_head work_llist;
+ struct llist_head retry_llist;
+ unsigned long check_cq;
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1238,6 +1238,21 @@ void tctx_task_work(struct callback_head
+ WARN_ON_ONCE(ret);
+ }
+
++/*
++ * Sets IORING_SQ_TASKRUN in the sq_flags shared with userspace, using the
++ * RCU protected rings pointer to be safe against concurrent ring resizing.
++ */
++static void io_ctx_mark_taskrun(struct io_ring_ctx *ctx)
++{
++ lockdep_assert_in_rcu_read_lock();
++
++ if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) {
++ struct io_rings *rings = rcu_dereference(ctx->rings_rcu);
++
++ atomic_or(IORING_SQ_TASKRUN, &rings->sq_flags);
++ }
++}
++
+ static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
+ {
+ struct io_ring_ctx *ctx = req->ctx;
+@@ -1292,8 +1307,7 @@ static void io_req_local_work_add(struct
+ */
+
+ if (!head) {
+- if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
+- atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
++ io_ctx_mark_taskrun(ctx);
+ if (ctx->has_evfd)
+ io_eventfd_signal(ctx, false);
+ }
+@@ -1317,6 +1331,10 @@ static void io_req_normal_work_add(struc
+ if (!llist_add(&req->io_task_work.node, &tctx->task_list))
+ return;
+
++ /*
++ * Doesn't need to use ->rings_rcu, as resizing isn't supported for
++ * !DEFER_TASKRUN.
++ */
+ if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
+ atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
+
+@@ -2774,6 +2792,7 @@ static void io_rings_free(struct io_ring
+ io_free_region(ctx, &ctx->sq_region);
+ io_free_region(ctx, &ctx->ring_region);
+ ctx->rings = NULL;
++ RCU_INIT_POINTER(ctx->rings_rcu, NULL);
+ ctx->sq_sqes = NULL;
+ }
+
+@@ -3627,7 +3646,7 @@ static __cold int io_allocate_scq_urings
+ if (ret)
+ return ret;
+ ctx->rings = rings = io_region_get_ptr(&ctx->ring_region);
+-
++ rcu_assign_pointer(ctx->rings_rcu, rings);
+ if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
+ ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
+
+--- a/io_uring/register.c
++++ b/io_uring/register.c
+@@ -556,7 +556,15 @@ overflow:
+ ctx->sq_entries = p.sq_entries;
+ ctx->cq_entries = p.cq_entries;
+
++ /*
++ * Just mark any flag we may have missed and that the application
++ * should act on unconditionally. Worst case it'll be an extra
++ * syscall.
++ */
++ atomic_or(IORING_SQ_TASKRUN | IORING_SQ_NEED_WAKEUP, &n.rings->sq_flags);
+ ctx->rings = n.rings;
++ rcu_assign_pointer(ctx->rings_rcu, n.rings);
++
+ ctx->sq_sqes = n.sq_sqes;
+ swap_old(ctx, o, n, ring_region);
+ swap_old(ctx, o, n, sq_region);
+@@ -565,6 +573,9 @@ overflow:
+ out:
+ spin_unlock(&ctx->completion_lock);
+ mutex_unlock(&ctx->mmap_lock);
++ /* Wait for concurrent io_ctx_mark_taskrun() */
++ if (to_free == &o)
++ synchronize_rcu_expedited();
+ io_register_free_rings(ctx, &p, to_free);
+
+ if (ctx->sq_data)
--- /dev/null
+From f39899d2c0b31a4f5c9742edce0f7eff4414833b Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Mon, 9 Mar 2026 14:35:49 -0600
+Subject: io_uring/eventfd: use ctx->rings_rcu for flags checking
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Commit 177c69432161f6e4bab07ccacf8a1748a6898a6b upstream.
+
+Similarly to what commit e78f7b70e837 did for local task work additions,
+use ->rings_rcu under RCU rather than dereference ->rings directly. See
+that commit for more details.
+
+Cc: stable@vger.kernel.org
+Fixes: 79cfe9e59c2a ("io_uring/register: add IORING_REGISTER_RESIZE_RINGS")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ io_uring/eventfd.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/io_uring/eventfd.c
++++ b/io_uring/eventfd.c
+@@ -76,11 +76,15 @@ void io_eventfd_signal(struct io_ring_ct
+ {
+ bool skip = false;
+ struct io_ev_fd *ev_fd;
+-
+- if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
+- return;
++ struct io_rings *rings;
+
+ guard(rcu)();
++
++ rings = rcu_dereference(ctx->rings_rcu);
++ if (!rings)
++ return;
++ if (READ_ONCE(rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
++ return;
+ ev_fd = rcu_dereference(ctx->io_ev_fd);
+ /*
+ * Check again if ev_fd exists in case an io_eventfd_unregister call
--- /dev/null
+From stable+bounces-223714-greg=kroah.com@vger.kernel.org Mon Mar 9 18:43:58 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Mar 2026 13:43:49 -0400
+Subject: kbuild: Leave objtool binary around with 'make clean'
+To: stable@vger.kernel.org
+Cc: Nathan Chancellor <nathan@kernel.org>, Michal Suchanek <msuchanek@suse.de>, Rainer Fiebig <jrf@mailbox.org>, Josh Poimboeuf <jpoimboe@kernel.org>, "Peter Zijlstra (Intel)" <peterz@infradead.org>, Nicolas Schier <nsc@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260309174349.1332818-1-sashal@kernel.org>
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+[ Upstream commit fdb12c8a24a453bdd6759979b6ef1e04ebd4beb4 ]
+
+The difference between 'make clean' and 'make mrproper' is documented in
+'make help' as:
+
+ clean - Remove most generated files but keep the config and
+ enough build support to build external modules
+ mrproper - Remove all generated files + config + various backup files
+
+After commit 68b4fe32d737 ("kbuild: Add objtool to top-level clean
+target"), running 'make clean' then attempting to build an external
+module with the resulting build directory fails with
+
+ $ make ARCH=x86_64 O=build clean
+
+ $ make -C build M=... MO=...
+ ...
+ /bin/sh: line 1: .../build/tools/objtool/objtool: No such file or directory
+
+as 'make clean' removes the objtool binary.
+
+Split the objtool clean target into mrproper and clean like Kbuild does
+and remove all generated artifacts with 'make clean' except for the
+objtool binary, which is removed with 'make mrproper'. To avoid a small
+race when running the objtool clean target through both objtool_mrproper
+and objtool_clean when running 'make mrproper', modify objtool's clean
+up find command to avoid using find's '-delete' command by piping the
+files into 'xargs rm -f' like the rest of Kbuild does.
+
+Cc: stable@vger.kernel.org
+Fixes: 68b4fe32d737 ("kbuild: Add objtool to top-level clean target")
+Reported-by: Michal Suchanek <msuchanek@suse.de>
+Closes: https://lore.kernel.org/20260225112633.6123-1-msuchanek@suse.de/
+Reported-by: Rainer Fiebig <jrf@mailbox.org>
+Closes: https://lore.kernel.org/62d12399-76e5-3d40-126a-7490b4795b17@mailbox.org/
+Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Nicolas Schier <nsc@kernel.org>
+Tested-by: Nicolas Schier <nsc@kernel.org>
+Link: https://patch.msgid.link/20260227-avoid-objtool-binary-removal-clean-v1-1-122f3e55eae9@kernel.org
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+[ Context ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Makefile | 8 ++++----
+ tools/objtool/Makefile | 8 +++++---
+ 2 files changed, 9 insertions(+), 7 deletions(-)
+
+--- a/Makefile
++++ b/Makefile
+@@ -1441,13 +1441,13 @@ ifneq ($(wildcard $(resolve_btfids_O)),)
+ $(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean
+ endif
+
+-PHONY += objtool_clean
++PHONY += objtool_clean objtool_mrproper
+
+ objtool_O = $(abspath $(objtree))/tools/objtool
+
+-objtool_clean:
++objtool_clean objtool_mrproper:
+ ifneq ($(wildcard $(objtool_O)),)
+- $(Q)$(MAKE) -sC $(abs_srctree)/tools/objtool O=$(objtool_O) srctree=$(abs_srctree) clean
++ $(Q)$(MAKE) -sC $(abs_srctree)/tools/objtool O=$(objtool_O) srctree=$(abs_srctree) $(patsubst objtool_%,%,$@)
+ endif
+
+ tools/: FORCE
+@@ -1624,7 +1624,7 @@ PHONY += $(mrproper-dirs) mrproper
+ $(mrproper-dirs):
+ $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
+
+-mrproper: clean $(mrproper-dirs)
++mrproper: clean objtool_mrproper $(mrproper-dirs)
+ $(call cmd,rmfiles)
+ @find . $(RCS_FIND_IGNORE) \
+ \( -name '*.rmeta' \) \
+--- a/tools/objtool/Makefile
++++ b/tools/objtool/Makefile
+@@ -86,10 +86,12 @@ $(LIBSUBCMD)-clean:
+ $(Q)$(RM) -r -- $(LIBSUBCMD_OUTPUT)
+
+ clean: $(LIBSUBCMD)-clean
+- $(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
+- $(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
++ $(Q)find $(OUTPUT) \( -name '*.o' -o -name '\.*.cmd' -o -name '\.*.d' \) -type f -print | xargs $(RM)
+ $(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep
+
++mrproper: clean
++ $(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
++
+ FORCE:
+
+-.PHONY: clean FORCE
++.PHONY: clean mrproper FORCE
--- /dev/null
+From stable+bounces-224559-greg=kroah.com@vger.kernel.org Tue Mar 10 20:53:50 2026
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Tue, 10 Mar 2026 12:52:14 -0700
+Subject: ksmbd: Compare MACs in constant time
+To: stable@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org, linux-cifs@vger.kernel.org, Eric Biggers <ebiggers@kernel.org>, Namjae Jeon <linkinjeon@kernel.org>, Steve French <stfrench@microsoft.com>
+Message-ID: <20260310195214.70843-1-ebiggers@kernel.org>
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+commit c5794709bc9105935dbedef8b9cf9c06f2b559fa upstream.
+
+To prevent timing attacks, MAC comparisons need to be constant-time.
+Replace the memcmp() with the correct function, crypto_memneq().
+
+Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/Kconfig | 1 +
+ fs/smb/server/auth.c | 4 +++-
+ fs/smb/server/smb2pdu.c | 5 +++--
+ 3 files changed, 7 insertions(+), 3 deletions(-)
+
+--- a/fs/smb/server/Kconfig
++++ b/fs/smb/server/Kconfig
+@@ -13,6 +13,7 @@ config SMB_SERVER
+ select CRYPTO_LIB_ARC4
+ select CRYPTO_LIB_DES
+ select CRYPTO_LIB_SHA256
++ select CRYPTO_LIB_UTILS
+ select CRYPTO_SHA256
+ select CRYPTO_CMAC
+ select CRYPTO_SHA512
+--- a/fs/smb/server/auth.c
++++ b/fs/smb/server/auth.c
+@@ -13,6 +13,7 @@
+ #include <linux/xattr.h>
+ #include <crypto/hash.h>
+ #include <crypto/aead.h>
++#include <crypto/utils.h>
+ #include <linux/random.h>
+ #include <linux/scatterlist.h>
+
+@@ -283,7 +284,8 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn
+ goto out;
+ }
+
+- if (memcmp(ntlmv2->ntlmv2_hash, ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE) != 0)
++ if (crypto_memneq(ntlmv2->ntlmv2_hash, ntlmv2_rsp,
++ CIFS_HMAC_MD5_HASH_SIZE))
+ rc = -EINVAL;
+ out:
+ if (ctx)
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -4,6 +4,7 @@
+ * Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
++#include <crypto/utils.h>
+ #include <linux/inetdevice.h>
+ #include <net/addrconf.h>
+ #include <linux/syscalls.h>
+@@ -8881,7 +8882,7 @@ int smb2_check_sign_req(struct ksmbd_wor
+ signature))
+ return 0;
+
+- if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
++ if (crypto_memneq(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
+ pr_err("bad smb2 signature\n");
+ return 0;
+ }
+@@ -8969,7 +8970,7 @@ int smb3_check_sign_req(struct ksmbd_wor
+ if (ksmbd_sign_smb3_pdu(conn, signing_key, iov, 1, signature))
+ return 0;
+
+- if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
++ if (crypto_memneq(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
+ pr_err("bad smb2 signature\n");
+ return 0;
+ }
--- /dev/null
+From stable+bounces-225694-greg=kroah.com@vger.kernel.org Mon Mar 16 21:17:23 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 16:17:16 -0400
+Subject: KVM: arm64: Eagerly init vgic dist/redist on vgic creation
+To: stable@vger.kernel.org
+Cc: Marc Zyngier <maz@kernel.org>, syzbot+f6a46b038fc243ac0175@syzkaller.appspotmail.com, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260316201716.1375450-2-sashal@kernel.org>
+
+From: Marc Zyngier <maz@kernel.org>
+
+[ Upstream commit ac6769c8f948dff33265c50e524aebf9aa6f1be0 ]
+
+If vgic_allocate_private_irqs_locked() fails for any odd reason,
+we exit kvm_vgic_create() early, leaving dist->rd_regions uninitialised.
+
+kvm_vgic_dist_destroy() then comes along and walks into the weeds
+trying to free the RDs. Got to love this stuff.
+
+Solve it by moving all the static initialisation early, and make
+sure that if we fail halfway, we're in a reasonable shape to
+perform the rest of the teardown. While at it, reset the vgic model
+on failure, just in case...
+
+Reported-by: syzbot+f6a46b038fc243ac0175@syzkaller.appspotmail.com
+Tested-by: syzbot+f6a46b038fc243ac0175@syzkaller.appspotmail.com
+Fixes: b3aa9283c0c50 ("KVM: arm64: vgic: Hoist SGI/PPI alloc from vgic_init() to kvm_create_vgic()")
+Link: https://lore.kernel.org/r/69a2d58c.050a0220.3a55be.003b.GAE@google.com
+Link: https://patch.msgid.link/20260228164559.936268-1-maz@kernel.org
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/vgic/vgic-init.c | 32 ++++++++++++++++----------------
+ 1 file changed, 16 insertions(+), 16 deletions(-)
+
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -143,6 +143,21 @@ int kvm_vgic_create(struct kvm *kvm, u32
+ kvm->arch.vgic.in_kernel = true;
+ kvm->arch.vgic.vgic_model = type;
+ kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST;
++ kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
++
++ aa64pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
++ pfr1 = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;
++
++ if (type == KVM_DEV_TYPE_ARM_VGIC_V2) {
++ kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
++ } else {
++ INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
++ aa64pfr0 |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
++ pfr1 |= SYS_FIELD_PREP_ENUM(ID_PFR1_EL1, GIC, GICv3);
++ }
++
++ kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, aa64pfr0);
++ kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, pfr1);
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ ret = vgic_allocate_private_irqs_locked(vcpu, type);
+@@ -157,25 +172,10 @@ int kvm_vgic_create(struct kvm *kvm, u32
+ vgic_cpu->private_irqs = NULL;
+ }
+
++ kvm->arch.vgic.vgic_model = 0;
+ goto out_unlock;
+ }
+
+- kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
+-
+- aa64pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
+- pfr1 = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;
+-
+- if (type == KVM_DEV_TYPE_ARM_VGIC_V2) {
+- kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+- } else {
+- INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
+- aa64pfr0 |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
+- pfr1 |= SYS_FIELD_PREP_ENUM(ID_PFR1_EL1, GIC, GICv3);
+- }
+-
+- kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, aa64pfr0);
+- kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, pfr1);
+-
+ if (type == KVM_DEV_TYPE_ARM_VGIC_V3)
+ kvm->arch.vgic.nassgicap = system_supports_direct_sgis();
+
--- /dev/null
+From stable+bounces-225693-greg=kroah.com@vger.kernel.org Mon Mar 16 21:17:22 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 16:17:15 -0400
+Subject: KVM: arm64: gic: Set vgic_model before initing private IRQs
+To: stable@vger.kernel.org
+Cc: Sascha Bischoff <Sascha.Bischoff@arm.com>, Sascha Bischoff <sascha.bischoff@arm.com>, Jonathan Cameron <jonathan.cameron@huawei.com>, Marc Zyngier <maz@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260316201716.1375450-1-sashal@kernel.org>
+
+From: Sascha Bischoff <Sascha.Bischoff@arm.com>
+
+[ Upstream commit 9435c1e1431003e23aa34ef8e46c30d09c3dbcb5 ]
+
+Different GIC types require the private IRQs to be initialised
+differently. GICv5 is the culprit as it supports both a different
+number of private IRQs, and all of these are PPIs (there are no
+SGIs). Moreover, as GICv5 uses the top bits of the interrupt ID to
+encode the type, the intid also needs to computed differently.
+
+Up until now, the GIC model has been set after initialising the
+private IRQs for a VCPU. Move this earlier to ensure that the GIC
+model is available when configuring the private IRQs. While we're at
+it, also move the setting of the in_kernel flag and implementation
+revision to keep them grouped together as before.
+
+Signed-off-by: Sascha Bischoff <sascha.bischoff@arm.com>
+Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
+Link: https://patch.msgid.link/20260128175919.3828384-7-sascha.bischoff@arm.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Stable-dep-of: ac6769c8f948 ("KVM: arm64: Eagerly init vgic dist/redist on vgic creation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/vgic/vgic-init.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -140,6 +140,10 @@ int kvm_vgic_create(struct kvm *kvm, u32
+ goto out_unlock;
+ }
+
++ kvm->arch.vgic.in_kernel = true;
++ kvm->arch.vgic.vgic_model = type;
++ kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST;
++
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ ret = vgic_allocate_private_irqs_locked(vcpu, type);
+ if (ret)
+@@ -156,10 +160,6 @@ int kvm_vgic_create(struct kvm *kvm, u32
+ goto out_unlock;
+ }
+
+- kvm->arch.vgic.in_kernel = true;
+- kvm->arch.vgic.vgic_model = type;
+- kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST;
+-
+ kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
+
+ aa64pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
--- /dev/null
+From stable+bounces-225663-greg=kroah.com@vger.kernel.org Mon Mar 16 20:15:44 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 15:11:52 -0400
+Subject: KVM: SVM: Add a helper to look up the max physical ID for AVIC
+To: stable@vger.kernel.org
+Cc: Naveen N Rao <naveen@kernel.org>, Sean Christopherson <seanjc@google.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260316191153.1326996-2-sashal@kernel.org>
+
+From: Naveen N Rao <naveen@kernel.org>
+
+[ Upstream commit f2f6e67a56dc88fea7e9b10c4e79bb01d97386b7 ]
+
+To help with a future change, add a helper to look up the maximum
+physical ID depending on the vCPU AVIC mode. No functional change
+intended.
+
+Suggested-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Naveen N Rao (AMD) <naveen@kernel.org>
+Link: https://lore.kernel.org/r/0ab9bf5e20a3463a4aa3a5ea9bbbac66beedf1d1.1757009416.git.naveen@kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Stable-dep-of: 87d0f901a9bd ("KVM: SVM: Set/clear CR8 write interception when AVIC is (de)activated")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/avic.c | 26 ++++++++++++++++++++------
+ 1 file changed, 20 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -158,13 +158,31 @@ static void avic_set_x2apic_msr_intercep
+ svm->x2avic_msrs_intercepted = intercept;
+ }
+
++static u32 avic_get_max_physical_id(struct kvm_vcpu *vcpu)
++{
++ u32 arch_max;
++
++ if (x2avic_enabled && apic_x2apic_mode(vcpu->arch.apic))
++ arch_max = X2AVIC_MAX_PHYSICAL_ID;
++ else
++ arch_max = AVIC_MAX_PHYSICAL_ID;
++
++ /*
++ * Despite its name, KVM_CAP_MAX_VCPU_ID represents the maximum APIC ID
++ * plus one, so the max possible APIC ID is one less than that.
++ */
++ return min(vcpu->kvm->arch.max_vcpu_ids - 1, arch_max);
++}
++
+ static void avic_activate_vmcb(struct vcpu_svm *svm)
+ {
+ struct vmcb *vmcb = svm->vmcb01.ptr;
+- struct kvm *kvm = svm->vcpu.kvm;
++ struct kvm_vcpu *vcpu = &svm->vcpu;
+
+ vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
++
+ vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
++ vmcb->control.avic_physical_id |= avic_get_max_physical_id(vcpu);
+
+ vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
+
+@@ -177,8 +195,7 @@ static void avic_activate_vmcb(struct vc
+ */
+ if (x2avic_enabled && apic_x2apic_mode(svm->vcpu.arch.apic)) {
+ vmcb->control.int_ctl |= X2APIC_MODE_MASK;
+- vmcb->control.avic_physical_id |= min(kvm->arch.max_vcpu_ids - 1,
+- X2AVIC_MAX_PHYSICAL_ID);
++
+ /* Disabling MSR intercept for x2APIC registers */
+ avic_set_x2apic_msr_interception(svm, false);
+ } else {
+@@ -188,9 +205,6 @@ static void avic_activate_vmcb(struct vc
+ */
+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu);
+
+- /* For xAVIC and hybrid-xAVIC modes */
+- vmcb->control.avic_physical_id |= min(kvm->arch.max_vcpu_ids - 1,
+- AVIC_MAX_PHYSICAL_ID);
+ /* Enabling MSR intercept for x2APIC registers */
+ avic_set_x2apic_msr_interception(svm, true);
+ }
--- /dev/null
+From stable+bounces-225662-greg=kroah.com@vger.kernel.org Mon Mar 16 20:15:40 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 15:11:51 -0400
+Subject: KVM: SVM: Limit AVIC physical max index based on configured max_vcpu_ids
+To: stable@vger.kernel.org
+Cc: Naveen N Rao <naveen@kernel.org>, Sean Christopherson <seanjc@google.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260316191153.1326996-1-sashal@kernel.org>
+
+From: Naveen N Rao <naveen@kernel.org>
+
+[ Upstream commit 574ef752d4aea04134bc121294d717f4422c2755 ]
+
+KVM allows VMMs to specify the maximum possible APIC ID for a virtual
+machine through KVM_CAP_MAX_VCPU_ID capability so as to limit data
+structures related to APIC/x2APIC. Utilize the same to set the AVIC
+physical max index in the VMCB, similar to VMX. This helps hardware
+limit the number of entries to be scanned in the physical APIC ID table
+speeding up IPI broadcasts for virtual machines with smaller number of
+vCPUs.
+
+Unlike VMX, SVM AVIC requires a single page to be allocated for the
+Physical APIC ID table and the Logical APIC ID table, so retain the
+existing approach of allocating those during VM init.
+
+Signed-off-by: Naveen N Rao (AMD) <naveen@kernel.org>
+Link: https://lore.kernel.org/r/adb07ccdb3394cd79cb372ba6bcc69a4e4d4ef54.1757009416.git.naveen@kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Stable-dep-of: 87d0f901a9bd ("KVM: SVM: Set/clear CR8 write interception when AVIC is (de)activated")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/avic.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -161,6 +161,7 @@ static void avic_set_x2apic_msr_intercep
+ static void avic_activate_vmcb(struct vcpu_svm *svm)
+ {
+ struct vmcb *vmcb = svm->vmcb01.ptr;
++ struct kvm *kvm = svm->vcpu.kvm;
+
+ vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
+ vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
+@@ -176,7 +177,8 @@ static void avic_activate_vmcb(struct vc
+ */
+ if (x2avic_enabled && apic_x2apic_mode(svm->vcpu.arch.apic)) {
+ vmcb->control.int_ctl |= X2APIC_MODE_MASK;
+- vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID;
++ vmcb->control.avic_physical_id |= min(kvm->arch.max_vcpu_ids - 1,
++ X2AVIC_MAX_PHYSICAL_ID);
+ /* Disabling MSR intercept for x2APIC registers */
+ avic_set_x2apic_msr_interception(svm, false);
+ } else {
+@@ -187,7 +189,8 @@ static void avic_activate_vmcb(struct vc
+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu);
+
+ /* For xAVIC and hybrid-xAVIC modes */
+- vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;
++ vmcb->control.avic_physical_id |= min(kvm->arch.max_vcpu_ids - 1,
++ AVIC_MAX_PHYSICAL_ID);
+ /* Enabling MSR intercept for x2APIC registers */
+ avic_set_x2apic_msr_interception(svm, true);
+ }
--- /dev/null
+From stable+bounces-225664-greg=kroah.com@vger.kernel.org Mon Mar 16 20:15:46 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2026 15:11:53 -0400
+Subject: KVM: SVM: Set/clear CR8 write interception when AVIC is (de)activated
+To: stable@vger.kernel.org
+Cc: Sean Christopherson <seanjc@google.com>, Jim Mattson <jmattson@google.com>, "Naveen N Rao (AMD)" <naveen@kernel.org>, "Maciej S. Szmigiero" <maciej.szmigiero@oracle.com>, Paolo Bonzini <pbonzini@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260316191153.1326996-3-sashal@kernel.org>
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 87d0f901a9bd8ae6be57249c737f20ac0cace93d ]
+
+Explicitly set/clear CR8 write interception when AVIC is (de)activated to
+fix a bug where KVM leaves the interception enabled after AVIC is
+activated. E.g. if KVM emulates INIT=>WFS while AVIC is deactivated, CR8
+will remain intercepted in perpetuity.
+
+On its own, the dangling CR8 intercept is "just" a performance issue, but
+combined with the TPR sync bug fixed by commit d02e48830e3f ("KVM: SVM:
+Sync TPR from LAPIC into VMCB::V_TPR even if AVIC is active"), the danging
+intercept is fatal to Windows guests as the TPR seen by hardware gets
+wildly out of sync with reality.
+
+Note, VMX isn't affected by the bug as TPR_THRESHOLD is explicitly ignored
+when Virtual Interrupt Delivery is enabled, i.e. when APICv is active in
+KVM's world. I.e. there's no need to trigger update_cr8_intercept(), this
+is firmly an SVM implementation flaw/detail.
+
+WARN if KVM gets a CR8 write #VMEXIT while AVIC is active, as KVM should
+never enter the guest with AVIC enabled and CR8 writes intercepted.
+
+Fixes: 3bbf3565f48c ("svm: Do not intercept CR8 when enable AVIC")
+Cc: stable@vger.kernel.org
+Cc: Jim Mattson <jmattson@google.com>
+Cc: Naveen N Rao (AMD) <naveen@kernel.org>
+Cc: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
+Reviewed-by: Naveen N Rao (AMD) <naveen@kernel.org>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Link: https://patch.msgid.link/20260203190711.458413-3-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+[Squash fix to avic_deactivate_vmcb. - Paolo]
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/avic.c | 7 +++++--
+ arch/x86/kvm/svm/svm.c | 7 ++++---
+ 2 files changed, 9 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -180,12 +180,12 @@ static void avic_activate_vmcb(struct vc
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+
+ vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
+-
+ vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
+ vmcb->control.avic_physical_id |= avic_get_max_physical_id(vcpu);
+-
+ vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
+
++ svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
++
+ /*
+ * Note: KVM supports hybrid-AVIC mode, where KVM emulates x2APIC MSR
+ * accesses, while interrupt injection to a running vCPU can be
+@@ -217,6 +217,9 @@ static void avic_deactivate_vmcb(struct
+ vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
+ vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
+
++ if (!sev_es_guest(svm->vcpu.kvm))
++ svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
++
+ /*
+ * If running nested and the guest uses its own MSR bitmap, there
+ * is no need to update L0's msr bitmap
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -1032,8 +1032,7 @@ static void init_vmcb(struct kvm_vcpu *v
+ svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
+ svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
+ svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
+- if (!kvm_vcpu_apicv_active(vcpu))
+- svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
++ svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
+
+ set_dr_intercepts(svm);
+
+@@ -2598,9 +2597,11 @@ static int dr_interception(struct kvm_vc
+
+ static int cr8_write_interception(struct kvm_vcpu *vcpu)
+ {
++ u8 cr8_prev = kvm_get_cr8(vcpu);
+ int r;
+
+- u8 cr8_prev = kvm_get_cr8(vcpu);
++ WARN_ON_ONCE(kvm_vcpu_apicv_active(vcpu));
++
+ /* instruction emulation calls kvm_set_cr8() */
+ r = cr_interception(vcpu);
+ if (lapic_in_kernel(vcpu))
--- /dev/null
+From stable+bounces-224564-greg=kroah.com@vger.kernel.org Tue Mar 10 20:57:28 2026
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Tue, 10 Mar 2026 12:56:46 -0700
+Subject: lib/crypto: tests: Depend on library options rather than selecting them
+To: stable@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org, Eric Biggers <ebiggers@kernel.org>, Geert Uytterhoeven <geert@linux-m68k.org>, David Gow <david@davidgow.net>, Ard Biesheuvel <ardb@kernel.org>
+Message-ID: <20260310195646.71713-1-ebiggers@kernel.org>
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+commit 4478e8eeb87120c11e90041864c2233238b2155a upstream.
+
+The convention for KUnit tests is to have the test kconfig options
+visible only when the code they depend on is already enabled. This way
+only the tests that are relevant to the particular kernel build can be
+enabled, either manually or via KUNIT_ALL_TESTS.
+
+Update lib/crypto/tests/Kconfig to follow that convention, i.e. depend
+on the corresponding library options rather than selecting them. This
+fixes an issue where enabling KUNIT_ALL_TESTS enabled non-test code.
+
+This does mean that it becomes a bit more difficult to enable *all* the
+crypto library tests (which is what I do as a maintainer of the code),
+since doing so will now require enabling other options that select the
+libraries. Regardless, we should follow the standard KUnit convention.
+I'll also add a .kunitconfig file that does enable all these options.
+
+Note: currently most of the crypto library options are selected by
+visible options in crypto/Kconfig, which can be used to enable them
+without too much trouble. If in the future we end up with more cases
+like CRYPTO_LIB_CURVE25519 which is selected only by WIREGUARD (thus
+making CRYPTO_LIB_CURVE25519_KUNIT_TEST effectively depend on WIREGUARD
+after this commit), we could consider adding a new kconfig option that
+enables all the library code specifically for testing.
+
+Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Closes: https://lore.kernel.org/r/CAMuHMdULzMdxuTVfg8_4jdgzbzjfx-PHkcgbGSthcUx_sHRNMg@mail.gmail.com
+Fixes: 4dcf6caddaa0 ("lib/crypto: tests: Add KUnit tests for SHA-224 and SHA-256")
+Fixes: 571eaeddb67d ("lib/crypto: tests: Add KUnit tests for SHA-384 and SHA-512")
+Fixes: 6dd4d9f7919e ("lib/crypto: tests: Add KUnit tests for Poly1305")
+Fixes: 66b130607908 ("lib/crypto: tests: Add KUnit tests for SHA-1 and HMAC-SHA1")
+Fixes: d6b6aac0cdb4 ("lib/crypto: tests: Add KUnit tests for MD5 and HMAC-MD5")
+Fixes: afc4e4a5f122 ("lib/crypto: tests: Migrate Curve25519 self-test to KUnit")
+Fixes: 6401fd334ddf ("lib/crypto: tests: Add KUnit tests for BLAKE2b")
+Fixes: 15c64c47e484 ("lib/crypto: tests: Add SHA3 kunit tests")
+Fixes: b3aed551b3fc ("lib/crypto: tests: Add KUnit tests for POLYVAL")
+Fixes: ed894faccb8d ("lib/crypto: tests: Add KUnit tests for ML-DSA verification")
+Fixes: 7246fe6cd644 ("lib/crypto: tests: Add KUnit tests for NH")
+Cc: stable@vger.kernel.org
+Reviewed-by: David Gow <david@davidgow.net>
+Acked-by: Ard Biesheuvel <ardb@kernel.org>
+Link: https://lore.kernel.org/r/20260226191749.39397-1-ebiggers@kernel.org
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/crypto/tests/Kconfig | 20 +++++++-------------
+ 1 file changed, 7 insertions(+), 13 deletions(-)
+
+--- a/lib/crypto/tests/Kconfig
++++ b/lib/crypto/tests/Kconfig
+@@ -5,45 +5,41 @@ config CRYPTO_LIB_BLAKE2S_KUNIT_TEST
+ depends on KUNIT
+ default KUNIT_ALL_TESTS || CRYPTO_SELFTESTS
+ select CRYPTO_LIB_BENCHMARK_VISIBLE
+- # No need to select CRYPTO_LIB_BLAKE2S here, as that option doesn't
++ # No need to depend on CRYPTO_LIB_BLAKE2S here, as that option doesn't
+ # exist; the BLAKE2s code is always built-in for the /dev/random driver.
+ help
+ KUnit tests for the BLAKE2s cryptographic hash function.
+
+ config CRYPTO_LIB_CURVE25519_KUNIT_TEST
+ tristate "KUnit tests for Curve25519" if !KUNIT_ALL_TESTS
+- depends on KUNIT
++ depends on KUNIT && CRYPTO_LIB_CURVE25519
+ default KUNIT_ALL_TESTS || CRYPTO_SELFTESTS
+ select CRYPTO_LIB_BENCHMARK_VISIBLE
+- select CRYPTO_LIB_CURVE25519
+ help
+ KUnit tests for the Curve25519 Diffie-Hellman function.
+
+ config CRYPTO_LIB_MD5_KUNIT_TEST
+ tristate "KUnit tests for MD5" if !KUNIT_ALL_TESTS
+- depends on KUNIT
++ depends on KUNIT && CRYPTO_LIB_MD5
+ default KUNIT_ALL_TESTS || CRYPTO_SELFTESTS
+ select CRYPTO_LIB_BENCHMARK_VISIBLE
+- select CRYPTO_LIB_MD5
+ help
+ KUnit tests for the MD5 cryptographic hash function and its
+ corresponding HMAC.
+
+ config CRYPTO_LIB_POLY1305_KUNIT_TEST
+ tristate "KUnit tests for Poly1305" if !KUNIT_ALL_TESTS
+- depends on KUNIT
++ depends on KUNIT && CRYPTO_LIB_POLY1305
+ default KUNIT_ALL_TESTS || CRYPTO_SELFTESTS
+ select CRYPTO_LIB_BENCHMARK_VISIBLE
+- select CRYPTO_LIB_POLY1305
+ help
+ KUnit tests for the Poly1305 library functions.
+
+ config CRYPTO_LIB_SHA1_KUNIT_TEST
+ tristate "KUnit tests for SHA-1" if !KUNIT_ALL_TESTS
+- depends on KUNIT
++ depends on KUNIT && CRYPTO_LIB_SHA1
+ default KUNIT_ALL_TESTS || CRYPTO_SELFTESTS
+ select CRYPTO_LIB_BENCHMARK_VISIBLE
+- select CRYPTO_LIB_SHA1
+ help
+ KUnit tests for the SHA-1 cryptographic hash function and its
+ corresponding HMAC.
+@@ -52,10 +48,9 @@ config CRYPTO_LIB_SHA1_KUNIT_TEST
+ # included, for consistency with the naming used elsewhere (e.g. CRYPTO_SHA256).
+ config CRYPTO_LIB_SHA256_KUNIT_TEST
+ tristate "KUnit tests for SHA-224 and SHA-256" if !KUNIT_ALL_TESTS
+- depends on KUNIT
++ depends on KUNIT && CRYPTO_LIB_SHA256
+ default KUNIT_ALL_TESTS || CRYPTO_SELFTESTS
+ select CRYPTO_LIB_BENCHMARK_VISIBLE
+- select CRYPTO_LIB_SHA256
+ help
+ KUnit tests for the SHA-224 and SHA-256 cryptographic hash functions
+ and their corresponding HMACs.
+@@ -64,10 +59,9 @@ config CRYPTO_LIB_SHA256_KUNIT_TEST
+ # included, for consistency with the naming used elsewhere (e.g. CRYPTO_SHA512).
+ config CRYPTO_LIB_SHA512_KUNIT_TEST
+ tristate "KUnit tests for SHA-384 and SHA-512" if !KUNIT_ALL_TESTS
+- depends on KUNIT
++ depends on KUNIT && CRYPTO_LIB_SHA512
+ default KUNIT_ALL_TESTS || CRYPTO_SELFTESTS
+ select CRYPTO_LIB_BENCHMARK_VISIBLE
+- select CRYPTO_LIB_SHA512
+ help
+ KUnit tests for the SHA-384 and SHA-512 cryptographic hash functions
+ and their corresponding HMACs.
--- /dev/null
+From stable+bounces-226102-greg=kroah.com@vger.kernel.org Tue Mar 17 16:25:04 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 11:24:50 -0400
+Subject: mm/damon/core: disallow non-power of two min_region_sz
+To: stable@vger.kernel.org
+Cc: SeongJae Park <sj@kernel.org>, Quanmin Yan <yanquanmin1@huawei.com>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260317152450.191424-1-sashal@kernel.org>
+
+From: SeongJae Park <sj@kernel.org>
+
+[ Upstream commit c80f46ac228b48403866d65391ad09bdf0e8562a ]
+
+DAMON core uses min_region_sz parameter value as the DAMON region
+alignment. The alignment is made using ALIGN() and ALIGN_DOWN(), which
+support only the power of two alignments. But DAMON core API callers can
+set min_region_sz to an arbitrary number. Users can also set it
+indirectly, using addr_unit.
+
+When the alignment is not properly set, DAMON behavior becomes difficult
+to expect and understand, makes it effectively broken. It doesn't cause a
+kernel crash-like significant issue, though.
+
+Fix the issue by disallowing min_region_sz input that is not a power of
+two. Add the check to damon_commit_ctx(), as all DAMON API callers who
+set min_region_sz uses the function.
+
+This can be a sort of behavioral change, but it does not break users, for
+the following reasons. As the symptom is making DAMON effectively broken,
+it is not reasonable to believe there are real use cases of non-power of
+two min_region_sz. There is no known use case or issue reports from the
+setup, either.
+
+In future, if we find real use cases of non-power of two alignments and we
+can support it with low enough overhead, we can consider moving the
+restriction. But, for now, simply disallowing the corner case should be
+good enough as a hot fix.
+
+Link: https://lkml.kernel.org/r/20260214214124.87689-1-sj@kernel.org
+Fixes: d8f867fa0825 ("mm/damon: add damon_ctx->min_sz_region")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: Quanmin Yan <yanquanmin1@huawei.com>
+Cc: <stable@vger.kernel.org> [6.18+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ min_region_sz => min_sz_region ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1236,6 +1236,9 @@ int damon_commit_ctx(struct damon_ctx *d
+ {
+ int err;
+
++ if (!is_power_of_2(src->min_sz_region))
++ return -EINVAL;
++
+ err = damon_commit_schemes(dst, src);
+ if (err)
+ return err;
--- /dev/null
+From b570f37a2ce480be26c665345c5514686a8a0274 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= <thomas.hellstrom@linux.intel.com>
+Date: Tue, 10 Feb 2026 12:56:53 +0100
+Subject: mm: Fix a hmm_range_fault() livelock / starvation problem
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+
+commit b570f37a2ce480be26c665345c5514686a8a0274 upstream.
+
+If hmm_range_fault() fails a folio_trylock() in do_swap_page,
+trying to acquire the lock of a device-private folio for migration,
+to ram, the function will spin until it succeeds grabbing the lock.
+
+However, if the process holding the lock is depending on a work
+item to be completed, which is scheduled on the same CPU as the
+spinning hmm_range_fault(), that work item might be starved and
+we end up in a livelock / starvation situation which is never
+resolved.
+
+This can happen, for example if the process holding the
+device-private folio lock is stuck in
+ migrate_device_unmap()->lru_add_drain_all()
+since lru_add_drain_all() requires a short work-item
+to be run on all online cpus to complete.
+
+A prerequisite for this to happen is:
+a) Both zone device and system memory folios are considered in
+ migrate_device_unmap(), so that there is a reason to call
+ lru_add_drain_all() for a system memory folio while a
+ folio lock is held on a zone device folio.
+b) The zone device folio has an initial mapcount > 1 which causes
+ at least one migration PTE entry insertion to be deferred to
+ try_to_migrate(), which can happen after the call to
+ lru_add_drain_all().
+c) No or voluntary only preemption.
+
+This all seems pretty unlikely to happen, but indeed is hit by
+the "xe_exec_system_allocator" igt test.
+
+Resolve this by waiting for the folio to be unlocked if the
+folio_trylock() fails in do_swap_page().
+
+Rename migration_entry_wait_on_locked() to
+softleaf_entry_wait_unlock() and update its documentation to
+indicate the new use-case.
+
+Future code improvements might consider moving
+the lru_add_drain_all() call in migrate_device_unmap() to be
+called *after* all pages have migration entries inserted.
+That would eliminate also b) above.
+
+v2:
+- Instead of a cond_resched() in hmm_range_fault(),
+ eliminate the problem by waiting for the folio to be unlocked
+ in do_swap_page() (Alistair Popple, Andrew Morton)
+v3:
+- Add a stub migration_entry_wait_on_locked() for the
+ !CONFIG_MIGRATION case. (Kernel Test Robot)
+v4:
+- Rename migrate_entry_wait_on_locked() to
+ softleaf_entry_wait_on_locked() and update docs (Alistair Popple)
+v5:
+- Add a WARN_ON_ONCE() for the !CONFIG_MIGRATION
+ version of softleaf_entry_wait_on_locked().
+- Modify wording around function names in the commit message
+ (Andrew Morton)
+
+Suggested-by: Alistair Popple <apopple@nvidia.com>
+Fixes: 1afaeb8293c9 ("mm/migrate: Trylock device page in do_swap_page")
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Jason Gunthorpe <jgg@mellanox.com>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Leon Romanovsky <leon@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: linux-mm@kvack.org
+Cc: <dri-devel@lists.freedesktop.org>
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v6.15+
+Reviewed-by: John Hubbard <jhubbard@nvidia.com> #v3
+Reviewed-by: Alistair Popple <apopple@nvidia.com>
+Link: https://patch.msgid.link/20260210115653.92413-1-thomas.hellstrom@linux.intel.com
+(cherry picked from commit a69d1ab971a624c6f112cea61536569d579c3215)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/migrate.h | 8 ++++++++
+ mm/filemap.c | 13 +++++++++----
+ mm/memory.c | 3 ++-
+ 3 files changed, 19 insertions(+), 5 deletions(-)
+
+--- a/include/linux/migrate.h
++++ b/include/linux/migrate.h
+@@ -97,6 +97,14 @@ static inline int set_movable_ops(const
+ return -ENOSYS;
+ }
+
++static inline void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
++ __releases(ptl)
++{
++ WARN_ON_ONCE(1);
++
++ spin_unlock(ptl);
++}
++
+ #endif /* CONFIG_MIGRATION */
+
+ #ifdef CONFIG_NUMA_BALANCING
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1386,14 +1386,16 @@ repeat:
+
+ #ifdef CONFIG_MIGRATION
+ /**
+- * migration_entry_wait_on_locked - Wait for a migration entry to be removed
+- * @entry: migration swap entry.
++ * migration_entry_wait_on_locked - Wait for a migration entry or
++ * device_private entry to be removed.
++ * @entry: migration or device_private swap entry.
+ * @ptl: already locked ptl. This function will drop the lock.
+ *
+- * Wait for a migration entry referencing the given page to be removed. This is
++ * Wait for a migration entry referencing the given page, or device_private
++ * entry referencing a dvice_private page to be unlocked. This is
+ * equivalent to folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE) except
+ * this can be called without taking a reference on the page. Instead this
+- * should be called while holding the ptl for the migration entry referencing
++ * should be called while holding the ptl for @entry referencing
+ * the page.
+ *
+ * Returns after unlocking the ptl.
+@@ -1435,6 +1437,9 @@ void migration_entry_wait_on_locked(swp_
+ * If a migration entry exists for the page the migration path must hold
+ * a valid reference to the page, and it must take the ptl to remove the
+ * migration entry. So the page is valid until the ptl is dropped.
++ * Similarly any path attempting to drop the last reference to a
++ * device-private page needs to grab the ptl to remove the device-private
++ * entry.
+ */
+ spin_unlock(ptl);
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -4642,7 +4642,8 @@ vm_fault_t do_swap_page(struct vm_fault
+ unlock_page(vmf->page);
+ put_page(vmf->page);
+ } else {
+- pte_unmap_unlock(vmf->pte, vmf->ptl);
++ pte_unmap(vmf->pte);
++ migration_entry_wait_on_locked(entry, vmf->ptl);
+ }
+ } else if (is_hwpoison_entry(entry)) {
+ ret = VM_FAULT_HWPOISON;
--- /dev/null
+From stable+bounces-225883-greg=kroah.com@vger.kernel.org Tue Mar 17 13:08:48 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 08:04:52 -0400
+Subject: mmc: dw_mmc-rockchip: Add memory clock auto-gating support
+To: stable@vger.kernel.org
+Cc: Shawn Lin <shawn.lin@rock-chips.com>, Ulf Hansson <ulf.hansson@linaro.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260317120453.135633-1-sashal@kernel.org>
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+[ Upstream commit ff6f0286c896f062853552097220dd93961be9c4 ]
+
+Per design recommendations, the memory clock can be gated when there
+is no in-flight transfer, which helps save power. This feature is
+introduced alongside internal phase support, and this patch enables it.
+
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Stable-dep-of: 6465a8bbb0f6 ("mmc: dw_mmc-rockchip: Fix runtime PM support for internal phase support")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/dw_mmc-rockchip.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/mmc/host/dw_mmc-rockchip.c
++++ b/drivers/mmc/host/dw_mmc-rockchip.c
+@@ -19,6 +19,8 @@
+ #define RK3288_CLKGEN_DIV 2
+ #define SDMMC_TIMING_CON0 0x130
+ #define SDMMC_TIMING_CON1 0x134
++#define SDMMC_MISC_CON 0x138
++#define MEM_CLK_AUTOGATE_ENABLE BIT(5)
+ #define ROCKCHIP_MMC_DELAY_SEL BIT(10)
+ #define ROCKCHIP_MMC_DEGREE_MASK 0x3
+ #define ROCKCHIP_MMC_DEGREE_OFFSET 1
+@@ -470,6 +472,7 @@ static int dw_mci_rk3576_parse_dt(struct
+
+ static int dw_mci_rockchip_init(struct dw_mci *host)
+ {
++ struct dw_mci_rockchip_priv_data *priv = host->priv;
+ int ret, i;
+
+ /* It is slot 8 on Rockchip SoCs */
+@@ -494,6 +497,9 @@ static int dw_mci_rockchip_init(struct d
+ dev_warn(host->dev, "no valid minimum freq: %d\n", ret);
+ }
+
++ if (priv->internal_phase)
++ mci_writel(host, MISC_CON, MEM_CLK_AUTOGATE_ENABLE);
++
+ return 0;
+ }
+
--- /dev/null
+From stable+bounces-225884-greg=kroah.com@vger.kernel.org Tue Mar 17 13:05:00 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Mar 2026 08:04:53 -0400
+Subject: mmc: dw_mmc-rockchip: Fix runtime PM support for internal phase support
+To: stable@vger.kernel.org
+Cc: Shawn Lin <shawn.lin@rock-chips.com>, Marco Schirrmeister <mschirrmeister@gmail.com>, Heiko Stuebner <heiko@sntech.de>, Ulf Hansson <ulf.hansson@linaro.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260317120453.135633-2-sashal@kernel.org>
+
+From: Shawn Lin <shawn.lin@rock-chips.com>
+
+[ Upstream commit 6465a8bbb0f6ad98aeb66dc9ea19c32c193a610b ]
+
+RK3576 is the first platform to introduce internal phase support, and
+subsequent platforms are expected to adopt a similar design. In this
+architecture, runtime suspend powers off the attached power domain, which
+resets registers, including vendor-specific ones such as SDMMC_TIMING_CON0,
+SDMMC_TIMING_CON1, and SDMMC_MISC_CON. These registers must be saved and
+restored, a requirement that falls outside the scope of the dw_mmc core.
+
+Fixes: 59903441f5e4 ("mmc: dw_mmc-rockchip: Add internal phase support")
+Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
+Tested-by: Marco Schirrmeister <mschirrmeister@gmail.com>
+Reviewed-by: Heiko Stuebner <heiko@sntech.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/host/dw_mmc-rockchip.c | 38 ++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 37 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/dw_mmc-rockchip.c
++++ b/drivers/mmc/host/dw_mmc-rockchip.c
+@@ -36,6 +36,8 @@ struct dw_mci_rockchip_priv_data {
+ int default_sample_phase;
+ int num_phases;
+ bool internal_phase;
++ int sample_phase;
++ int drv_phase;
+ };
+
+ /*
+@@ -574,9 +576,43 @@ static void dw_mci_rockchip_remove(struc
+ dw_mci_pltfm_remove(pdev);
+ }
+
++static int dw_mci_rockchip_runtime_suspend(struct device *dev)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ struct dw_mci *host = platform_get_drvdata(pdev);
++ struct dw_mci_rockchip_priv_data *priv = host->priv;
++
++ if (priv->internal_phase) {
++ priv->sample_phase = rockchip_mmc_get_phase(host, true);
++ priv->drv_phase = rockchip_mmc_get_phase(host, false);
++ }
++
++ return dw_mci_runtime_suspend(dev);
++}
++
++static int dw_mci_rockchip_runtime_resume(struct device *dev)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ struct dw_mci *host = platform_get_drvdata(pdev);
++ struct dw_mci_rockchip_priv_data *priv = host->priv;
++ int ret;
++
++ ret = dw_mci_runtime_resume(dev);
++ if (ret)
++ return ret;
++
++ if (priv->internal_phase) {
++ rockchip_mmc_set_phase(host, true, priv->sample_phase);
++ rockchip_mmc_set_phase(host, false, priv->drv_phase);
++ mci_writel(host, MISC_CON, MEM_CLK_AUTOGATE_ENABLE);
++ }
++
++ return ret;
++}
++
+ static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+- RUNTIME_PM_OPS(dw_mci_runtime_suspend, dw_mci_runtime_resume, NULL)
++ RUNTIME_PM_OPS(dw_mci_rockchip_runtime_suspend, dw_mci_rockchip_runtime_resume, NULL)
+ };
+
+ static struct platform_driver dw_mci_rockchip_pltfm_driver = {
--- /dev/null
+From stable+bounces-224565-greg=kroah.com@vger.kernel.org Tue Mar 10 21:17:02 2026
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Tue, 10 Mar 2026 13:16:36 -0700
+Subject: net/tcp-md5: Fix MAC comparison to be constant-time
+To: stable@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org, netdev@vger.kernel.org, Dmitry Safonov <0x7f454c46@gmail.com>, Eric Biggers <ebiggers@kernel.org>, Jakub Kicinski <kuba@kernel.org>
+Message-ID: <20260310201636.119877-1-ebiggers@kernel.org>
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+commit 46d0d6f50dab706637f4c18a470aac20a21900d3 upstream.
+
+To prevent timing attacks, MACs need to be compared in constant
+time. Use the appropriate helper function for this.
+
+Fixes: cfb6eeb4c860 ("[TCP]: MD5 Signature Option (RFC2385) support.")
+Fixes: 658ddaaf6694 ("tcp: md5: RST: getting md5 key from listener")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Link: https://patch.msgid.link/20260302203409.13388-1-ebiggers@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c | 3 ++-
+ net/ipv4/tcp_ipv4.c | 3 ++-
+ net/ipv6/tcp_ipv6.c | 3 ++-
+ 3 files changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -244,6 +244,7 @@
+ #define pr_fmt(fmt) "TCP: " fmt
+
+ #include <crypto/hash.h>
++#include <crypto/utils.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/types.h>
+@@ -4899,7 +4900,7 @@ tcp_inbound_md5_hash(const struct sock *
+ else
+ genhash = tp->af_specific->calc_md5_hash(newhash, key,
+ NULL, skb);
+- if (genhash || memcmp(hash_location, newhash, 16) != 0) {
++ if (genhash || crypto_memneq(hash_location, newhash, 16)) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
+ trace_tcp_hash_md5_mismatch(sk, skb);
+ return SKB_DROP_REASON_TCP_MD5FAILURE;
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -87,6 +87,7 @@
+ #include <linux/skbuff_ref.h>
+
+ #include <crypto/hash.h>
++#include <crypto/utils.h>
+ #include <linux/scatterlist.h>
+
+ #include <trace/events/tcp.h>
+@@ -840,7 +841,7 @@ static void tcp_v4_send_reset(const stru
+
+
+ genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
+- if (genhash || memcmp(md5_hash_location, newhash, 16) != 0)
++ if (genhash || crypto_memneq(md5_hash_location, newhash, 16))
+ goto out;
+
+ }
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -68,6 +68,7 @@
+ #include <linux/seq_file.h>
+
+ #include <crypto/hash.h>
++#include <crypto/utils.h>
+ #include <linux/scatterlist.h>
+
+ #include <trace/events/tcp.h>
+@@ -1089,7 +1090,7 @@ static void tcp_v6_send_reset(const stru
+ key.type = TCP_KEY_MD5;
+
+ genhash = tcp_v6_md5_hash_skb(newhash, key.md5_key, NULL, skb);
+- if (genhash || memcmp(md5_hash_location, newhash, 16) != 0)
++ if (genhash || crypto_memneq(md5_hash_location, newhash, 16))
+ goto out;
+ }
+ #endif
i3c-mipi-i3c-hci-add-missing-tid-field-to-no-op-command-descriptor.patch
i3c-mipi-i3c-hci-fix-race-in-dma-ring-dequeue.patch
i3c-mipi-i3c-hci-correct-ring_ctrl_abort-handling-in-dma-dequeue.patch
+cxl-acpi-fix-cxl_acpi-and-cxl_pmem-kconfig-tristate-mismatch.patch
+mm-fix-a-hmm_range_fault-livelock-starvation-problem.patch
+drm-gud-rearrange-gud_probe-to-prepare-for-function-splitting.patch
+drm-gud-fix-null-crtc-dereference-on-display-disable.patch
+mmc-dw_mmc-rockchip-add-memory-clock-auto-gating-support.patch
+mmc-dw_mmc-rockchip-fix-runtime-pm-support-for-internal-phase-support.patch
+kvm-arm64-gic-set-vgic_model-before-initing-private-irqs.patch
+kvm-arm64-eagerly-init-vgic-dist-redist-on-vgic-creation.patch
+kvm-svm-limit-avic-physical-max-index-based-on-configured-max_vcpu_ids.patch
+kvm-svm-add-a-helper-to-look-up-the-max-physical-id-for-avic.patch
+kvm-svm-set-clear-cr8-write-interception-when-avic-is-de-activated.patch
+kbuild-leave-objtool-binary-around-with-make-clean.patch
+smb-client-compare-macs-in-constant-time.patch
+ksmbd-compare-macs-in-constant-time.patch
+lib-crypto-tests-depend-on-library-options-rather-than-selecting-them.patch
+net-tcp-md5-fix-mac-comparison-to-be-constant-time.patch
+io_uring-ensure-ctx-rings-is-stable-for-task-work-flags-manipulation.patch
+io_uring-eventfd-use-ctx-rings_rcu-for-flags-checking.patch
+mm-damon-core-disallow-non-power-of-two-min_region_sz.patch
--- /dev/null
+From stable+bounces-224553-greg=kroah.com@vger.kernel.org Tue Mar 10 20:50:53 2026
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Tue, 10 Mar 2026 12:50:20 -0700
+Subject: smb: client: Compare MACs in constant time
+To: stable@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org, linux-cifs@vger.kernel.org, Eric Biggers <ebiggers@kernel.org>, "Paulo Alcantara (Red Hat)" <pc@manguebit.org>, Steve French <stfrench@microsoft.com>
+Message-ID: <20260310195020.70563-1-ebiggers@kernel.org>
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+commit 26bc83b88bbbf054f0980a4a42047a8d1e210e4c upstream.
+
+To prevent timing attacks, MAC comparisons need to be constant-time.
+Replace the memcmp() with the correct function, crypto_memneq().
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Acked-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/client/cifsencrypt.c | 3 ++-
+ fs/smb/client/smb2transport.c | 4 +++-
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/fs/smb/client/cifsencrypt.c
++++ b/fs/smb/client/cifsencrypt.c
+@@ -26,6 +26,7 @@
+ #include <crypto/arc4.h>
+ #include <crypto/md5.h>
+ #include <crypto/sha2.h>
++#include <crypto/utils.h>
+
+ static int cifs_sig_update(struct cifs_calc_sig_ctx *ctx,
+ const u8 *data, size_t len)
+@@ -277,7 +278,7 @@ int cifs_verify_signature(struct smb_rqs
+ /* cifs_dump_mem("what we think it should be: ",
+ what_we_think_sig_should_be, 16); */
+
+- if (memcmp(server_response_sig, what_we_think_sig_should_be, 8))
++ if (crypto_memneq(server_response_sig, what_we_think_sig_should_be, 8))
+ return -EACCES;
+ else
+ return 0;
+--- a/fs/smb/client/smb2transport.c
++++ b/fs/smb/client/smb2transport.c
+@@ -20,6 +20,7 @@
+ #include <linux/highmem.h>
+ #include <crypto/aead.h>
+ #include <crypto/sha2.h>
++#include <crypto/utils.h>
+ #include "cifsglob.h"
+ #include "cifsproto.h"
+ #include "smb2proto.h"
+@@ -617,7 +618,8 @@ smb2_verify_signature(struct smb_rqst *r
+ if (rc)
+ return rc;
+
+- if (memcmp(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE)) {
++ if (crypto_memneq(server_response_sig, shdr->Signature,
++ SMB2_SIGNATURE_SIZE)) {
+ cifs_dbg(VFS, "sign fail cmd 0x%x message id 0x%llx\n",
+ shdr->Command, shdr->MessageId);
+ return -EACCES;