--- /dev/null
+From stable+bounces-223504-greg=kroah.com@vger.kernel.org Mon Mar 9 08:18:20 2026
+From: Johnny Hao <johnny_haocn@sina.com>
+Date: Mon, 9 Mar 2026 15:17:15 +0800
+Subject: dlm: fix possible lkb_resource null dereference
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, Alexander Aring <aahringo@redhat.com>, David Teigland <teigland@redhat.com>, Johnny Hao <johnny_haocn@sina.com>
+Message-ID: <20260309071715.2380423-1-johnny_haocn@sina.com>
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit b98333c67daf887c724cd692e88e2db9418c0861 ]
+
+This patch fixes a possible null pointer dereference when this function is
+called from request_lock() as lkb->lkb_resource is not assigned yet,
+only after validate_lock_args() by calling attach_lkb(). Another issue
+is that a resource name could be a non printable bytearray and we cannot
+assume to be ASCII coded.
+
+The log functionality is probably never being hit when DLM is used in
+normal way and no debug logging is enabled. The null pointer dereference
+can only occur on a new created lkb that does not have the resource
+assigned yet, it probably never hits the null pointer dereference but we
+should be sure that other changes might not change this behaviour and we
+actually can hit the mentioned null pointer dereference.
+
+In this patch we just drop the printout of the resource name, the lkb id
+is enough to make a possible connection to a resource name if this
+exists.
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+[ The context change is due to the commit e1af8728f600
+("fs: dlm: move internal flags to atomic ops") in v6.4
+which is irrelevant to the logic of this patch. ]
+Signed-off-by: Johnny Hao <johnny_haocn@sina.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/dlm/lock.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -2908,16 +2908,14 @@ static int validate_lock_args(struct dlm
+ case -EINVAL:
+ /* annoy the user because dlm usage is wrong */
+ WARN_ON(1);
+- log_error(ls, "%s %d %x %x %x %d %d %s", __func__,
++ log_error(ls, "%s %d %x %x %x %d %d", __func__,
+ rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
+- lkb->lkb_status, lkb->lkb_wait_type,
+- lkb->lkb_resource->res_name);
++ lkb->lkb_status, lkb->lkb_wait_type);
+ break;
+ default:
+- log_debug(ls, "%s %d %x %x %x %d %d %s", __func__,
++ log_debug(ls, "%s %d %x %x %x %d %d", __func__,
+ rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
+- lkb->lkb_status, lkb->lkb_wait_type,
+- lkb->lkb_resource->res_name);
++ lkb->lkb_status, lkb->lkb_wait_type);
+ break;
+ }
+
--- /dev/null
+From a143545855bc2c6e1330f6f57ae375ac44af00a7 Mon Sep 17 00:00:00 2001
+From: Guodong Xu <guodong@riscstar.com>
+Date: Tue, 16 Dec 2025 22:10:06 +0800
+Subject: dmaengine: mmp_pdma: Fix race condition in mmp_pdma_residue()
+
+From: Guodong Xu <guodong@riscstar.com>
+
+commit a143545855bc2c6e1330f6f57ae375ac44af00a7 upstream.
+
+Add proper locking in mmp_pdma_residue() to prevent use-after-free when
+accessing descriptor list and descriptor contents.
+
+The race occurs when multiple threads call tx_status() while the tasklet
+on another CPU is freeing completed descriptors:
+
+CPU 0 CPU 1
+----- -----
+mmp_pdma_tx_status()
+mmp_pdma_residue()
+ -> NO LOCK held
+ list_for_each_entry(sw, ..)
+ DMA interrupt
+ dma_do_tasklet()
+ -> spin_lock(&desc_lock)
+ list_move(sw->node, ...)
+ spin_unlock(&desc_lock)
+ | dma_pool_free(sw) <- FREED!
+ -> access sw->desc <- UAF!
+
+This issue can be reproduced when running dmatest on the same channel with
+multiple threads (threads_per_chan > 1).
+
+Fix by protecting the chain_running list iteration and descriptor access
+with the chan->desc_lock spinlock.
+
+Signed-off-by: Juan Li <lijuan@linux.spacemit.com>
+Signed-off-by: Guodong Xu <guodong@riscstar.com>
+Link: https://patch.msgid.link/20251216-mmp-pdma-race-v1-1-976a224bb622@riscstar.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+[ Minor context conflict resolved. ]
+Signed-off-by: Wenshan Lan <jetlan9@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/dma/mmp_pdma.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/dma/mmp_pdma.c
++++ b/drivers/dma/mmp_pdma.c
+@@ -764,6 +764,7 @@ static unsigned int mmp_pdma_residue(str
+ {
+ struct mmp_pdma_desc_sw *sw;
+ u32 curr, residue = 0;
++ unsigned long flags;
+ bool passed = false;
+ bool cyclic = chan->cyclic_first != NULL;
+
+@@ -779,6 +780,8 @@ static unsigned int mmp_pdma_residue(str
+ else
+ curr = readl(chan->phy->base + DSADR(chan->phy->idx));
+
++ spin_lock_irqsave(&chan->desc_lock, flags);
++
+ list_for_each_entry(sw, &chan->chain_running, node) {
+ u32 start, end, len;
+
+@@ -822,6 +825,7 @@ static unsigned int mmp_pdma_residue(str
+ continue;
+
+ if (sw->async_tx.cookie == cookie) {
++ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ return residue;
+ } else {
+ residue = 0;
+@@ -829,6 +833,8 @@ static unsigned int mmp_pdma_residue(str
+ }
+ }
+
++ spin_unlock_irqrestore(&chan->desc_lock, flags);
++
+ /* We should only get here in case of cyclic transactions */
+ return residue;
+ }
--- /dev/null
+From rosenp@gmail.com Sat Mar 21 06:45:19 2026
+From: Rosen Penev <rosenp@gmail.com>
+Date: Fri, 20 Mar 2026 22:44:52 -0700
+Subject: drm/amd/display: Add pixel_clock to amd_pp_display_configuration
+To: stable@vger.kernel.org
+Cc: "Alex Deucher" <alexander.deucher@amd.com>, "Christian König" <christian.koenig@amd.com>, "Pan, Xinhui" <Xinhui.Pan@amd.com>, "David Airlie" <airlied@gmail.com>, "Daniel Vetter" <daniel@ffwll.ch>, "Harry Wentland" <harry.wentland@amd.com>, "Leo Li" <sunpeng.li@amd.com>, "Rodrigo Siqueira" <Rodrigo.Siqueira@amd.com>, "Evan Quan" <evan.quan@amd.com>, "Mario Limonciello" <mario.limonciello@amd.com>, "Sasha Levin" <sashal@kernel.org>, "Rosen Penev" <rosenp@gmail.com>, "Lijo Lazar" <lijo.lazar@amd.com>, "Ma Jun" <Jun.Ma2@amd.com>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Srinivasan Shanmugam" <srinivasan.shanmugam@amd.com>, "Mario Limonciello (AMD)" <superm1@kernel.org>, "Zhigang Luo" <Zhigang.Luo@amd.com>, "Bert Karwatzki" <spasswolf@web.de>, "Ray Wu" <ray.wu@amd.com>, "Wayne Lin" <Wayne.Lin@amd.com>, "Roman Li" <Roman.Li@amd.com>, "Hersen Wu" <hersenxs.wu@amd.com>, "Timur Kristóf" <timur.kristof@gmail.com>, "Alex Hung" <alex.hung@amd.com>, decce6 <decce6@proton.me>, "Wentao Liang" <vulab@iscas.ac.cn>, amd-gfx@lists.freedesktop.org (open list:RADEON and AMDGPU DRM DRIVERS), dri-devel@lists.freedesktop.org (open list:DRM DRIVERS), linux-kernel@vger.kernel.org (open list)
+Message-ID: <20260321054453.19683-4-rosenp@gmail.com>
+
+From: Timur Kristóf <timur.kristof@gmail.com>
+
+[ Upstream commit b515dcb0dc4e85d8254f5459cfb32fce88dacbfb ]
+
+This commit adds the pixel_clock field to the display config
+struct so that power management (DPM) can use it.
+
+We currently don't have a proper bandwidth calculation on old
+GPUs with DCE 6-10 because dce_calcs only supports DCE 11+.
+So the power management (DPM) on these GPUs may need to make
+ad-hoc decisions for display based on the pixel clock.
+
+Also rename sym_clock to pixel_clock in dm_pp_single_disp_config
+to avoid confusion with other code where the sym_clock refers to
+the DisplayPort symbol clock.
+
+Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rosen Penev <rosenp@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 1 +
+ drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dm_services_types.h | 2 +-
+ drivers/gpu/drm/amd/include/dm_pp_interface.h | 1 +
+ 4 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -97,6 +97,7 @@ bool dm_pp_apply_display_requirements(
+ const struct dm_pp_single_disp_config *dc_cfg =
+ &pp_display_cfg->disp_configs[i];
+ adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
++ adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock;
+ }
+
+ amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+@@ -164,7 +164,7 @@ void dce110_fill_display_configs(
+ stream->link->cur_link_settings.link_rate;
+ cfg->link_settings.link_spread =
+ stream->link->cur_link_settings.link_spread;
+- cfg->sym_clock = stream->phy_pix_clk;
++ cfg->pixel_clock = stream->phy_pix_clk;
+ /* Round v_refresh*/
+ cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
+ cfg->v_refresh /= stream->timing.h_total;
+--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+@@ -127,7 +127,7 @@ struct dm_pp_single_disp_config {
+ uint32_t src_height;
+ uint32_t src_width;
+ uint32_t v_refresh;
+- uint32_t sym_clock; /* HDMI only */
++ uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */
+ struct dc_link_settings link_settings; /* DP only */
+ };
+
+--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
++++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
+@@ -66,6 +66,7 @@ struct single_display_configuration
+ uint32_t view_resolution_cy;
+ enum amd_pp_display_config_type displayconfigtype;
+ uint32_t vertical_refresh; /* for active display */
++ uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */
+ };
+
+ #define MAX_NUM_DISPLAY 32
--- /dev/null
+From rosenp@gmail.com Sat Mar 21 06:45:21 2026
+From: Rosen Penev <rosenp@gmail.com>
+Date: Fri, 20 Mar 2026 22:44:53 -0700
+Subject: drm/amd/pm: Use pm_display_cfg in legacy DPM (v2)
+To: stable@vger.kernel.org
+Cc: "Alex Deucher" <alexander.deucher@amd.com>, "Christian König" <christian.koenig@amd.com>, "Pan, Xinhui" <Xinhui.Pan@amd.com>, "David Airlie" <airlied@gmail.com>, "Daniel Vetter" <daniel@ffwll.ch>, "Harry Wentland" <harry.wentland@amd.com>, "Leo Li" <sunpeng.li@amd.com>, "Rodrigo Siqueira" <Rodrigo.Siqueira@amd.com>, "Evan Quan" <evan.quan@amd.com>, "Mario Limonciello" <mario.limonciello@amd.com>, "Sasha Levin" <sashal@kernel.org>, "Rosen Penev" <rosenp@gmail.com>, "Lijo Lazar" <lijo.lazar@amd.com>, "Ma Jun" <Jun.Ma2@amd.com>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Srinivasan Shanmugam" <srinivasan.shanmugam@amd.com>, "Mario Limonciello (AMD)" <superm1@kernel.org>, "Zhigang Luo" <Zhigang.Luo@amd.com>, "Bert Karwatzki" <spasswolf@web.de>, "Ray Wu" <ray.wu@amd.com>, "Wayne Lin" <Wayne.Lin@amd.com>, "Roman Li" <Roman.Li@amd.com>, "Hersen Wu" <hersenxs.wu@amd.com>, "Timur Kristóf" <timur.kristof@gmail.com>, "Alex Hung" <alex.hung@amd.com>, decce6 <decce6@proton.me>, "Wentao Liang" <vulab@iscas.ac.cn>, amd-gfx@lists.freedesktop.org (open list:RADEON and AMDGPU DRM DRIVERS), dri-devel@lists.freedesktop.org (open list:DRM DRIVERS), linux-kernel@vger.kernel.org (open list)
+Message-ID: <20260321054453.19683-5-rosenp@gmail.com>
+
+From: Timur Kristóf <timur.kristof@gmail.com>
+
+[ Upstream commit 9d73b107a61b73e7101d4b728ddac3d2c77db111 ]
+
+This commit is necessary for DC to function well with chips
+that use the legacy power management code, ie. SI and KV.
+Communicate display information from DC to the legacy PM code.
+
+Currently DC uses pm_display_cfg to communicate power management
+requirements from the display code to the DPM code.
+However, the legacy (non-DC) code path used different fields
+and therefore could not take into account anything from DC.
+
+Change the legacy display code to fill the same pm_display_cfg
+struct as DC and use the same in the legacy DPM code.
+
+To ease review and reduce churn, this commit does not yet
+delete the now unneeded code, that is done in the next commit.
+
+v2:
+Rebase.
+Fix single_display in amdgpu_dpm_pick_power_state.
+
+Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rosen Penev <rosenp@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c | 67 +++++++++++++++++++++++
+ drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h | 2
+ drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 4 -
+ drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 6 +-
+ drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 65 +++++++---------------
+ drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 11 ---
+ 6 files changed, 97 insertions(+), 58 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
+@@ -100,3 +100,70 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgp
+
+ return vrefresh;
+ }
++
++void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev)
++{
++ struct drm_device *ddev = adev_to_drm(adev);
++ struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
++ struct single_display_configuration *display_cfg;
++ struct drm_crtc *crtc;
++ struct amdgpu_crtc *amdgpu_crtc;
++ struct amdgpu_connector *conn;
++ int num_crtcs = 0;
++ int vrefresh;
++ u32 vblank_in_pixels, vblank_time_us;
++
++ cfg->min_vblank_time = 0xffffffff; /* if the displays are off, vblank time is max */
++
++ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
++ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
++ amdgpu_crtc = to_amdgpu_crtc(crtc);
++
++ /* The array should only contain active displays. */
++ if (!amdgpu_crtc->enabled)
++ continue;
++
++ conn = to_amdgpu_connector(amdgpu_crtc->connector);
++ display_cfg = &adev->pm.pm_display_cfg.displays[num_crtcs++];
++
++ if (amdgpu_crtc->hw_mode.clock) {
++ vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
++
++ vblank_in_pixels =
++ amdgpu_crtc->hw_mode.crtc_htotal *
++ (amdgpu_crtc->hw_mode.crtc_vblank_end -
++ amdgpu_crtc->hw_mode.crtc_vdisplay +
++ (amdgpu_crtc->v_border * 2));
++
++ vblank_time_us =
++ vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
++
++ /* The legacy (non-DC) code has issues with mclk switching
++ * with refresh rates over 120 Hz. Disable mclk switching.
++ */
++ if (vrefresh > 120)
++ vblank_time_us = 0;
++
++ /* Find minimum vblank time. */
++ if (vblank_time_us < cfg->min_vblank_time)
++ cfg->min_vblank_time = vblank_time_us;
++
++ /* Find vertical refresh rate of first active display. */
++ if (!cfg->vrefresh)
++ cfg->vrefresh = vrefresh;
++ }
++
++ if (amdgpu_crtc->crtc_id < cfg->crtc_index) {
++ /* Find first active CRTC and its line time. */
++ cfg->crtc_index = amdgpu_crtc->crtc_id;
++ cfg->line_time_in_us = amdgpu_crtc->line_time;
++ }
++
++ display_cfg->controller_id = amdgpu_crtc->crtc_id;
++ display_cfg->pixel_clock = conn->pixelclock_for_modeset;
++ }
++ }
++
++ cfg->display_clk = adev->clock.default_dispclk;
++ cfg->num_display = num_crtcs;
++}
+--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
++++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
+@@ -29,4 +29,6 @@ u32 amdgpu_dpm_get_vblank_time(struct am
+
+ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
+
++void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev);
++
+ #endif
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+@@ -2312,7 +2312,7 @@ static void kv_apply_state_adjust_rules(
+
+ if (pi->sys_info.nb_dpm_enable) {
+ force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
+- pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
++ pi->video_start || (adev->pm.pm_display_cfg.num_display >= 3) ||
+ pi->disable_nb_ps3_in_battery;
+ ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
+ ps->dpm0_pg_nb_ps_hi = 0x2;
+@@ -2371,7 +2371,7 @@ static int kv_calculate_nbps_level_setti
+ return 0;
+
+ force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
+- (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
++ (adev->pm.pm_display_cfg.num_display >= 3) || pi->video_start);
+
+ if (force_high) {
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+@@ -797,8 +797,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick
+ int i;
+ struct amdgpu_ps *ps;
+ u32 ui_class;
+- bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
+- true : false;
++ bool single_display = adev->pm.pm_display_cfg.num_display < 2;
+
+ /* check if the vblank period is too short to adjust the mclk */
+ if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
+@@ -1003,7 +1002,8 @@ void amdgpu_legacy_dpm_compute_clocks(vo
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- amdgpu_dpm_get_active_displays(adev);
++ if (!adev->dc_enabled)
++ amdgpu_dpm_get_display_cfg(adev);
+
+ amdgpu_dpm_change_power_state_locked(adev);
+ }
+--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
++++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+@@ -3058,7 +3058,7 @@ static int si_get_vce_clock_voltage(stru
+ static bool si_dpm_vblank_too_short(void *handle)
+ {
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+- u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
++ u32 vblank_time = adev->pm.pm_display_cfg.min_vblank_time;
+ /* we never hit the non-gddr5 limit so disable it */
+ u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
+
+@@ -3424,9 +3424,10 @@ static void rv770_get_engine_memory_ss(s
+ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+ {
++ const struct amd_pp_display_configuration *display_cfg =
++ &adev->pm.pm_display_cfg;
+ struct si_ps *ps = si_get_ps(rps);
+ struct amdgpu_clock_and_voltage_limits *max_limits;
+- struct amdgpu_connector *conn;
+ bool disable_mclk_switching = false;
+ bool disable_sclk_switching = false;
+ u32 mclk, sclk;
+@@ -3477,14 +3478,9 @@ static void si_apply_state_adjust_rules(
+ * For example, 4K 60Hz and 1080p 144Hz fall into this category.
+ * Find number of such displays connected.
+ */
+- for (i = 0; i < adev->mode_info.num_crtc; i++) {
+- if (!(adev->pm.dpm.new_active_crtcs & (1 << i)) ||
+- !adev->mode_info.crtcs[i]->enabled)
+- continue;
+-
+- conn = to_amdgpu_connector(adev->mode_info.crtcs[i]->connector);
+-
+- if (conn->pixelclock_for_modeset > 297000)
++ for (i = 0; i < display_cfg->num_display; i++) {
++ /* The array only contains active displays. */
++ if (display_cfg->displays[i].pixel_clock > 297000)
+ high_pixelclock_count++;
+ }
+
+@@ -3517,7 +3513,7 @@ static void si_apply_state_adjust_rules(
+ rps->ecclk = 0;
+ }
+
+- if ((adev->pm.dpm.new_active_crtc_count > 1) ||
++ if ((adev->pm.pm_display_cfg.num_display > 1) ||
+ si_dpm_vblank_too_short(adev))
+ disable_mclk_switching = true;
+
+@@ -3665,7 +3661,7 @@ static void si_apply_state_adjust_rules(
+ ps->performance_levels[i].mclk,
+ max_limits->vddc, &ps->performance_levels[i].vddc);
+ btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
+- adev->clock.current_dispclk,
++ display_cfg->display_clk,
+ max_limits->vddc, &ps->performance_levels[i].vddc);
+ }
+
+@@ -4190,16 +4186,16 @@ static void si_program_ds_registers(stru
+
+ static void si_program_display_gap(struct amdgpu_device *adev)
+ {
++ const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
+ u32 tmp, pipe;
+- int i;
+
+ tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+- if (adev->pm.dpm.new_active_crtc_count > 0)
++ if (cfg->num_display > 0)
+ tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ else
+ tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+
+- if (adev->pm.dpm.new_active_crtc_count > 1)
++ if (cfg->num_display > 1)
+ tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ else
+ tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+@@ -4209,17 +4205,8 @@ static void si_program_display_gap(struc
+ tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
+ pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
+
+- if ((adev->pm.dpm.new_active_crtc_count > 0) &&
+- (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
+- /* find the first active crtc */
+- for (i = 0; i < adev->mode_info.num_crtc; i++) {
+- if (adev->pm.dpm.new_active_crtcs & (1 << i))
+- break;
+- }
+- if (i == adev->mode_info.num_crtc)
+- pipe = 0;
+- else
+- pipe = i;
++ if (cfg->num_display > 0 && pipe != cfg->crtc_index) {
++ pipe = cfg->crtc_index;
+
+ tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
+ tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
+@@ -4230,7 +4217,7 @@ static void si_program_display_gap(struc
+ * This can be a problem on PowerXpress systems or if you want to use the card
+ * for offscreen rendering or compute if there are no crtcs enabled.
+ */
+- si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
++ si_notify_smc_display_change(adev, cfg->num_display > 0);
+ }
+
+ static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
+@@ -5535,7 +5522,7 @@ static int si_convert_power_level_to_smc
+ (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
+ !eg_pi->uvd_enabled &&
+ (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+- (adev->pm.dpm.new_active_crtc_count <= 2)) {
++ (adev->pm.pm_display_cfg.num_display <= 2)) {
+ level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
+
+ if (gmc_pg)
+@@ -5687,7 +5674,7 @@ static bool si_is_state_ulv_compatible(s
+ /* XXX validate against display requirements! */
+
+ for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
+- if (adev->clock.current_dispclk <=
++ if (adev->pm.pm_display_cfg.display_clk <=
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
+ if (ulv->pl.vddc <
+ adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
+@@ -5841,30 +5828,22 @@ static int si_upload_ulv_state(struct am
+
+ static int si_upload_smc_data(struct amdgpu_device *adev)
+ {
+- struct amdgpu_crtc *amdgpu_crtc = NULL;
+- int i;
++ const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
+ u32 crtc_index = 0;
+ u32 mclk_change_block_cp_min = 0;
+ u32 mclk_change_block_cp_max = 0;
+
+- for (i = 0; i < adev->mode_info.num_crtc; i++) {
+- if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
+- amdgpu_crtc = adev->mode_info.crtcs[i];
+- break;
+- }
+- }
+-
+ /* When a display is plugged in, program these so that the SMC
+ * performs MCLK switching when it doesn't cause flickering.
+ * When no display is plugged in, there is no need to restrict
+ * MCLK switching, so program them to zero.
+ */
+- if (adev->pm.dpm.new_active_crtc_count && amdgpu_crtc) {
+- crtc_index = amdgpu_crtc->crtc_id;
++ if (cfg->num_display) {
++ crtc_index = cfg->crtc_index;
+
+- if (amdgpu_crtc->line_time) {
+- mclk_change_block_cp_min = 200 / amdgpu_crtc->line_time;
+- mclk_change_block_cp_max = 100 / amdgpu_crtc->line_time;
++ if (cfg->line_time_in_us) {
++ mclk_change_block_cp_min = 200 / cfg->line_time_in_us;
++ mclk_change_block_cp_max = 100 / cfg->line_time_in_us;
+ }
+ }
+
+--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+@@ -1568,16 +1568,7 @@ static void pp_pm_compute_clocks(void *h
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ if (!adev->dc_enabled) {
+- amdgpu_dpm_get_active_displays(adev);
+- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+- /* we have issues with mclk switching with
+- * refresh rates over 120 hz on the non-DC code.
+- */
+- if (adev->pm.pm_display_cfg.vrefresh > 120)
+- adev->pm.pm_display_cfg.min_vblank_time = 0;
+-
++ amdgpu_dpm_get_display_cfg(adev);
+ pp_display_configuration_change(handle,
+ &adev->pm.pm_display_cfg);
+ }
--- /dev/null
+From rosenp@gmail.com Sat Mar 21 06:45:17 2026
+From: Rosen Penev <rosenp@gmail.com>
+Date: Fri, 20 Mar 2026 22:44:51 -0700
+Subject: drm/amdgpu: clarify DC checks
+To: stable@vger.kernel.org
+Cc: "Alex Deucher" <alexander.deucher@amd.com>, "Christian König" <christian.koenig@amd.com>, "Pan, Xinhui" <Xinhui.Pan@amd.com>, "David Airlie" <airlied@gmail.com>, "Daniel Vetter" <daniel@ffwll.ch>, "Harry Wentland" <harry.wentland@amd.com>, "Leo Li" <sunpeng.li@amd.com>, "Rodrigo Siqueira" <Rodrigo.Siqueira@amd.com>, "Evan Quan" <evan.quan@amd.com>, "Mario Limonciello" <mario.limonciello@amd.com>, "Sasha Levin" <sashal@kernel.org>, "Rosen Penev" <rosenp@gmail.com>, "Lijo Lazar" <lijo.lazar@amd.com>, "Ma Jun" <Jun.Ma2@amd.com>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Srinivasan Shanmugam" <srinivasan.shanmugam@amd.com>, "Mario Limonciello (AMD)" <superm1@kernel.org>, "Zhigang Luo" <Zhigang.Luo@amd.com>, "Bert Karwatzki" <spasswolf@web.de>, "Ray Wu" <ray.wu@amd.com>, "Wayne Lin" <Wayne.Lin@amd.com>, "Roman Li" <Roman.Li@amd.com>, "Hersen Wu" <hersenxs.wu@amd.com>, "Timur Kristóf" <timur.kristof@gmail.com>, "Alex Hung" <alex.hung@amd.com>, decce6 <decce6@proton.me>, "Wentao Liang" <vulab@iscas.ac.cn>, amd-gfx@lists.freedesktop.org (open list:RADEON and AMDGPU DRM DRIVERS), dri-devel@lists.freedesktop.org (open list:DRM DRIVERS), linux-kernel@vger.kernel.org (open list)
+Message-ID: <20260321054453.19683-3-rosenp@gmail.com>
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit d09ef243035b75a6d403ebfeb7e87fa20d7e25c6 ]
+
+There are several places where we don't want to check
+if a particular asic could support DC, but rather, if
+DC is enabled. Set a flag if DC is enabled and check
+for that rather than if a device supports DC or not.
+
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rosen Penev <rosenp@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
+ drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 2 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 2 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 32 +++++++++++-----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 4 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 -
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1
+ drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 2 -
+ 8 files changed, 25 insertions(+), 21 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1071,6 +1071,7 @@ struct amdgpu_device {
+ struct work_struct reset_work;
+
+ bool job_hang;
++ bool dc_enabled;
+ };
+
+ static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -857,7 +857,7 @@ int amdgpu_acpi_init(struct amdgpu_devic
+ struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
+
+ if (atif->notifications.brightness_change) {
+- if (amdgpu_device_has_dc_support(adev)) {
++ if (adev->dc_enabled) {
+ #if defined(CONFIG_DRM_AMD_DC)
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -1981,7 +1981,7 @@ int amdgpu_debugfs_init(struct amdgpu_de
+ amdgpu_ta_if_debugfs_init(adev);
+
+ #if defined(CONFIG_DRM_AMD_DC)
+- if (amdgpu_device_has_dc_support(adev))
++ if (adev->dc_enabled)
+ dtn_debugfs_init(adev);
+ #endif
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4404,25 +4404,27 @@ int amdgpu_device_resume(struct drm_devi
+
+ amdgpu_ras_resume(adev);
+
+- /*
+- * Most of the connector probing functions try to acquire runtime pm
+- * refs to ensure that the GPU is powered on when connector polling is
+- * performed. Since we're calling this from a runtime PM callback,
+- * trying to acquire rpm refs will cause us to deadlock.
+- *
+- * Since we're guaranteed to be holding the rpm lock, it's safe to
+- * temporarily disable the rpm helpers so this doesn't deadlock us.
+- */
++ if (adev->mode_info.num_crtc) {
++ /*
++ * Most of the connector probing functions try to acquire runtime pm
++ * refs to ensure that the GPU is powered on when connector polling is
++ * performed. Since we're calling this from a runtime PM callback,
++ * trying to acquire rpm refs will cause us to deadlock.
++ *
++ * Since we're guaranteed to be holding the rpm lock, it's safe to
++ * temporarily disable the rpm helpers so this doesn't deadlock us.
++ */
+ #ifdef CONFIG_PM
+- dev->dev->power.disable_depth++;
++ dev->dev->power.disable_depth++;
+ #endif
+- if (!amdgpu_device_has_dc_support(adev))
+- drm_helper_hpd_irq_event(dev);
+- else
+- drm_kms_helper_hotplug_event(dev);
++ if (!adev->dc_enabled)
++ drm_helper_hpd_irq_event(dev);
++ else
++ drm_kms_helper_hotplug_event(dev);
+ #ifdef CONFIG_PM
+- dev->dev->power.disable_depth--;
++ dev->dev->power.disable_depth--;
+ #endif
++ }
+ adev->in_suspend = false;
+
+ if (adev->enable_mes)
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -534,7 +534,7 @@ uint32_t amdgpu_display_supported_domain
+ */
+ if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
+ amdgpu_bo_support_uswc(bo_flags) &&
+- amdgpu_device_has_dc_support(adev) &&
++ adev->dc_enabled &&
+ adev->mode_info.gpu_vm_support)
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
+ #endif
+@@ -1330,7 +1330,7 @@ int amdgpu_display_modeset_create_props(
+ "dither",
+ amdgpu_dither_enum_list, sz);
+
+- if (amdgpu_device_has_dc_support(adev)) {
++ if (adev->dc_enabled) {
+ adev->mode_info.abm_level_property =
+ drm_property_create_range(adev_to_drm(adev), 0,
+ "abm level", 0, 4);
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2530,7 +2530,7 @@ static int amdgpu_runtime_idle_check_dis
+ if (ret)
+ return ret;
+
+- if (amdgpu_device_has_dc_support(adev)) {
++ if (adev->dc_enabled) {
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, drm_dev) {
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4842,6 +4842,7 @@ static int dm_early_init(void *handle)
+ adev_to_drm(adev)->dev,
+ &dev_attr_s3_debug);
+ #endif
++ adev->dc_enabled = true;
+
+ return 0;
+ }
+--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+@@ -1567,7 +1567,7 @@ static void pp_pm_compute_clocks(void *h
+ struct pp_hwmgr *hwmgr = handle;
+ struct amdgpu_device *adev = hwmgr->adev;
+
+- if (!amdgpu_device_has_dc_support(adev)) {
++ if (!adev->dc_enabled) {
+ amdgpu_dpm_get_active_displays(adev);
+ adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+ adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
--- /dev/null
+From rosenp@gmail.com Sat Mar 21 06:45:14 2026
+From: Rosen Penev <rosenp@gmail.com>
+Date: Fri, 20 Mar 2026 22:44:50 -0700
+Subject: drm/amdgpu: use proper DC check in amdgpu_display_supported_domains()
+To: stable@vger.kernel.org
+Cc: "Alex Deucher" <alexander.deucher@amd.com>, "Christian König" <christian.koenig@amd.com>, "Pan, Xinhui" <Xinhui.Pan@amd.com>, "David Airlie" <airlied@gmail.com>, "Daniel Vetter" <daniel@ffwll.ch>, "Harry Wentland" <harry.wentland@amd.com>, "Leo Li" <sunpeng.li@amd.com>, "Rodrigo Siqueira" <Rodrigo.Siqueira@amd.com>, "Evan Quan" <evan.quan@amd.com>, "Mario Limonciello" <mario.limonciello@amd.com>, "Sasha Levin" <sashal@kernel.org>, "Rosen Penev" <rosenp@gmail.com>, "Lijo Lazar" <lijo.lazar@amd.com>, "Ma Jun" <Jun.Ma2@amd.com>, "Greg Kroah-Hartman" <gregkh@linuxfoundation.org>, "Srinivasan Shanmugam" <srinivasan.shanmugam@amd.com>, "Mario Limonciello (AMD)" <superm1@kernel.org>, "Zhigang Luo" <Zhigang.Luo@amd.com>, "Bert Karwatzki" <spasswolf@web.de>, "Ray Wu" <ray.wu@amd.com>, "Wayne Lin" <Wayne.Lin@amd.com>, "Roman Li" <Roman.Li@amd.com>, "Hersen Wu" <hersenxs.wu@amd.com>, "Timur Kristóf" <timur.kristof@gmail.com>, "Alex Hung" <alex.hung@amd.com>, decce6 <decce6@proton.me>, "Wentao Liang" <vulab@iscas.ac.cn>, amd-gfx@lists.freedesktop.org (open list:RADEON and AMDGPU DRM DRIVERS), dri-devel@lists.freedesktop.org (open list:DRM DRIVERS), linux-kernel@vger.kernel.org (open list)
+Message-ID: <20260321054453.19683-2-rosenp@gmail.com>
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit 96ce96f8773da4814622fd97e5226915a2c30706 ]
+
+amdgpu_device_asic_has_dc_support() just checks the asic itself.
+amdgpu_device_has_dc_support() is a runtime check which not
+only checks the asic, but also other things in the driver
+like whether virtual display is enabled. We want the latter
+here.
+
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Rosen Penev <rosenp@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -534,7 +534,7 @@ uint32_t amdgpu_display_supported_domain
+ */
+ if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
+ amdgpu_bo_support_uswc(bo_flags) &&
+- amdgpu_device_asic_has_dc_support(adev->asic_type) &&
++ amdgpu_device_has_dc_support(adev) &&
+ adev->mode_info.gpu_vm_support)
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
+ #endif
--- /dev/null
+From stable+bounces-223292-greg=kroah.com@vger.kernel.org Fri Mar 6 02:20:46 2026
+From: Rahul Sharma <black.hawk@163.com>
+Date: Fri, 6 Mar 2026 09:20:14 +0800
+Subject: f2fs: fix to trigger foreground gc during f2fs_map_blocks() in lfs mode
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, Chao Yu <chao@kernel.org>, Daeho Jeong <daehojeong@google.com>, Jaegeuk Kim <jaegeuk@kernel.org>, Rahul Sharma <black.hawk@163.com>
+Message-ID: <20260306012014.1948914-1-black.hawk@163.com>
+
+From: Chao Yu <chao@kernel.org>
+
+[ Upstream commit 1005a3ca28e90c7a64fa43023f866b960a60f791 ]
+
+w/ "mode=lfs" mount option, generic/299 will cause system panic as below:
+
+------------[ cut here ]------------
+kernel BUG at fs/f2fs/segment.c:2835!
+Call Trace:
+ <TASK>
+ f2fs_allocate_data_block+0x6f4/0xc50
+ f2fs_map_blocks+0x970/0x1550
+ f2fs_iomap_begin+0xb2/0x1e0
+ iomap_iter+0x1d6/0x430
+ __iomap_dio_rw+0x208/0x9a0
+ f2fs_file_write_iter+0x6b3/0xfa0
+ aio_write+0x15d/0x2e0
+ io_submit_one+0x55e/0xab0
+ __x64_sys_io_submit+0xa5/0x230
+ do_syscall_64+0x84/0x2f0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0010:new_curseg+0x70f/0x720
+
+The root cause of we run out-of-space is: in f2fs_map_blocks(), f2fs may
+trigger foreground gc only if it allocates any physical block, it will be
+a little bit later when there is multiple threads writing data w/
+aio/dio/bufio method in parallel, since we always use OPU in lfs mode, so
+f2fs_map_blocks() does block allocations aggressively.
+
+In order to fix this issue, let's give a chance to trigger foreground
+gc in prior to block allocation in f2fs_map_blocks().
+
+Fixes: 36abef4e796d ("f2fs: introduce mode=lfs mount option")
+Cc: Daeho Jeong <daehojeong@google.com>
+Signed-off-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+[ The context change is due to the commit 2f51ade9524c
+("f2fs: f2fs_do_map_lock") in v6.3 which is irrelevant to
+the logic of this patch. ]
+Signed-off-by: Rahul Sharma <black.hawk@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/f2fs/data.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1537,8 +1537,11 @@ int f2fs_map_blocks(struct inode *inode,
+ end = pgofs + maxblocks;
+
+ next_dnode:
+- if (map->m_may_create)
++ if (map->m_may_create) {
++ if (f2fs_lfs_mode(sbi))
++ f2fs_balance_fs(sbi, true);
+ f2fs_do_map_lock(sbi, flag, true);
++ }
+
+ /* When reading holes, we need its node page */
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
--- /dev/null
+From stable+bounces-223375-greg=kroah.com@vger.kernel.org Fri Mar 6 18:50:26 2026
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri, 6 Mar 2026 18:50:10 +0100
+Subject: mptcp: pm: in-kernel: always set ID as avail when rm endp
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org, sashal@kernel.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, syzbot+f56f7d56e2c6e11a01b6@syzkaller.appspotmail.com, Mat Martineau <martineau@kernel.org>, Jakub Kicinski <kuba@kernel.org>
+Message-ID: <20260306175009.2520964-2-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit d191101dee25567c2af3b28565f45346c33d65f5 upstream.
+
+Syzkaller managed to find a combination of actions that was generating
+this warning:
+
+ WARNING: net/mptcp/pm_kernel.c:1074 at __mark_subflow_endp_available net/mptcp/pm_kernel.c:1074 [inline], CPU#1: syz.7.48/2535
+ WARNING: net/mptcp/pm_kernel.c:1074 at mptcp_pm_nl_fullmesh net/mptcp/pm_kernel.c:1446 [inline], CPU#1: syz.7.48/2535
+ WARNING: net/mptcp/pm_kernel.c:1074 at mptcp_pm_nl_set_flags_all net/mptcp/pm_kernel.c:1474 [inline], CPU#1: syz.7.48/2535
+ WARNING: net/mptcp/pm_kernel.c:1074 at mptcp_pm_nl_set_flags+0x5de/0x640 net/mptcp/pm_kernel.c:1538, CPU#1: syz.7.48/2535
+ Modules linked in:
+ CPU: 1 UID: 0 PID: 2535 Comm: syz.7.48 Not tainted 6.18.0-03987-gea5f5e676cf5 #17 PREEMPT(voluntary)
+ Hardware name: QEMU Ubuntu 25.10 PC (i440FX + PIIX, 1996), BIOS 1.17.0-debian-1.17.0-1 04/01/2014
+ RIP: 0010:__mark_subflow_endp_available net/mptcp/pm_kernel.c:1074 [inline]
+ RIP: 0010:mptcp_pm_nl_fullmesh net/mptcp/pm_kernel.c:1446 [inline]
+ RIP: 0010:mptcp_pm_nl_set_flags_all net/mptcp/pm_kernel.c:1474 [inline]
+ RIP: 0010:mptcp_pm_nl_set_flags+0x5de/0x640 net/mptcp/pm_kernel.c:1538
+ Code: 89 c7 e8 c5 8c 73 fe e9 f7 fd ff ff 49 83 ef 80 e8 b7 8c 73 fe 4c 89 ff be 03 00 00 00 e8 4a 29 e3 fe eb ac e8 a3 8c 73 fe 90 <0f> 0b 90 e9 3d ff ff ff e8 95 8c 73 fe b8 a1 ff ff ff eb 1a e8 89
+ RSP: 0018:ffffc9001535b820 EFLAGS: 00010287
+ netdevsim0: tun_chr_ioctl cmd 1074025677
+ RAX: ffffffff82da294d RBX: 0000000000000001 RCX: 0000000000080000
+ RDX: ffffc900096d0000 RSI: 00000000000006d6 RDI: 00000000000006d7
+ netdevsim0: linktype set to 823
+ RBP: ffff88802cdb2240 R08: 00000000000104ae R09: ffffffffffffffff
+ R10: ffffffff82da27d4 R11: 0000000000000000 R12: 0000000000000000
+ R13: ffff88801246d8c0 R14: ffffc9001535b8b8 R15: ffff88802cdb1800
+ FS: 00007fc6ac5a76c0(0000) GS:ffff8880f90c8000(0000) knlGS:0000000000000000
+ netlink: 'syz.3.50': attribute type 5 has an invalid length.
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ netlink: 1232 bytes leftover after parsing attributes in process `syz.3.50'.
+ CR2: 0000200000010000 CR3: 0000000025b1a000 CR4: 0000000000350ef0
+ Call Trace:
+ <TASK>
+ mptcp_pm_set_flags net/mptcp/pm_netlink.c:277 [inline]
+ mptcp_pm_nl_set_flags_doit+0x1d7/0x210 net/mptcp/pm_netlink.c:282
+ genl_family_rcv_msg_doit+0x117/0x180 net/netlink/genetlink.c:1115
+ genl_family_rcv_msg net/netlink/genetlink.c:1195 [inline]
+ genl_rcv_msg+0x3a8/0x3f0 net/netlink/genetlink.c:1210
+ netlink_rcv_skb+0x16d/0x240 net/netlink/af_netlink.c:2550
+ genl_rcv+0x28/0x40 net/netlink/genetlink.c:1219
+ netlink_unicast_kernel net/netlink/af_netlink.c:1318 [inline]
+ netlink_unicast+0x3e9/0x4c0 net/netlink/af_netlink.c:1344
+ netlink_sendmsg+0x4ab/0x5b0 net/netlink/af_netlink.c:1894
+ sock_sendmsg_nosec net/socket.c:718 [inline]
+ __sock_sendmsg+0xc9/0xf0 net/socket.c:733
+ ____sys_sendmsg+0x272/0x3b0 net/socket.c:2608
+ ___sys_sendmsg+0x2de/0x320 net/socket.c:2662
+ __sys_sendmsg net/socket.c:2694 [inline]
+ __do_sys_sendmsg net/socket.c:2699 [inline]
+ __se_sys_sendmsg net/socket.c:2697 [inline]
+ __x64_sys_sendmsg+0x110/0x1a0 net/socket.c:2697
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xed/0x360 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+ RIP: 0033:0x7fc6adb66f6d
+ Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 e8 ff ff ff f7 d8 64 89 01 48
+ RSP: 002b:00007fc6ac5a6ff8 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+ RAX: ffffffffffffffda RBX: 00007fc6addf5fa0 RCX: 00007fc6adb66f6d
+ RDX: 0000000000048084 RSI: 00002000000002c0 RDI: 000000000000000e
+ RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
+ R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+ netlink: 'syz.5.51': attribute type 2 has an invalid length.
+ R13: 00007fff25e91fe0 R14: 00007fc6ac5a7ce4 R15: 00007fff25e920d7
+ </TASK>
+
+The actions that caused that seem to be:
+
+ - Create an MPTCP endpoint for address A without any flags
+ - Create a new MPTCP connection from address A
+ - Remove the MPTCP endpoint: the corresponding subflows will be removed
+ - Recreate the endpoint with the same ID, but with the subflow flag
+ - Change the same endpoint to add the fullmesh flag
+
+In this case, msk->pm.local_addr_used has been kept to 0 as expected,
+but the corresponding bit in msk->pm.id_avail_bitmap was still unset
+after having removed the endpoint, causing the splat later on.
+
+When removing an endpoint, the corresponding endpoint ID was only marked
+as available for "signal" types with an announced address, plus all
+"subflow" types, but not the other types like an endpoint corresponding
+to the initial subflow. In these cases, re-creating an endpoint with the
+same ID didn't signal/create anything. Here, adding the fullmesh flag
+was creating the splat when calling __mark_subflow_endp_available() from
+mptcp_pm_nl_fullmesh(), because msk->pm.local_addr_used was set to 0
+while the ID was marked as used.
+
+To fix this issue, the corresponding bit in msk->pm.id_avail_bitmap can
+always be set as available when removing an MPTCP in-kernel endpoint. In
+other words, moving the call to __set_bit() to do it in all cases,
+except for "subflow" types where this bit is handled in a dedicated
+helper.
+
+Note: instead of adding a new spin_(un)lock_bh that would be taken in
+all cases, do all the actions requiring the spin lock under the same
+block.
+
+This modification potentially fixes another issue reported by syzbot,
+see [1]. But without a reproducer or more details about what exactly
+happened before, it is hard to confirm.
+
+Fixes: e255683c06df ("mptcp: pm: re-using ID of unused removed ADD_ADDR")
+Cc: stable@vger.kernel.org
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/606
+Reported-by: syzbot+f56f7d56e2c6e11a01b6@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/68fcfc4a.050a0220.346f24.02fb.GAE@google.com [1]
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20260205-net-mptcp-misc-fixes-6-19-rc8-v2-1-c2720ce75c34@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Conflict in pm_netlink.c, because commit 8617e85e04bd ("mptcp: pm:
+ split in-kernel PM specific code") is not in this version, and move
+ code from pm_netlink.c to pm_kernel.c. Also, commit 636113918508
+ ("mptcp: pm: remove '_nl' from mptcp_pm_nl_rm_addr_received") renamed
+ mptcp_pm_nl_rm_subflow_received() to mptcp_pm_rm_subflow(). Apart from
+ that, the same patch can be applied in pm_netlink.c. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 20 ++++++++------------
+ 1 file changed, 8 insertions(+), 12 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1643,10 +1643,8 @@ static bool mptcp_pm_remove_anno_addr(st
+ ret = remove_anno_list_by_saddr(msk, addr);
+ if (ret || force) {
+ spin_lock_bh(&msk->pm.lock);
+- if (ret) {
+- __set_bit(addr->id, msk->pm.id_avail_bitmap);
++ if (ret)
+ msk->pm.add_addr_signaled--;
+- }
+ mptcp_pm_remove_addr(msk, &list);
+ spin_unlock_bh(&msk->pm.lock);
+ }
+@@ -1684,17 +1682,15 @@ static int mptcp_nl_remove_subflow_and_s
+ !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
+
+ list.ids[0] = mptcp_endp_get_local_id(msk, addr);
+- if (remove_subflow) {
+- spin_lock_bh(&msk->pm.lock);
+- mptcp_pm_nl_rm_subflow_received(msk, &list);
+- spin_unlock_bh(&msk->pm.lock);
+- }
+
+- if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
+- spin_lock_bh(&msk->pm.lock);
++ spin_lock_bh(&msk->pm.lock);
++ if (remove_subflow)
++ mptcp_pm_nl_rm_subflow_received(msk, &list);
++ if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
+ __mark_subflow_endp_available(msk, list.ids[0]);
+- spin_unlock_bh(&msk->pm.lock);
+- }
++ else /* mark endp ID as available, e.g. Signal or MPC endp */
++ __set_bit(addr->id, msk->pm.id_avail_bitmap);
++ spin_unlock_bh(&msk->pm.lock);
+
+ if (msk->mpc_endpoint_id == entry->addr.id)
+ msk->mpc_endpoint_id = 0;
--- /dev/null
+From stable+bounces-222520-greg=kroah.com@vger.kernel.org Mon Mar 2 07:51:03 2026
+From: Li hongliang <1468888505@139.com>
+Date: Mon, 2 Mar 2026 14:50:44 +0800
+Subject: net: add support for segmenting TCP fraglist GSO packets
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, nbd@nbd.name
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, edumazet@google.com, davem@davemloft.net, yoshfuji@linux-ipv6.org, dsahern@kernel.org, kuba@kernel.org, pabeni@redhat.com, netdev@vger.kernel.org, willemb@google.com
+Message-ID: <20260302065044.2694740-1-1468888505@139.com>
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit bee88cd5bd83d40b8aec4d6cb729378f707f6197 ]
+
+Preparation for adding TCP fraglist GRO support. It expects packets to be
+combined in a similar way as UDP fraglist GSO packets.
+For IPv4 packets, NAT is handled in the same way as UDP fraglist GSO.
+
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_offload.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++
+ net/ipv6/tcpv6_offload.c | 58 ++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 125 insertions(+)
+
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -30,6 +30,70 @@ static void tcp_gso_tstamp(struct sk_buf
+ }
+ }
+
++static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
++ __be32 *oldip, __be32 newip,
++ __be16 *oldport, __be16 newport)
++{
++ struct tcphdr *th;
++ struct iphdr *iph;
++
++ if (*oldip == newip && *oldport == newport)
++ return;
++
++ th = tcp_hdr(seg);
++ iph = ip_hdr(seg);
++
++ inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
++ inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
++ *oldport = newport;
++
++ csum_replace4(&iph->check, *oldip, newip);
++ *oldip = newip;
++}
++
++static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
++{
++ const struct tcphdr *th;
++ const struct iphdr *iph;
++ struct sk_buff *seg;
++ struct tcphdr *th2;
++ struct iphdr *iph2;
++
++ seg = segs;
++ th = tcp_hdr(seg);
++ iph = ip_hdr(seg);
++ th2 = tcp_hdr(seg->next);
++ iph2 = ip_hdr(seg->next);
++
++ if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
++ iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
++ return segs;
++
++ while ((seg = seg->next)) {
++ th2 = tcp_hdr(seg);
++ iph2 = ip_hdr(seg);
++
++ __tcpv4_gso_segment_csum(seg,
++ &iph2->saddr, iph->saddr,
++ &th2->source, th->source);
++ __tcpv4_gso_segment_csum(seg,
++ &iph2->daddr, iph->daddr,
++ &th2->dest, th->dest);
++ }
++
++ return segs;
++}
++
++static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
++ netdev_features_t features)
++{
++ skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
++ if (IS_ERR(skb))
++ return skb;
++
++ return __tcpv4_gso_segment_list_csum(skb);
++}
++
+ static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+ {
+@@ -39,6 +103,9 @@ static struct sk_buff *tcp4_gso_segment(
+ if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
+ return ERR_PTR(-EINVAL);
+
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
++ return __tcp4_gso_segment_list(skb, features);
++
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *th = tcp_hdr(skb);
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -39,6 +39,61 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_com
+ return tcp_gro_complete(skb);
+ }
+
++static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
++ __be16 *oldport, __be16 newport)
++{
++ struct tcphdr *th;
++
++ if (*oldport == newport)
++ return;
++
++ th = tcp_hdr(seg);
++ inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
++ *oldport = newport;
++}
++
++static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
++{
++ const struct tcphdr *th;
++ const struct ipv6hdr *iph;
++ struct sk_buff *seg;
++ struct tcphdr *th2;
++ struct ipv6hdr *iph2;
++
++ seg = segs;
++ th = tcp_hdr(seg);
++ iph = ipv6_hdr(seg);
++ th2 = tcp_hdr(seg->next);
++ iph2 = ipv6_hdr(seg->next);
++
++ if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
++ ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
++ ipv6_addr_equal(&iph->daddr, &iph2->daddr))
++ return segs;
++
++ while ((seg = seg->next)) {
++ th2 = tcp_hdr(seg);
++ iph2 = ipv6_hdr(seg);
++
++ iph2->saddr = iph->saddr;
++ iph2->daddr = iph->daddr;
++ __tcpv6_gso_segment_csum(seg, &th2->source, th->source);
++ __tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
++ }
++
++ return segs;
++}
++
++static struct sk_buff *__tcp6_gso_segment_list(struct sk_buff *skb,
++ netdev_features_t features)
++{
++ skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
++ if (IS_ERR(skb))
++ return skb;
++
++ return __tcpv6_gso_segment_list_csum(skb);
++}
++
+ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+ {
+@@ -50,6 +105,9 @@ static struct sk_buff *tcp6_gso_segment(
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ return ERR_PTR(-EINVAL);
+
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
++ return __tcp6_gso_segment_list(skb, features);
++
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct tcphdr *th = tcp_hdr(skb);
--- /dev/null
+From ba9db6f907ac02215e30128770f85fbd7db2fcf9 Mon Sep 17 00:00:00 2001
+From: Jakub Kicinski <kuba@kernel.org>
+Date: Mon, 9 Jun 2025 17:12:44 -0700
+Subject: net: clear the dst when changing skb protocol
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+commit ba9db6f907ac02215e30128770f85fbd7db2fcf9 upstream.
+
+A not-so-careful NAT46 BPF program can crash the kernel
+if it indiscriminately flips ingress packets from v4 to v6:
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000000
+ ip6_rcv_core (net/ipv6/ip6_input.c:190:20)
+ ipv6_rcv (net/ipv6/ip6_input.c:306:8)
+ process_backlog (net/core/dev.c:6186:4)
+ napi_poll (net/core/dev.c:6906:9)
+ net_rx_action (net/core/dev.c:7028:13)
+ do_softirq (kernel/softirq.c:462:3)
+ netif_rx (net/core/dev.c:5326:3)
+ dev_loopback_xmit (net/core/dev.c:4015:2)
+ ip_mc_finish_output (net/ipv4/ip_output.c:363:8)
+ NF_HOOK (./include/linux/netfilter.h:314:9)
+ ip_mc_output (net/ipv4/ip_output.c:400:5)
+ dst_output (./include/net/dst.h:459:9)
+ ip_local_out (net/ipv4/ip_output.c:130:9)
+ ip_send_skb (net/ipv4/ip_output.c:1496:8)
+ udp_send_skb (net/ipv4/udp.c:1040:8)
+ udp_sendmsg (net/ipv4/udp.c:1328:10)
+
+The output interface has a 4->6 program attached at ingress.
+We try to loop the multicast skb back to the sending socket.
+Ingress BPF runs as part of netif_rx(), pushes a valid v6 hdr
+and changes skb->protocol to v6. We enter ip6_rcv_core which
+tries to use skb_dst(). But the dst is still an IPv4 one left
+after IPv4 mcast output.
+
+Clear the dst in all BPF helpers which change the protocol.
+Try to preserve metadata dsts, those may carry non-routing
+metadata.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Maciej Żenczykowski <maze@google.com>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Fixes: d219df60a70e ("bpf: Add ipip6 and ip6ip decap support for bpf_skb_adjust_room()")
+Fixes: 1b00e0dfe7d0 ("bpf: update skb->protocol in bpf_skb_net_grow")
+Fixes: 6578171a7ff0 ("bpf: add bpf_skb_change_proto helper")
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20250610001245.1981782-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ The context change is due to the commit d219df60a70e
+ ("bpf: Add ipip6 and ip6ip decap support for bpf_skb_adjust_room()")
+ in v6.3 which is irrelevant to the logic of this patch. ]
+Signed-off-by: Johnny Hao <johnny_haocn@sina.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/filter.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3232,6 +3232,13 @@ static const struct bpf_func_proto bpf_s
+ .arg1_type = ARG_PTR_TO_CTX,
+ };
+
++static void bpf_skb_change_protocol(struct sk_buff *skb, u16 proto)
++{
++ skb->protocol = htons(proto);
++ if (skb_valid_dst(skb))
++ skb_dst_drop(skb);
++}
++
+ static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
+ {
+ /* Caller already did skb_cow() with len as headroom,
+@@ -3328,7 +3335,7 @@ static int bpf_skb_proto_4_to_6(struct s
+ }
+ }
+
+- skb->protocol = htons(ETH_P_IPV6);
++ bpf_skb_change_protocol(skb, ETH_P_IPV6);
+ skb_clear_hash(skb);
+
+ return 0;
+@@ -3358,7 +3365,7 @@ static int bpf_skb_proto_6_to_4(struct s
+ }
+ }
+
+- skb->protocol = htons(ETH_P_IP);
++ bpf_skb_change_protocol(skb, ETH_P_IP);
+ skb_clear_hash(skb);
+
+ return 0;
+@@ -3545,10 +3552,10 @@ static int bpf_skb_net_grow(struct sk_bu
+ /* Match skb->protocol to new outer l3 protocol */
+ if (skb->protocol == htons(ETH_P_IP) &&
+ flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
+- skb->protocol = htons(ETH_P_IPV6);
++ bpf_skb_change_protocol(skb, ETH_P_IPV6);
+ else if (skb->protocol == htons(ETH_P_IPV6) &&
+ flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
+- skb->protocol = htons(ETH_P_IP);
++ bpf_skb_change_protocol(skb, ETH_P_IP);
+ }
+
+ if (skb_is_gso(skb)) {
--- /dev/null
+From 1468888505@139.com Mon Mar 2 07:54:33 2026
+From: Li hongliang <1468888505@139.com>
+Date: Mon, 2 Mar 2026 14:54:27 +0800
+Subject: net: fix segmentation of forwarding fraglist GRO
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, jibin.zhang@mediatek.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@linux.dev, song@kernel.org, yhs@fb.com, john.fastabend@gmail.com, kpsingh@kernel.org, sdf@google.com, haoluo@google.com, jolsa@kernel.org, davem@davemloft.net, edumazet@google.com, kuba@kernel.org, pabeni@redhat.com, yoshfuji@linux-ipv6.org, dsahern@kernel.org, matthias.bgg@gmail.com, willemb@google.com, steffen.klassert@secunet.com, bpf@vger.kernel.org, netdev@vger.kernel.org, linux-arm-kernel@lists.infradead.org, linux-mediatek@lists.infradead.org
+Message-ID: <20260302065427.2695392-1-1468888505@139.com>
+
+From: Jibin Zhang <jibin.zhang@mediatek.com>
+
+[ Upstream commit 426ca15c7f6cb6562a081341ca88893a50c59fa2 ]
+
+This patch enhances GSO segment handling by properly checking
+the SKB_GSO_DODGY flag for frag_list GSO packets, addressing
+low throughput issues observed when a station accesses IPv4
+servers via hotspots with an IPv6-only upstream interface.
+
+Specifically, it fixes a bug in GSO segmentation when forwarding
+GRO packets containing a frag_list. The function skb_segment_list
+cannot correctly process GRO skbs that have been converted by XLAT,
+since XLAT only translates the header of the head skb. Consequently,
+skbs in the frag_list may remain untranslated, resulting in protocol
+inconsistencies and reduced throughput.
+
+To address this, the patch explicitly sets the SKB_GSO_DODGY flag
+for GSO packets in XLAT's IPv4/IPv6 protocol translation helpers
+(bpf_skb_proto_4_to_6 and bpf_skb_proto_6_to_4). This marks GSO
+packets as potentially modified after protocol translation. As a
+result, GSO segmentation will avoid using skb_segment_list and
+instead falls back to skb_segment for packets with the SKB_GSO_DODGY
+flag. This ensures that only safe and fully translated frag_list
+packets are processed by skb_segment_list, resolving protocol
+inconsistencies and improving throughput when forwarding GRO packets
+converted by XLAT.
+
+Signed-off-by: Jibin Zhang <jibin.zhang@mediatek.com>
+Fixes: 9fd1ff5d2ac7 ("udp: Support UDP fraglist GRO/GSO.")
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260126152114.1211-1-jibin.zhang@mediatek.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/filter.c | 2 ++
+ net/ipv4/tcp_offload.c | 3 ++-
+ net/ipv4/udp_offload.c | 3 ++-
+ net/ipv6/tcpv6_offload.c | 3 ++-
+ 4 files changed, 8 insertions(+), 3 deletions(-)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3333,6 +3333,7 @@ static int bpf_skb_proto_4_to_6(struct s
+ shinfo->gso_type &= ~SKB_GSO_TCPV4;
+ shinfo->gso_type |= SKB_GSO_TCPV6;
+ }
++ shinfo->gso_type |= SKB_GSO_DODGY;
+ }
+
+ bpf_skb_change_protocol(skb, ETH_P_IPV6);
+@@ -3363,6 +3364,7 @@ static int bpf_skb_proto_6_to_4(struct s
+ shinfo->gso_type &= ~SKB_GSO_TCPV6;
+ shinfo->gso_type |= SKB_GSO_TCPV4;
+ }
++ shinfo->gso_type |= SKB_GSO_DODGY;
+ }
+
+ bpf_skb_change_protocol(skb, ETH_P_IP);
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -106,7 +106,8 @@ static struct sk_buff *tcp4_gso_segment(
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
+ struct tcphdr *th = tcp_hdr(skb);
+
+- if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
++ if ((skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) &&
++ !(skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
+ return __tcp4_gso_segment_list(skb, features);
+
+ skb->ip_summed = CHECKSUM_NONE;
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -351,7 +351,8 @@ struct sk_buff *__udp_gso_segment(struct
+
+ if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST) {
+ /* Detect modified geometry and pass those to skb_segment. */
+- if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size)
++ if ((skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size) &&
++ !(skb_shinfo(gso_skb)->gso_type & SKB_GSO_DODGY))
+ return __udp_gso_segment_list(gso_skb, features, is_ipv6);
+
+ ret = __skb_linearize(gso_skb);
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -108,7 +108,8 @@ static struct sk_buff *tcp6_gso_segment(
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
+ struct tcphdr *th = tcp_hdr(skb);
+
+- if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
++ if ((skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) &&
++ !(skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
+ return __tcp6_gso_segment_list(skb, features);
+
+ skb->ip_summed = CHECKSUM_NONE;
--- /dev/null
+From stable+bounces-222521-greg=kroah.com@vger.kernel.org Mon Mar 2 07:52:17 2026
+From: Li hongliang <1468888505@139.com>
+Date: Mon, 2 Mar 2026 14:51:07 +0800
+Subject: net: gso: fix tcp fraglist segmentation after pull from frag_list
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, nbd@nbd.name
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, edumazet@google.com, davem@davemloft.net, dsahern@kernel.org, kuba@kernel.org, pabeni@redhat.com, matthias.bgg@gmail.com, angelogioacchino.delregno@collabora.com, willemb@google.com, netdev@vger.kernel.org, linux-arm-kernel@lists.infradead.org, linux-mediatek@lists.infradead.org, bpf@vger.kernel.org
+Message-ID: <20260302065107.2694835-1-1468888505@139.com>
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit 17bd3bd82f9f79f3feba15476c2b2c95a9b11ff8 ]
+
+Detect tcp gso fraglist skbs with corrupted geometry (see below) and
+pass these to skb_segment instead of skb_segment_list, as the first
+can segment them correctly.
+
+Valid SKB_GSO_FRAGLIST skbs
+- consist of two or more segments
+- the head_skb holds the protocol headers plus first gso_size
+- one or more frag_list skbs hold exactly one segment
+- all but the last must be gso_size
+
+Optional datapath hooks such as NAT and BPF (bpf_skb_pull_data) can
+modify these skbs, breaking these invariants.
+
+In extreme cases they pull all data into skb linear. For TCP, this
+causes a NULL ptr deref in __tcpv4_gso_segment_list_csum at
+tcp_hdr(seg->next).
+
+Detect invalid geometry due to pull, by checking head_skb size.
+Don't just drop, as this may blackhole a destination. Convert to be
+able to pass to regular skb_segment.
+
+Approach and description based on a patch by Willem de Bruijn.
+
+Link: https://lore.kernel.org/netdev/20240428142913.18666-1-shiming.cheng@mediatek.com/
+Link: https://lore.kernel.org/netdev/20240922150450.3873767-1-willemdebruijn.kernel@gmail.com/
+Fixes: bee88cd5bd83 ("net: add support for segmenting TCP fraglist GSO packets")
+Cc: stable@vger.kernel.org
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20240926085315.51524-1-nbd@nbd.name
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Li hongliang <1468888505@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_offload.c | 10 ++++++++--
+ net/ipv6/tcpv6_offload.c | 10 ++++++++--
+ 2 files changed, 16 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -103,8 +103,14 @@ static struct sk_buff *tcp4_gso_segment(
+ if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
+ return ERR_PTR(-EINVAL);
+
+- if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
+- return __tcp4_gso_segment_list(skb, features);
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
++ struct tcphdr *th = tcp_hdr(skb);
++
++ if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
++ return __tcp4_gso_segment_list(skb, features);
++
++ skb->ip_summed = CHECKSUM_NONE;
++ }
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ const struct iphdr *iph = ip_hdr(skb);
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -105,8 +105,14 @@ static struct sk_buff *tcp6_gso_segment(
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ return ERR_PTR(-EINVAL);
+
+- if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
+- return __tcp6_gso_segment_list(skb, features);
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
++ struct tcphdr *th = tcp_hdr(skb);
++
++ if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
++ return __tcp6_gso_segment_list(skb, features);
++
++ skb->ip_summed = CHECKSUM_NONE;
++ }
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
--- /dev/null
+From stable+bounces-223312-greg=kroah.com@vger.kernel.org Fri Mar 6 07:18:25 2026
+From: Rahul Sharma <black.hawk@163.com>
+Date: Fri, 6 Mar 2026 14:17:26 +0800
+Subject: net: stmmac: fix TSO DMA API usage causing oops
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>, Jon Hunter <jonathanh@nvidia.com>, Thierry Reding <thierry.reding@gmail.com>, Furong Xu <0x1207@gmail.com>, Jakub Kicinski <kuba@kernel.org>, Rahul Sharma <black.hawk@163.com>
+Message-ID: <20260306061726.712258-1-black.hawk@163.com>
+
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+
+[ Upstream commit 4c49f38e20a57f8abaebdf95b369295b153d1f8e ]
+
+Commit 66600fac7a98 ("net: stmmac: TSO: Fix unbalanced DMA map/unmap
+for non-paged SKB data") moved the assignment of tx_skbuff_dma[]'s
+members to be later in stmmac_tso_xmit().
+
+The buf (dma cookie) and len stored in this structure are passed to
+dma_unmap_single() by stmmac_tx_clean(). The DMA API requires that
+the dma cookie passed to dma_unmap_single() is the same as the value
+returned from dma_map_single(). However, by moving the assignment
+later, this is not the case when priv->dma_cap.addr64 > 32 as "des"
+is offset by proto_hdr_len.
+
+This causes problems such as:
+
+ dwc-eth-dwmac 2490000.ethernet eth0: Tx DMA map failed
+
+and with DMA_API_DEBUG enabled:
+
+ DMA-API: dwc-eth-dwmac 2490000.ethernet: device driver tries to free DMA memory it has not allocated [device address=0x000000ffffcf65c0] [size=66 bytes]
+
+Fix this by maintaining "des" as the original DMA cookie, and use
+tso_des to pass the offset DMA cookie to stmmac_tso_allocator().
+
+Full details of the crashes can be found at:
+https://lore.kernel.org/all/d8112193-0386-4e14-b516-37c2d838171a@nvidia.com/
+https://lore.kernel.org/all/klkzp5yn5kq5efgtrow6wbvnc46bcqfxs65nz3qy77ujr5turc@bwwhelz2l4dw/
+
+Reported-by: Jon Hunter <jonathanh@nvidia.com>
+Reported-by: Thierry Reding <thierry.reding@gmail.com>
+Fixes: 66600fac7a98 ("net: stmmac: TSO: Fix unbalanced DMA map/unmap for non-paged SKB data")
+Tested-by: Jon Hunter <jonathanh@nvidia.com>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Reviewed-by: Furong Xu <0x1207@gmail.com>
+Link: https://patch.msgid.link/E1tJXcx-006N4Z-PC@rmk-PC.armlinux.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ The context change is due to the commit 041cc86b3653
+("net: stmmac: Enable TSO on VLANs") in v6.11 which is irrelevant to
+the logic of this patch. ]
+Signed-off-by: Rahul Sharma <black.hawk@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -4091,10 +4091,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
+ int tmp_pay_len = 0, first_tx;
+ struct stmmac_tx_queue *tx_q;
+ bool has_vlan, set_ic;
++ dma_addr_t tso_des, des;
+ u8 proto_hdr_len, hdr;
+ unsigned long flags;
+ u32 pay_len, mss;
+- dma_addr_t des;
+ int i;
+
+ tx_q = &priv->dma_conf.tx_queue[queue];
+@@ -4179,14 +4179,15 @@ static netdev_tx_t stmmac_tso_xmit(struc
+
+ /* If needed take extra descriptors to fill the remaining payload */
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
++ tso_des = des;
+ } else {
+ stmmac_set_desc_addr(priv, first, des);
+ tmp_pay_len = pay_len;
+- des += proto_hdr_len;
++ tso_des = des + proto_hdr_len;
+ pay_len = 0;
+ }
+
+- stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
++ stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
+
+ /* In case two or more DMA transmit descriptors are allocated for this
+ * non-paged SKB data, the DMA buffer address should be saved to
--- /dev/null
+From 561399680@139.com Tue Mar 10 09:04:32 2026
+From: XiaoHua Wang <561399680@139.com>
+Date: Tue, 10 Mar 2026 16:02:45 +0800
+Subject: netfilter: nf_tables: missing objects with no memcg accounting
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: pablo@netfilter.org, netdev@vger.kernel.org, sbrivio@redhat.com, XiaoHua Wang <561399680@139.com>
+Message-ID: <20260310080246.3543546-1-561399680@139.com>
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 69e687cea79fc99a17dfb0116c8644b9391b915e ]
+
+Several ruleset objects are still not using GFP_KERNEL_ACCOUNT for
+memory accounting, update them. This includes:
+
+- catchall elements
+- compat match large info area
+- log prefix
+- meta secctx
+- numgen counters
+- pipapo set backend datastructure
+- tunnel private objects
+
+Fixes: 33758c891479 ("memcg: enable accounting for nft objects")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+[ The function pipapo_realloc_mt() does not exist in 6.1.y,
+so the fix for pipapo_realloc_mt() was not backported. ]
+Signed-off-by: XiaoHua Wang <561399680@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nf_tables_api.c | 2 +-
+ net/netfilter/nft_compat.c | 6 +++---
+ net/netfilter/nft_log.c | 2 +-
+ net/netfilter/nft_meta.c | 2 +-
+ net/netfilter/nft_numgen.c | 2 +-
+ net/netfilter/nft_set_pipapo.c | 10 +++++-----
+ net/netfilter/nft_tunnel.c | 5 +++--
+ 7 files changed, 15 insertions(+), 14 deletions(-)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6177,7 +6177,7 @@ static int nft_setelem_catchall_insert(c
+ }
+ }
+
+- catchall = kmalloc(sizeof(*catchall), GFP_KERNEL);
++ catchall = kmalloc(sizeof(*catchall), GFP_KERNEL_ACCOUNT);
+ if (!catchall)
+ return -ENOMEM;
+
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -537,7 +537,7 @@ nft_match_large_init(const struct nft_ct
+ struct xt_match *m = expr->ops->data;
+ int ret;
+
+- priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL);
++ priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL_ACCOUNT);
+ if (!priv->info)
+ return -ENOMEM;
+
+@@ -814,7 +814,7 @@ nft_match_select_ops(const struct nft_ct
+ goto err;
+ }
+
+- ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
++ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL_ACCOUNT);
+ if (!ops) {
+ err = -ENOMEM;
+ goto err;
+@@ -904,7 +904,7 @@ nft_target_select_ops(const struct nft_c
+ goto err;
+ }
+
+- ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
++ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL_ACCOUNT);
+ if (!ops) {
+ err = -ENOMEM;
+ goto err;
+--- a/net/netfilter/nft_log.c
++++ b/net/netfilter/nft_log.c
+@@ -163,7 +163,7 @@ static int nft_log_init(const struct nft
+
+ nla = tb[NFTA_LOG_PREFIX];
+ if (nla != NULL) {
+- priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
++ priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL_ACCOUNT);
+ if (priv->prefix == NULL)
+ return -ENOMEM;
+ nla_strscpy(priv->prefix, nla, nla_len(nla) + 1);
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -888,7 +888,7 @@ static int nft_secmark_obj_init(const st
+ if (tb[NFTA_SECMARK_CTX] == NULL)
+ return -EINVAL;
+
+- priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL);
++ priv->ctx = nla_strdup(tb[NFTA_SECMARK_CTX], GFP_KERNEL_ACCOUNT);
+ if (!priv->ctx)
+ return -ENOMEM;
+
+--- a/net/netfilter/nft_numgen.c
++++ b/net/netfilter/nft_numgen.c
+@@ -66,7 +66,7 @@ static int nft_ng_inc_init(const struct
+ if (priv->offset + priv->modulus - 1 < priv->offset)
+ return -EOVERFLOW;
+
+- priv->counter = kmalloc(sizeof(*priv->counter), GFP_KERNEL);
++ priv->counter = kmalloc(sizeof(*priv->counter), GFP_KERNEL_ACCOUNT);
+ if (!priv->counter)
+ return -ENOMEM;
+
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -874,7 +874,7 @@ static void pipapo_lt_bits_adjust(struct
+ return;
+ }
+
+- new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL);
++ new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL_ACCOUNT);
+ if (!new_lt)
+ return;
+
+@@ -1150,7 +1150,7 @@ static int pipapo_realloc_scratch(struct
+ scratch = kzalloc_node(struct_size(scratch, map,
+ bsize_max * 2) +
+ NFT_PIPAPO_ALIGN_HEADROOM,
+- GFP_KERNEL, cpu_to_node(i));
++ GFP_KERNEL_ACCOUNT, cpu_to_node(i));
+ if (!scratch) {
+ /* On failure, there's no need to undo previous
+ * allocations: this means that some scratch maps have
+@@ -1324,7 +1324,7 @@ static struct nft_pipapo_match *pipapo_c
+ int i;
+
+ new = kmalloc(sizeof(*new) + sizeof(*dst) * old->field_count,
+- GFP_KERNEL);
++ GFP_KERNEL_ACCOUNT);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+
+@@ -1354,7 +1354,7 @@ static struct nft_pipapo_match *pipapo_c
+ new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
+ src->bsize * sizeof(*dst->lt) +
+ NFT_PIPAPO_ALIGN_HEADROOM,
+- GFP_KERNEL);
++ GFP_KERNEL_ACCOUNT);
+ if (!new_lt)
+ goto out_lt;
+
+@@ -1368,7 +1368,7 @@ static struct nft_pipapo_match *pipapo_c
+ if (src->rules > (INT_MAX / sizeof(*src->mt)))
+ goto out_mt;
+
+- dst->mt = kvmalloc(src->rules * sizeof(*src->mt), GFP_KERNEL);
++ dst->mt = kvmalloc(src->rules * sizeof(*src->mt), GFP_KERNEL_ACCOUNT);
+ if (!dst->mt)
+ goto out_mt;
+
+--- a/net/netfilter/nft_tunnel.c
++++ b/net/netfilter/nft_tunnel.c
+@@ -503,13 +503,14 @@ static int nft_tunnel_obj_init(const str
+ return err;
+ }
+
+- md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
++ md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL,
++ GFP_KERNEL_ACCOUNT);
+ if (!md)
+ return -ENOMEM;
+
+ memcpy(&md->u.tun_info, &info, sizeof(info));
+ #ifdef CONFIG_DST_CACHE
+- err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
++ err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL_ACCOUNT);
+ if (err < 0) {
+ metadata_dst_free(md);
+ return err;
--- /dev/null
+From 561399680@139.com Tue Mar 10 09:04:34 2026
+From: XiaoHua Wang <561399680@139.com>
+Date: Tue, 10 Mar 2026 16:02:46 +0800
+Subject: netfilter: nft_set_pipapo: prevent overflow in lookup table allocation
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: pablo@netfilter.org, netdev@vger.kernel.org, sbrivio@redhat.com, XiaoHua Wang <561399680@139.com>
+Message-ID: <20260310080246.3543546-2-561399680@139.com>
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 4c5c6aa9967dbe55bd017bb509885928d0f31206 ]
+
+When calculating the lookup table size, ensure the following
+multiplication does not overflow:
+
+- desc->field_len[] maximum value is U8_MAX multiplied by
+ NFT_PIPAPO_GROUPS_PER_BYTE(f) that can be 2, worst case.
+- NFT_PIPAPO_BUCKETS(f->bb) is 2^8, worst case.
+- sizeof(unsigned long), from sizeof(*f->lt), lt in
+ struct nft_pipapo_field.
+
+Then, use check_mul_overflow() to multiply by bucket size and then use
+check_add_overflow() to the alignment for avx2 (if needed). Finally, add
+lt_size_check_overflow() helper and use it to consolidate this.
+
+While at it, replace leftover allocation using the GFP_KERNEL to
+GFP_KERNEL_ACCOUNT for consistency, in pipapo_resize().
+
+Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+[ Adjust context ]
+Signed-off-by: XiaoHua Wang <561399680@139.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nft_set_pipapo.c | 58 +++++++++++++++++++++++++++++++----------
+ 1 file changed, 44 insertions(+), 14 deletions(-)
+
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -610,6 +610,30 @@ static void *nft_pipapo_get(const struct
+ nft_genmask_cur(net), get_jiffies_64());
+ }
+
++
++/**
++ * lt_calculate_size() - Get storage size for lookup table with overflow check
++ * @groups: Amount of bit groups
++ * @bb: Number of bits grouped together in lookup table buckets
++ * @bsize: Size of each bucket in lookup table, in longs
++ *
++ * Return: allocation size including alignment overhead, negative on overflow
++ */
++static ssize_t lt_calculate_size(unsigned int groups, unsigned int bb,
++ unsigned int bsize)
++{
++ ssize_t ret = groups * NFT_PIPAPO_BUCKETS(bb) * sizeof(long);
++
++ if (check_mul_overflow(ret, bsize, &ret))
++ return -1;
++ if (check_add_overflow(ret, NFT_PIPAPO_ALIGN_HEADROOM, &ret))
++ return -1;
++ if (ret > INT_MAX)
++ return -1;
++
++ return ret;
++}
++
+ /**
+ * pipapo_resize() - Resize lookup or mapping table, or both
+ * @f: Field containing lookup and mapping tables
+@@ -628,6 +652,7 @@ static int pipapo_resize(struct nft_pipa
+ union nft_pipapo_map_bucket *new_mt, *old_mt = f->mt;
+ size_t new_bucket_size, copy;
+ int group, bucket;
++ ssize_t lt_size;
+
+ new_bucket_size = DIV_ROUND_UP(rules, BITS_PER_LONG);
+ #ifdef NFT_PIPAPO_ALIGN
+@@ -643,10 +668,11 @@ static int pipapo_resize(struct nft_pipa
+ else
+ copy = new_bucket_size;
+
+- new_lt = kvzalloc(f->groups * NFT_PIPAPO_BUCKETS(f->bb) *
+- new_bucket_size * sizeof(*new_lt) +
+- NFT_PIPAPO_ALIGN_HEADROOM,
+- GFP_KERNEL);
++ lt_size = lt_calculate_size(f->groups, f->bb, new_bucket_size);
++ if (lt_size < 0)
++ return -ENOMEM;
++
++ new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
+ if (!new_lt)
+ return -ENOMEM;
+
+@@ -845,7 +871,7 @@ static void pipapo_lt_bits_adjust(struct
+ {
+ unsigned long *new_lt;
+ int groups, bb;
+- size_t lt_size;
++ ssize_t lt_size;
+
+ lt_size = f->groups * NFT_PIPAPO_BUCKETS(f->bb) * f->bsize *
+ sizeof(*f->lt);
+@@ -855,15 +881,17 @@ static void pipapo_lt_bits_adjust(struct
+ groups = f->groups * 2;
+ bb = NFT_PIPAPO_GROUP_BITS_LARGE_SET;
+
+- lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
+- sizeof(*f->lt);
++ lt_size = lt_calculate_size(groups, bb, f->bsize);
++ if (lt_size < 0)
++ return;
+ } else if (f->bb == NFT_PIPAPO_GROUP_BITS_LARGE_SET &&
+ lt_size < NFT_PIPAPO_LT_SIZE_LOW) {
+ groups = f->groups / 2;
+ bb = NFT_PIPAPO_GROUP_BITS_SMALL_SET;
+
+- lt_size = groups * NFT_PIPAPO_BUCKETS(bb) * f->bsize *
+- sizeof(*f->lt);
++ lt_size = lt_calculate_size(groups, bb, f->bsize);
++ if (lt_size < 0)
++ return;
+
+ /* Don't increase group width if the resulting lookup table size
+ * would exceed the upper size threshold for a "small" set.
+@@ -874,7 +902,7 @@ static void pipapo_lt_bits_adjust(struct
+ return;
+ }
+
+- new_lt = kvzalloc(lt_size + NFT_PIPAPO_ALIGN_HEADROOM, GFP_KERNEL_ACCOUNT);
++ new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
+ if (!new_lt)
+ return;
+
+@@ -1348,13 +1376,15 @@ static struct nft_pipapo_match *pipapo_c
+
+ for (i = 0; i < old->field_count; i++) {
+ unsigned long *new_lt;
++ ssize_t lt_size;
+
+ memcpy(dst, src, offsetof(struct nft_pipapo_field, lt));
+
+- new_lt = kvzalloc(src->groups * NFT_PIPAPO_BUCKETS(src->bb) *
+- src->bsize * sizeof(*dst->lt) +
+- NFT_PIPAPO_ALIGN_HEADROOM,
+- GFP_KERNEL_ACCOUNT);
++ lt_size = lt_calculate_size(src->groups, src->bb, src->bsize);
++ if (lt_size < 0)
++ goto out_lt;
++
++ new_lt = kvzalloc(lt_size, GFP_KERNEL_ACCOUNT);
+ if (!new_lt)
+ goto out_lt;
+
--- /dev/null
+From stable+bounces-223164-greg=kroah.com@vger.kernel.org Thu Mar 5 03:14:51 2026
+From: Rahul Sharma <black.hawk@163.com>
+Date: Thu, 5 Mar 2026 10:14:17 +0800
+Subject: ntfs: set dummy blocksize to read boot_block when mounting
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, Pedro Demarchi Gomes <pedrodemargomes@gmail.com>, syzbot+f4f84b57a01d6b8364ad@syzkaller.appspotmail.com, Konstantin Komarov <almaz.alexandrovich@paragon-software.com>, Rahul Sharma <black.hawk@163.com>
+Message-ID: <20260305021417.3956903-1-black.hawk@163.com>
+
+From: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
+
+[ Upstream commit d1693a7d5a38acf6424235a6070bcf5b186a360d ]
+
+When mounting, sb->s_blocksize is used to read the boot_block without
+being defined or validated. Set a dummy blocksize before attempting to
+read the boot_block.
+
+The issue can be triggered with the following syz reproducer:
+
+ mkdirat(0xffffffffffffff9c, &(0x7f0000000080)='./file1\x00', 0x0)
+ r4 = openat$nullb(0xffffffffffffff9c, &(0x7f0000000040), 0x121403, 0x0)
+ ioctl$FS_IOC_SETFLAGS(r4, 0x40081271, &(0x7f0000000980)=0x4000)
+ mount(&(0x7f0000000140)=@nullb, &(0x7f0000000040)='./cgroup\x00',
+ &(0x7f0000000000)='ntfs3\x00', 0x2208004, 0x0)
+ syz_clone(0x88200200, 0x0, 0x0, 0x0, 0x0, 0x0)
+
+Here, the ioctl sets the bdev block size to 16384. During mount,
+get_tree_bdev_flags() calls sb_set_blocksize(sb, block_size(bdev)),
+but since block_size(bdev) > PAGE_SIZE, sb_set_blocksize() leaves
+sb->s_blocksize at zero.
+
+Later, ntfs_init_from_boot() attempts to read the boot_block while
+sb->s_blocksize is still zero, which triggers the bug.
+
+Reported-by: syzbot+f4f84b57a01d6b8364ad@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=f4f84b57a01d6b8364ad
+Signed-off-by: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
+[almaz.alexandrovich@paragon-software.com: changed comment style, added
+return value handling]
+Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+[ The context change is due to the commit c39de951282d
+("fs/ntfs3: Improve alternative boot processing")
+in v6.8 which is irrelevant to the logic of this patch. ]
+Signed-off-by: Rahul Sharma <black.hawk@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs3/super.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -693,6 +693,11 @@ static int ntfs_init_from_boot(struct su
+
+ sbi->volume.blocks = dev_size >> PAGE_SHIFT;
+
++ /* Set dummy blocksize to read boot_block. */
++ if (!sb_min_blocksize(sb, PAGE_SIZE)) {
++ return -EINVAL;
++ }
++
+ bh = ntfs_bread(sb, 0);
+ if (!bh)
+ return -EIO;
--- /dev/null
+From stable+bounces-223169-greg=kroah.com@vger.kernel.org Thu Mar 5 04:14:07 2026
+From: Rahul Sharma <black.hawk@163.com>
+Date: Thu, 5 Mar 2026 11:12:42 +0800
+Subject: nvme: fix admin request_queue lifetime
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org, Keith Busch <kbusch@kernel.org>, Casey Chen <cachen@purestorage.com>, Christoph Hellwig <hch@lst.de>, Hannes Reinecke <hare@suse.de>, Ming Lei <ming.lei@redhat.com>, Chaitanya Kulkarni <kch@nvidia.com>, Rahul Sharma <black.hawk@163.com>
+Message-ID: <20260305031242.929016-1-black.hawk@163.com>
+
+From: Keith Busch <kbusch@kernel.org>
+
+[ Upstream commit 03b3bcd319b3ab5182bc9aaa0421351572c78ac0 ]
+
+The namespaces can access the controller's admin request_queue, and
+stale references on the namespaces may exist after tearing down the
+controller. Ensure the admin request_queue is active by moving the
+controller's 'put' to after all controller references have been released
+to ensure no one can access the request_queue. This fixes a reported
+use-after-free bug:
+
+ BUG: KASAN: slab-use-after-free in blk_queue_enter+0x41c/0x4a0
+ Read of size 8 at addr ffff88c0a53819f8 by task nvme/3287
+ CPU: 67 UID: 0 PID: 3287 Comm: nvme Tainted: G E 6.13.2-ga1582f1a031e #15
+ Tainted: [E]=UNSIGNED_MODULE
+ Hardware name: Jabil /EGS 2S MB1, BIOS 1.00 06/18/2025
+ Call Trace:
+ <TASK>
+ dump_stack_lvl+0x4f/0x60
+ print_report+0xc4/0x620
+ ? _raw_spin_lock_irqsave+0x70/0xb0
+ ? _raw_read_unlock_irqrestore+0x30/0x30
+ ? blk_queue_enter+0x41c/0x4a0
+ kasan_report+0xab/0xe0
+ ? blk_queue_enter+0x41c/0x4a0
+ blk_queue_enter+0x41c/0x4a0
+ ? __irq_work_queue_local+0x75/0x1d0
+ ? blk_queue_start_drain+0x70/0x70
+ ? irq_work_queue+0x18/0x20
+ ? vprintk_emit.part.0+0x1cc/0x350
+ ? wake_up_klogd_work_func+0x60/0x60
+ blk_mq_alloc_request+0x2b7/0x6b0
+ ? __blk_mq_alloc_requests+0x1060/0x1060
+ ? __switch_to+0x5b7/0x1060
+ nvme_submit_user_cmd+0xa9/0x330
+ nvme_user_cmd.isra.0+0x240/0x3f0
+ ? force_sigsegv+0xe0/0xe0
+ ? nvme_user_cmd64+0x400/0x400
+ ? vfs_fileattr_set+0x9b0/0x9b0
+ ? cgroup_update_frozen_flag+0x24/0x1c0
+ ? cgroup_leave_frozen+0x204/0x330
+ ? nvme_ioctl+0x7c/0x2c0
+ blkdev_ioctl+0x1a8/0x4d0
+ ? blkdev_common_ioctl+0x1930/0x1930
+ ? fdget+0x54/0x380
+ __x64_sys_ioctl+0x129/0x190
+ do_syscall_64+0x5b/0x160
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+ RIP: 0033:0x7f765f703b0b
+ Code: ff ff ff 85 c0 79 9b 49 c7 c4 ff ff ff ff 5b 5d 4c 89 e0 41 5c c3 66 0f 1f 84 00 00 00 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d dd 52 0f 00 f7 d8 64 89 01 48
+ RSP: 002b:00007ffe2cefe808 EFLAGS: 00000202 ORIG_RAX: 0000000000000010
+ RAX: ffffffffffffffda RBX: 00007ffe2cefe860 RCX: 00007f765f703b0b
+ RDX: 00007ffe2cefe860 RSI: 00000000c0484e41 RDI: 0000000000000003
+ RBP: 0000000000000000 R08: 0000000000000003 R09: 0000000000000000
+ R10: 00007f765f611d50 R11: 0000000000000202 R12: 0000000000000003
+ R13: 00000000c0484e41 R14: 0000000000000001 R15: 00007ffe2cefea60
+ </TASK>
+
+Reported-by: Casey Chen <cachen@purestorage.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+[ The context change is due to the commit 2b3f056f72e5
+("blk-mq: move the call to blk_put_queue out of blk_mq_destroy_queue")
+in v6.2 which is irrelevant to the logic of this patch. ]
+Signed-off-by: Rahul Sharma <black.hawk@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvme/host/core.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -5180,6 +5180,8 @@ static void nvme_free_ctrl(struct device
+ container_of(dev, struct nvme_ctrl, ctrl_device);
+ struct nvme_subsystem *subsys = ctrl->subsys;
+
++ if (ctrl->admin_q)
++ blk_put_queue(ctrl->admin_q);
+ if (!subsys || ctrl->instance != subsys->instance)
+ ida_free(&nvme_instance_ida, ctrl->instance);
+
--- /dev/null
+From jianqkang@sina.cn Wed Mar 4 03:49:40 2026
+From: Jianqiang kang <jianqkang@sina.cn>
+Date: Wed, 4 Mar 2026 10:49:34 +0800
+Subject: rcu/nocb: Fix possible invalid rdp's->nocb_cb_kthread pointer access
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, qiang.zhang1211@gmail.com
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, paulmck@kernel.org, frederic@kernel.org, quic_neeraju@quicinc.com, josh@joshtriplett.org, rostedt@goodmis.org, mathieu.desnoyers@efficios.com, jiangshanlai@gmail.com, joel@joelfernandes.org, rcu@vger.kernel.org, neeraj.upadhyay@kernel.org
+Message-ID: <20260304024934.2364383-1-jianqkang@sina.cn>
+
+From: Zqiang <qiang.zhang1211@gmail.com>
+
+[ Upstream commit 1bba3900ca18bdae28d1b9fa10f16a8f8cb2ada1 ]
+
+In the preparation stage of CPU online, if the corresponding
+rdp's->nocb_cb_kthread does not exist, it will be created;
+there is a situation where the rdp's rcuop kthreads creation fails,
+and then de-offload this CPU's rdp, does not assign this CPU's
+rdp->nocb_cb_kthread pointer, but this rdp's->nocb_gp_rdp and
+rdp's->rdp_gp->nocb_gp_kthread is still valid.
+
+This will cause the subsequent re-offload operation of this offline
+CPU, which will pass the conditional check and the kthread_unpark()
+will access invalid rdp's->nocb_cb_kthread pointer.
+
+This commit therefore uses rdp's->nocb_gp_kthread instead of
+rdp_gp's->nocb_gp_kthread for safety check.
+
+Signed-off-by: Zqiang <qiang.zhang1211@gmail.com>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Neeraj Upadhyay (AMD) <neeraj.upadhyay@kernel.org>
+[ Minor conflict resolved. ]
+Signed-off-by: Jianqiang kang <jianqkang@sina.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/rcu/tree_nocb.h | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/kernel/rcu/tree_nocb.h
++++ b/kernel/rcu/tree_nocb.h
+@@ -1112,7 +1112,6 @@ static long rcu_nocb_rdp_offload(void *a
+ struct rcu_segcblist *cblist = &rdp->cblist;
+ unsigned long flags;
+ int wake_gp;
+- struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
+
+ WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
+ /*
+@@ -1122,7 +1121,7 @@ static long rcu_nocb_rdp_offload(void *a
+ if (!rdp->nocb_gp_rdp)
+ return -EINVAL;
+
+- if (WARN_ON_ONCE(!rdp_gp->nocb_gp_kthread))
++ if (WARN_ON_ONCE(!rdp->nocb_gp_kthread))
+ return -EINVAL;
+
+ pr_info("Offloading %d\n", rdp->cpu);
+@@ -1151,7 +1150,7 @@ static long rcu_nocb_rdp_offload(void *a
+ */
+ wake_gp = rdp_offload_toggle(rdp, true, flags);
+ if (wake_gp)
+- wake_up_process(rdp_gp->nocb_gp_kthread);
++ wake_up_process(rdp->nocb_gp_kthread);
+ swait_event_exclusive(rdp->nocb_state_wq,
+ rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
+ rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
--- /dev/null
+From stable+bounces-227888-greg=kroah.com@vger.kernel.org Mon Mar 23 07:32:09 2026
+From: Jianqiang kang <jianqkang@sina.cn>
+Date: Mon, 23 Mar 2026 14:31:14 +0800
+Subject: riscv: stacktrace: Disable KASAN checks for non-current tasks
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org, zhangchunyan@iscas.ac.cn
+Cc: patches@lists.linux.dev, linux-kernel@vger.kernel.org, paul.walmsley@sifive.com, palmer@dabbelt.com, aou@eecs.berkeley.edu, xujiakai2025@iscas.ac.cn, linux-riscv@lists.infradead.org, pjw@kernel.org
+Message-ID: <20260323063115.3555043-1-jianqkang@sina.cn>
+
+From: Chunyan Zhang <zhangchunyan@iscas.ac.cn>
+
+[ Upstream commit 060ea84a484e852b52b938f234bf9b5503a6c910 ]
+
+When unwinding the stack of a task other than current, KASAN would report
+"BUG: KASAN: out-of-bounds in walk_stackframe+0x41c/0x460"
+
+The same issue exists on x86 and has been resolved by the commit
+84936118bdf3 ("x86/unwind: Disable KASAN checks for non-current tasks")
+The solution could be applied to RISC-V too.
+
+This patch also can solve the issue:
+https://seclists.org/oss-sec/2025/q4/23
+
+Fixes: 5d8544e2d007 ("RISC-V: Generic library routines and assembly")
+Co-developed-by: Jiakai Xu <xujiakai2025@iscas.ac.cn>
+Signed-off-by: Jiakai Xu <xujiakai2025@iscas.ac.cn>
+Signed-off-by: Chunyan Zhang <zhangchunyan@iscas.ac.cn>
+Link: https://lore.kernel.org/r/20251022072608.743484-1-zhangchunyan@iscas.ac.cn
+[pjw@kernel.org: clean up checkpatch issues]
+Signed-off-by: Paul Walmsley <pjw@kernel.org>
+[ Minor conflict resolved. ]
+Signed-off-by: Jianqiang kang <jianqkang@sina.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/kernel/stacktrace.c | 21 +++++++++++++++++++--
+ 1 file changed, 19 insertions(+), 2 deletions(-)
+
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -16,6 +16,22 @@
+
+ #ifdef CONFIG_FRAME_POINTER
+
++/*
++ * This disables KASAN checking when reading a value from another task's stack,
++ * since the other task could be running on another CPU and could have poisoned
++ * the stack in the meantime.
++ */
++#define READ_ONCE_TASK_STACK(task, x) \
++({ \
++ unsigned long val; \
++ unsigned long addr = x; \
++ if ((task) == current) \
++ val = READ_ONCE(addr); \
++ else \
++ val = READ_ONCE_NOCHECK(addr); \
++ val; \
++})
++
+ extern asmlinkage void ret_from_exception(void);
+
+ static inline int fp_is_valid(unsigned long fp, unsigned long sp)
+@@ -68,8 +84,9 @@ void notrace walk_stackframe(struct task
+ fp = frame->ra;
+ pc = regs->ra;
+ } else {
+- fp = frame->fp;
+- pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
++ fp = READ_ONCE_TASK_STACK(task, frame->fp);
++ pc = READ_ONCE_TASK_STACK(task, frame->ra);
++ pc = ftrace_graph_ret_addr(current, &graph_idx, pc,
+ &frame->ra);
+ if (pc == (unsigned long)ret_from_exception) {
+ if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
--- /dev/null
+From f775276edc0c505dc0f782773796c189f31a1123 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <hca@linux.ibm.com>
+Date: Mon, 2 Mar 2026 14:34:58 +0100
+Subject: s390/xor: Fix xor_xc_2() inline assembly constraints
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+commit f775276edc0c505dc0f782773796c189f31a1123 upstream.
+
+The inline assembly constraints for xor_xc_2() are incorrect. "bytes",
+"p1", and "p2" are input operands, while all three of them are modified
+within the inline assembly. Given that the function consists only of this
+inline assembly it seems unlikely that this may cause any problems, however
+fix this in any case.
+
+Fixes: 2cfc5f9ce7f5 ("s390/xor: optimized xor routing using the XC instruction")
+Cc: stable@vger.kernel.org
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Link: https://lore.kernel.org/r/20260302133500.1560531-2-hca@linux.ibm.com
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/lib/xor.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/lib/xor.c
++++ b/arch/s390/lib/xor.c
+@@ -29,8 +29,8 @@ static void xor_xc_2(unsigned long bytes
+ " j 3f\n"
+ "2: xc 0(1,%1),0(%2)\n"
+ "3:\n"
+- : : "d" (bytes), "a" (p1), "a" (p2)
+- : "0", "1", "cc", "memory");
++ : "+d" (bytes), "+a" (p1), "+a" (p2)
++ : : "0", "1", "cc", "memory");
+ }
+
+ static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
tools-bootconfig-fix-fd-leak-in-load_xbc_file-on-fst.patch
netfilter-nf_tables-de-constify-set-commit-ops-function-argument.patch
netfilter-nft_set_pipapo-split-gc-into-unlink-and-reclaim-phase.patch
+dmaengine-mmp_pdma-fix-race-condition-in-mmp_pdma_residue.patch
+s390-xor-fix-xor_xc_2-inline-assembly-constraints.patch
+net-clear-the-dst-when-changing-skb-protocol.patch
+drm-amdgpu-use-proper-dc-check-in-amdgpu_display_supported_domains.patch
+drm-amdgpu-clarify-dc-checks.patch
+drm-amd-display-add-pixel_clock-to-amd_pp_display_configuration.patch
+drm-amd-pm-use-pm_display_cfg-in-legacy-dpm-v2.patch
+net-add-support-for-segmenting-tcp-fraglist-gso-packets.patch
+net-gso-fix-tcp-fraglist-segmentation-after-pull-from-frag_list.patch
+net-fix-segmentation-of-forwarding-fraglist-gro.patch
+rcu-nocb-fix-possible-invalid-rdp-s-nocb_cb_kthread-pointer-access.patch
+ntfs-set-dummy-blocksize-to-read-boot_block-when-mounting.patch
+nvme-fix-admin-request_queue-lifetime.patch
+f2fs-fix-to-trigger-foreground-gc-during-f2fs_map_blocks-in-lfs-mode.patch
+net-stmmac-fix-tso-dma-api-usage-causing-oops.patch
+mptcp-pm-in-kernel-always-set-id-as-avail-when-rm-endp.patch
+dlm-fix-possible-lkb_resource-null-dereference.patch
+netfilter-nf_tables-missing-objects-with-no-memcg-accounting.patch
+netfilter-nft_set_pipapo-prevent-overflow-in-lookup-table-allocation.patch
+wifi-brcmfmac-fix-use-after-free-when-rescheduling-brcmf_btcoex_info-work.patch
+riscv-stacktrace-disable-kasan-checks-for-non-current-tasks.patch
--- /dev/null
+From stable+bounces-224796-greg=kroah.com@vger.kernel.org Thu Mar 12 04:17:00 2026
+From: Robert Garcia <rob_garcia@163.com>
+Date: Thu, 12 Mar 2026 11:14:29 +0800
+Subject: wifi: brcmfmac: fix use-after-free when rescheduling brcmf_btcoex_info work
+To: stable@vger.kernel.org, Duoming Zhou <duoming@zju.edu.cn>
+Cc: Johannes Berg <johannes.berg@intel.com>, Robert Garcia <rob_garcia@163.com>, Arend van Spriel <arend.vanspriel@broadcom.com>, Kalle Valo <kvalo@kernel.org>, Franky Lin <franky.lin@broadcom.com>, Hante Meuleman <hante.meuleman@broadcom.com>, "David S . Miller" <davem@davemloft.net>, Eric Dumazet <edumazet@google.com>, Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>, Pieter-Paul Giesberts <pieterpg@broadcom.com>, Piotr Haber <phaber@broadcom.com>, "John W . Linville" <linville@tuxdriver.com>, linux-wireless@vger.kernel.org, brcm80211-dev-list.pdl@broadcom.com, SHA-cyfmac-dev-list@infineon.com, netdev@vger.kernel.org, linux-kernel@vger.kernel.org
+Message-ID: <20260312031429.3432419-1-rob_garcia@163.com>
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit 9cb83d4be0b9b697eae93d321e0da999f9cdfcfc ]
+
+The brcmf_btcoex_detach() only shuts down the btcoex timer if the
+flag timer_on is true. However, the brcmf_btcoex_timerfunc(), which
+runs as timer handler, sets timer_on to false. This creates critical
+race conditions:
+
+1.If brcmf_btcoex_detach() is called while brcmf_btcoex_timerfunc()
+is executing, it may observe timer_on as false and skip the call to
+timer_shutdown_sync().
+
+2.The brcmf_btcoex_timerfunc() may then reschedule the brcmf_btcoex_info
+worker after the cancel_work_sync() has been executed, resulting in
+use-after-free bugs.
+
+The use-after-free bugs occur in two distinct scenarios, depending on
+the timing of when the brcmf_btcoex_info struct is freed relative to
+the execution of its worker thread.
+
+Scenario 1: Freed before the worker is scheduled
+
+The brcmf_btcoex_info is deallocated before the worker is scheduled.
+A race condition can occur when schedule_work(&bt_local->work) is
+called after the target memory has been freed. The sequence of events
+is detailed below:
+
+CPU0 | CPU1
+brcmf_btcoex_detach | brcmf_btcoex_timerfunc
+ | bt_local->timer_on = false;
+ if (cfg->btcoex->timer_on) |
+ ... |
+ cancel_work_sync(); |
+ ... |
+ kfree(cfg->btcoex); // FREE |
+ | schedule_work(&bt_local->work); // USE
+
+Scenario 2: Freed after the worker is scheduled
+
+The brcmf_btcoex_info is freed after the worker has been scheduled
+but before or during its execution. In this case, statements within
+the brcmf_btcoex_handler() — such as the container_of macro and
+subsequent dereferences of the brcmf_btcoex_info object — will cause
+a use-after-free access. The following timeline illustrates this
+scenario:
+
+CPU0 | CPU1
+brcmf_btcoex_detach | brcmf_btcoex_timerfunc
+ | bt_local->timer_on = false;
+ if (cfg->btcoex->timer_on) |
+ ... |
+ cancel_work_sync(); |
+ ... | schedule_work(); // Reschedule
+ |
+ kfree(cfg->btcoex); // FREE | brcmf_btcoex_handler() // Worker
+ /* | btci = container_of(....); // USE
+ The kfree() above could | ...
+ also occur at any point | btci-> // USE
+ during the worker's execution|
+ */ |
+
+To resolve the race conditions, drop the conditional check and call
+timer_shutdown_sync() directly. It can deactivate the timer reliably,
+regardless of its current state. Once stopped, the timer_on state is
+then set to false.
+
+Fixes: 61730d4dfffc ("brcmfmac: support critical protocol API for DHCP")
+Acked-by: Arend van Spriel <arend.vanspriel@broadcom.com>
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Link: https://patch.msgid.link/20250822050839.4413-1-duoming@zju.edu.cn
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+[ Keep del_timer_sync() instead of timer_shutdown_sync() here. ]
+Signed-off-by: Robert Garcia <rob_garcia@163.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+@@ -392,10 +392,8 @@ void brcmf_btcoex_detach(struct brcmf_cf
+ if (!cfg->btcoex)
+ return;
+
+- if (cfg->btcoex->timer_on) {
+- cfg->btcoex->timer_on = false;
+- del_timer_sync(&cfg->btcoex->timer);
+- }
++ del_timer_sync(&cfg->btcoex->timer);
++ cfg->btcoex->timer_on = false;
+
+ cancel_work_sync(&cfg->btcoex->work);
+