--- /dev/null
+From 8f614469de248a4bc55fb07e55d5f4c340c75b11 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Wed, 21 Aug 2024 14:32:02 -0400
+Subject: drm/amdgpu: align pp_power_profile_mode with kernel docs
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 8f614469de248a4bc55fb07e55d5f4c340c75b11 upstream.
+
+The kernel doc says you need to select manual mode to
+adjust this, but the code only allows you to adjust it when
+manual mode is not selected. Remove the manual mode check.
+
+Reviewed-by: Kenneth Feng <kenneth.feng@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit bbb05f8a9cd87f5046d05a0c596fddfb714ee457)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1882,8 +1882,7 @@ static int smu_adjust_power_state_dynami
+ smu_dpm_ctx->dpm_level = level;
+ }
+
+- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+- smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
++ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+ index = fls(smu->workload_mask);
+ index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+ workload[0] = smu->workload_setting[index];
+@@ -1960,8 +1959,7 @@ static int smu_switch_power_profile(void
+ workload[0] = smu->workload_setting[index];
+ }
+
+- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+- smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
++ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+ smu_bump_power_profile_mode(smu, workload, 0);
+
+ return 0;
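For reference, the flow the kernel doc describes for adjusting the profile is roughly: select the "manual" performance level, then write the desired profile index. A minimal userspace sketch, assuming the usual amdgpu sysfs paths under card0 and an illustrative profile index of "1" (the actual profile table is ASIC-specific and can be read from pp_power_profile_mode):

	/*
	 * Minimal sketch, not part of the patch: force the "manual" DPM level,
	 * then select a power profile mode, as the kernel doc describes.
	 * The card0 path and the profile index "1" are illustrative assumptions.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	static int write_sysfs(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		if (fputs(val, f) < 0) {
			fclose(f);
			return -1;
		}
		return fclose(f);
	}

	int main(void)
	{
		const char *dev = "/sys/class/drm/card0/device";
		char path[256];

		snprintf(path, sizeof(path), "%s/power_dpm_force_performance_level", dev);
		if (write_sysfs(path, "manual"))
			return EXIT_FAILURE;

		snprintf(path, sizeof(path), "%s/pp_power_profile_mode", dev);
		if (write_sysfs(path, "1"))
			return EXIT_FAILURE;

		return EXIT_SUCCESS;
	}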
--- /dev/null
+From d420c857d85777663e8d16adfc24463f5d5c2dbc Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 22 Aug 2024 21:54:24 -0400
+Subject: drm/amdgpu/swsmu: always force a state reprogram on init
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit d420c857d85777663e8d16adfc24463f5d5c2dbc upstream.
+
+Always reprogram the hardware state on init. This ensures
+the PMFW state is explicitly programmed and we are not relying
+on the default PMFW state.
+
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3131
+Reviewed-by: Kenneth Feng <kenneth.feng@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit c50fe289ed7207f71df3b5f1720512a9620e84fb)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1841,8 +1841,9 @@ static int smu_bump_power_profile_mode(s
+ }
+
+ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
+- enum amd_dpm_forced_level level,
+- bool skip_display_settings)
++ enum amd_dpm_forced_level level,
++ bool skip_display_settings,
++ bool force_update)
+ {
+ int ret = 0;
+ int index = 0;
+@@ -1871,7 +1872,7 @@ static int smu_adjust_power_state_dynami
+ }
+ }
+
+- if (smu_dpm_ctx->dpm_level != level) {
++ if (force_update || smu_dpm_ctx->dpm_level != level) {
+ ret = smu_asic_set_performance_level(smu, level);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to set performance level!");
+@@ -1887,7 +1888,7 @@ static int smu_adjust_power_state_dynami
+ index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+ workload[0] = smu->workload_setting[index];
+
+- if (smu->power_profile_mode != workload[0])
++ if (force_update || smu->power_profile_mode != workload[0])
+ smu_bump_power_profile_mode(smu, workload, 0);
+ }
+
+@@ -1908,11 +1909,13 @@ static int smu_handle_task(struct smu_co
+ ret = smu_pre_display_config_changed(smu);
+ if (ret)
+ return ret;
+- ret = smu_adjust_power_state_dynamic(smu, level, false);
++ ret = smu_adjust_power_state_dynamic(smu, level, false, false);
+ break;
+ case AMD_PP_TASK_COMPLETE_INIT:
++ ret = smu_adjust_power_state_dynamic(smu, level, true, true);
++ break;
+ case AMD_PP_TASK_READJUST_POWER_STATE:
+- ret = smu_adjust_power_state_dynamic(smu, level, true);
++ ret = smu_adjust_power_state_dynamic(smu, level, true, false);
+ break;
+ default:
+ break;
--- /dev/null
+From 50f1199250912568606b3778dc56646c10cb7b04 Mon Sep 17 00:00:00 2001
+From: Zack Rusin <zack.rusin@broadcom.com>
+Date: Fri, 16 Aug 2024 14:32:06 -0400
+Subject: drm/vmwgfx: Fix prime with external buffers
+
+From: Zack Rusin <zack.rusin@broadcom.com>
+
+commit 50f1199250912568606b3778dc56646c10cb7b04 upstream.
+
+Make sure that for external buffers mapping goes through the dma_buf
+interface instead of trying to access pages directly.
+
+External buffers might not provide direct access to readable/writable
+pages, so the dma_buf interface has to be used to make sure the BOs
+created from external dma_bufs can be read.
+
+Fixes crashes in IGT's kms_prime with vgem. Regular desktop usage won't
+trigger this because virtual machines will not have multiple GPUs, but
+it enables better test coverage in IGT.
+
+Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
+Fixes: b32233acceff ("drm/vmwgfx: Fix prime import/export")
+Cc: <stable@vger.kernel.org> # v6.6+
+Cc: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: <stable@vger.kernel.org> # v6.9+
+Link: https://patchwork.freedesktop.org/patch/msgid/20240816183332.31961-3-zack.rusin@broadcom.com
+Reviewed-by: Martin Krastev <martin.krastev@broadcom.com>
+Reviewed-by: Maaz Mombasawala <maaz.mombasawala@broadcom.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 114 +++++++++++++++++++++++++++++++++--
+ drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4 -
+ drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 12 +--
+ 3 files changed, 118 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+@@ -27,6 +27,8 @@
+ **************************************************************************/
+
+ #include "vmwgfx_drv.h"
++
++#include "vmwgfx_bo.h"
+ #include <linux/highmem.h>
+
+ /*
+@@ -420,13 +422,105 @@ static int vmw_bo_cpu_blit_line(struct v
+ return 0;
+ }
+
++static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
++{
++ struct vmw_private *vmw =
++ container_of(bo->tbo.bdev, struct vmw_private, bdev);
++ void *ptr = NULL;
++ int ret;
++
++ if (bo->tbo.base.import_attach) {
++ ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
++ if (ret) {
++ drm_dbg_driver(&vmw->drm,
++ "Wasn't able to map external bo!\n");
++ goto out;
++ }
++ ptr = map->vaddr;
++ } else {
++ ptr = vmw_bo_map_and_cache(bo);
++ }
++
++out:
++ return ptr;
++}
++
++static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
++{
++ if (bo->tbo.base.import_attach)
++ dma_buf_vunmap(bo->tbo.base.dma_buf, map);
++ else
++ vmw_bo_unmap(bo);
++}
++
++static int vmw_external_bo_copy(struct vmw_bo *dst, u32 dst_offset,
++ u32 dst_stride, struct vmw_bo *src,
++ u32 src_offset, u32 src_stride,
++ u32 width_in_bytes, u32 height,
++ struct vmw_diff_cpy *diff)
++{
++ struct vmw_private *vmw =
++ container_of(dst->tbo.bdev, struct vmw_private, bdev);
++ size_t dst_size = dst->tbo.resource->size;
++ size_t src_size = src->tbo.resource->size;
++ struct iosys_map dst_map = {0};
++ struct iosys_map src_map = {0};
++ int ret, i;
++ int x_in_bytes;
++ u8 *vsrc;
++ u8 *vdst;
++
++ vsrc = map_external(src, &src_map);
++ if (!vsrc) {
++ drm_dbg_driver(&vmw->drm, "Wasn't able to map src\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ vdst = map_external(dst, &dst_map);
++ if (!vdst) {
++ drm_dbg_driver(&vmw->drm, "Wasn't able to map dst\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ vsrc += src_offset;
++ vdst += dst_offset;
++ if (src_stride == dst_stride) {
++ dst_size -= dst_offset;
++ src_size -= src_offset;
++ memcpy(vdst, vsrc,
++ min(dst_stride * height, min(dst_size, src_size)));
++ } else {
++ WARN_ON(dst_stride < width_in_bytes);
++ for (i = 0; i < height; ++i) {
++ memcpy(vdst, vsrc, width_in_bytes);
++ vsrc += src_stride;
++ vdst += dst_stride;
++ }
++ }
++
++ x_in_bytes = (dst_offset % dst_stride);
++ diff->rect.x1 = x_in_bytes / diff->cpp;
++ diff->rect.y1 = ((dst_offset - x_in_bytes) / dst_stride);
++ diff->rect.x2 = diff->rect.x1 + width_in_bytes / diff->cpp;
++ diff->rect.y2 = diff->rect.y1 + height;
++
++ ret = 0;
++out:
++ unmap_external(src, &src_map);
++ unmap_external(dst, &dst_map);
++
++ return ret;
++}
++
+ /**
+ * vmw_bo_cpu_blit - in-kernel cpu blit.
+ *
+- * @dst: Destination buffer object.
++ * @vmw_dst: Destination buffer object.
+ * @dst_offset: Destination offset of blit start in bytes.
+ * @dst_stride: Destination stride in bytes.
+- * @src: Source buffer object.
++ * @vmw_src: Source buffer object.
+ * @src_offset: Source offset of blit start in bytes.
+ * @src_stride: Source stride in bytes.
+ * @w: Width of blit.
+@@ -444,13 +538,15 @@ static int vmw_bo_cpu_blit_line(struct v
+ * Neither of the buffer objects may be placed in PCI memory
+ * (Fixed memory in TTM terminology) when using this function.
+ */
+-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
++int vmw_bo_cpu_blit(struct vmw_bo *vmw_dst,
+ u32 dst_offset, u32 dst_stride,
+- struct ttm_buffer_object *src,
++ struct vmw_bo *vmw_src,
+ u32 src_offset, u32 src_stride,
+ u32 w, u32 h,
+ struct vmw_diff_cpy *diff)
+ {
++ struct ttm_buffer_object *src = &vmw_src->tbo;
++ struct ttm_buffer_object *dst = &vmw_dst->tbo;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+@@ -460,6 +556,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_ob
+ int ret = 0;
+ struct page **dst_pages = NULL;
+ struct page **src_pages = NULL;
++ bool src_external = (src->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
++ bool dst_external = (dst->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
++
++ if (WARN_ON(dst == src))
++ return -EINVAL;
+
+ /* Buffer objects need to be either pinned or reserved: */
+ if (!(dst->pin_count))
+@@ -479,6 +580,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_ob
+ return ret;
+ }
+
++ if (src_external || dst_external)
++ return vmw_external_bo_copy(vmw_dst, dst_offset, dst_stride,
++ vmw_src, src_offset, src_stride,
++ w, h, diff);
++
+ if (!src->ttm->pages && src->ttm->sg) {
+ src_pages = kvmalloc_array(src->ttm->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -1355,9 +1355,9 @@ void vmw_diff_memcpy(struct vmw_diff_cpy
+
+ void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);
+
+-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
++int vmw_bo_cpu_blit(struct vmw_bo *dst,
+ u32 dst_offset, u32 dst_stride,
+- struct ttm_buffer_object *src,
++ struct vmw_bo *src,
+ u32 src_offset, u32 src_stride,
+ u32 w, u32 h,
+ struct vmw_diff_cpy *diff);
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -497,7 +497,7 @@ static void vmw_stdu_bo_cpu_commit(struc
+ container_of(dirty->unit, typeof(*stdu), base);
+ s32 width, height;
+ s32 src_pitch, dst_pitch;
+- struct ttm_buffer_object *src_bo, *dst_bo;
++ struct vmw_bo *src_bo, *dst_bo;
+ u32 src_offset, dst_offset;
+ struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(stdu->cpp);
+
+@@ -512,11 +512,11 @@ static void vmw_stdu_bo_cpu_commit(struc
+
+ /* Assume we are blitting from Guest (bo) to Host (display_srf) */
+ src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
+- src_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
++ src_bo = stdu->display_srf->res.guest_memory_bo;
+ src_offset = ddirty->top * src_pitch + ddirty->left * stdu->cpp;
+
+ dst_pitch = ddirty->pitch;
+- dst_bo = &ddirty->buf->tbo;
++ dst_bo = ddirty->buf;
+ dst_offset = ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
+
+ (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
+@@ -1136,7 +1136,7 @@ vmw_stdu_bo_populate_update_cpu(struct v
+ struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0);
+ struct vmw_stdu_update_gb_image *cmd_img = cmd;
+ struct vmw_stdu_update *cmd_update;
+- struct ttm_buffer_object *src_bo, *dst_bo;
++ struct vmw_bo *src_bo, *dst_bo;
+ u32 src_offset, dst_offset;
+ s32 src_pitch, dst_pitch;
+ s32 width, height;
+@@ -1150,11 +1150,11 @@ vmw_stdu_bo_populate_update_cpu(struct v
+
+ diff.cpp = stdu->cpp;
+
+- dst_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
++ dst_bo = stdu->display_srf->res.guest_memory_bo;
+ dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
+ dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
+
+- src_bo = &vfbbo->buffer->tbo;
++ src_bo = vfbbo->buffer;
+ src_pitch = update->vfb->base.pitches[0];
+ src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
+ stdu->cpp;
--- /dev/null
+From f09b0ad55a1196f5891663f8888463c0541059cb Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Mon, 26 Aug 2024 19:11:18 +0200
+Subject: mptcp: close subflow when receiving TCP+FIN
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit f09b0ad55a1196f5891663f8888463c0541059cb upstream.
+
+When a peer decides to close one subflow in the middle of a connection
+having multiple subflows, the receiver of the first FIN should accept
+that, and close the subflow on its side as well. If not, the subflow
+will stay half-closed, and could even continue to be used until the end
+of the MPTCP connection or a reset from the network.
+
+The issue has not been seen before, probably because the in-kernel
+path-manager always sends a RM_ADDR before closing the subflow. Upon the
+reception of this RM_ADDR, the other peer will initiate the closure on
+its side as well. On the other hand, if the RM_ADDR is lost, or if the
+path-manager of the other peer only closes the subflow without sending a
+RM_ADDR, the subflow would switch to TCP_CLOSE_WAIT, but that's it,
+leaving the subflow half-closed.
+
+So now, when the subflow switches to the TCP_CLOSE_WAIT state, and if
+the MPTCP connection has not been closed before with a DATA_FIN, the
+kernel owning the subflow schedules its worker to initiate the closure
+on its side as well.
+
+This issue can be easily reproduced with packetdrill, as visible in [1],
+by creating an additional subflow, injecting a FIN+ACK before sending
+the DATA_FIN, and expecting a FIN+ACK in return.
+
+Fixes: 40947e13997a ("mptcp: schedule worker when subflow is closed")
+Cc: stable@vger.kernel.org
+Link: https://github.com/multipath-tcp/packetdrill/pull/154 [1]
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240826-net-mptcp-close-extra-sf-fin-v1-1-905199fe1172@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c | 5 ++++-
+ net/mptcp/subflow.c | 8 ++++++--
+ 2 files changed, 10 insertions(+), 3 deletions(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2496,8 +2496,11 @@ static void __mptcp_close_subflow(struct
+
+ mptcp_for_each_subflow_safe(msk, subflow, tmp) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++ int ssk_state = inet_sk_state_load(ssk);
+
+- if (inet_sk_state_load(ssk) != TCP_CLOSE)
++ if (ssk_state != TCP_CLOSE &&
++ (ssk_state != TCP_CLOSE_WAIT ||
++ inet_sk_state_load(sk) != TCP_ESTABLISHED))
+ continue;
+
+ /* 'subflow_data_ready' will re-sched once rx queue is empty */
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -1226,12 +1226,16 @@ out:
+ /* sched mptcp worker to remove the subflow if no more data is pending */
+ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
+ {
+- if (likely(ssk->sk_state != TCP_CLOSE))
++ struct sock *sk = (struct sock *)msk;
++
++ if (likely(ssk->sk_state != TCP_CLOSE &&
++ (ssk->sk_state != TCP_CLOSE_WAIT ||
++ inet_sk_state_load(sk) != TCP_ESTABLISHED)))
+ return;
+
+ if (skb_queue_empty(&ssk->sk_receive_queue) &&
+ !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+- mptcp_schedule_work((struct sock *)msk);
++ mptcp_schedule_work(sk);
+ }
+
+ static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
--- /dev/null
+From 57f86203b41c98b322119dfdbb1ec54ce5e3369b Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:37 +0200
+Subject: mptcp: pm: ADD_ADDR 0 is not a new address
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 57f86203b41c98b322119dfdbb1ec54ce5e3369b upstream.
+
+The ADD_ADDR 0 with the address from the initial subflow should not be
+considered as a new address: this is not something new. If the host
+receives it, it simply means that the address is available again.
+
+When receiving an ADD_ADDR for the ID 0, the PM already doesn't consider
+it as new by not incrementing the 'add_addr_accepted' counter. But the
+'accept_addr' might not be set if the limit has already been reached:
+this can be bypassed in this case. But before, it is important to check
+that this ADD_ADDR for the ID 0 is for the same address as the initial
+subflow. If not, it is not something that should happen, and the
+ADD_ADDR can be ignored.
+
+Note that if an ADD_ADDR is received while there is already a subflow
+opened using the same address, this ADD_ADDR is ignored as well. It
+means that if multiple ADD_ADDR for ID 0 are received, there will not be
+any duplicated subflows created by the client.
+
+Fixes: d0876b2284cf ("mptcp: add the incoming RM_ADDR support")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm.c | 4 +++-
+ net/mptcp/pm_netlink.c | 9 +++++++++
+ net/mptcp/protocol.h | 2 ++
+ 3 files changed, 14 insertions(+), 1 deletion(-)
+
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -227,7 +227,9 @@ void mptcp_pm_add_addr_received(const st
+ } else {
+ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
+ }
+- } else if (!READ_ONCE(pm->accept_addr)) {
++ /* id0 should not have a different address */
++ } else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) ||
++ (addr->id > 0 && !READ_ONCE(pm->accept_addr))) {
+ mptcp_pm_announce_addr(msk, addr, true);
+ mptcp_pm_add_addr_send_ack(msk);
+ } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -768,6 +768,15 @@ static void mptcp_pm_nl_add_addr_receive
+ }
+ }
+
++bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
++ const struct mptcp_addr_info *remote)
++{
++ struct mptcp_addr_info mpc_remote;
++
++ remote_address((struct sock_common *)msk, &mpc_remote);
++ return mptcp_addresses_equal(&mpc_remote, remote, remote->port);
++}
++
+ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
+ {
+ struct mptcp_subflow_context *subflow;
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -907,6 +907,8 @@ void mptcp_pm_add_addr_received(const st
+ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr);
+ void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
++bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk,
++ const struct mptcp_addr_info *remote);
+ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
+ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
+ const struct mptcp_rm_list *rm_list);
--- /dev/null
+From 58e1b66b4e4b8a602d3f2843e8eba00a969ecce2 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:32 +0200
+Subject: mptcp: pm: do not remove already closed subflows
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 58e1b66b4e4b8a602d3f2843e8eba00a969ecce2 upstream.
+
+It is possible to have already closed subflows in the list, e.g. the
+initial subflow has already been closed, but is still in the list. No need
+to try to close it again, nor to increment the related counters again.
+
+Fixes: 0ee4261a3681 ("mptcp: implement mptcp_pm_remove_subflow")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -851,6 +851,8 @@ static void mptcp_pm_nl_rm_addr_or_subfl
+ int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
+ u8 id = subflow_get_local_id(subflow);
+
++ if (inet_sk_state_load(ssk) == TCP_CLOSE)
++ continue;
+ if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id)
+ continue;
+ if (rm_type == MPTCP_MIB_RMSUBFLOW && !mptcp_local_id_match(msk, id, rm_id))
--- /dev/null
+From 9366922adc6a71378ca01f898c41be295309f044 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:33 +0200
+Subject: mptcp: pm: fix ID 0 endp usage after multiple re-creations
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 9366922adc6a71378ca01f898c41be295309f044 upstream.
+
+'local_addr_used' and 'add_addr_accepted' are decremented for addresses
+not related to the initial subflow (ID0), because the source and
+destination addresses of the initial subflows are known from the
+beginning: they don't count as "additional local address being used" or
+"ADD_ADDR being accepted".
+
+It is then required not to increment them when the endpoint used by
+the initial subflow is removed and re-added during a connection. Without
+this modification, this endpoint cannot be removed and re-added more
+than once.
+
+Reported-by: Arınç ÜNAL <arinc.unal@arinc9.com>
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/512
+Fixes: 3ad14f54bd74 ("mptcp: more accurate MPC endpoint tracking")
+Reported-by: syzbot+455d38ecd5f655fc45cf@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/00000000000049861306209237f4@google.com
+Cc: stable@vger.kernel.org
+Tested-by: Arınç ÜNAL <arinc.unal@arinc9.com>
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -623,12 +623,13 @@ subflow:
+
+ fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
+
+- msk->pm.local_addr_used++;
+ __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
+
+ /* Special case for ID0: set the correct ID */
+ if (local.addr.id == msk->mpc_endpoint_id)
+ local.addr.id = 0;
++ else /* local_addr_used is not decr for ID 0 */
++ msk->pm.local_addr_used++;
+
+ nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs);
+ if (nr == 0)
+@@ -758,7 +759,9 @@ static void mptcp_pm_nl_add_addr_receive
+ spin_lock_bh(&msk->pm.lock);
+
+ if (sf_created) {
+- msk->pm.add_addr_accepted++;
++ /* add_addr_accepted is not decr for ID 0 */
++ if (remote.id)
++ msk->pm.add_addr_accepted++;
+ if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
+ msk->pm.subflows >= subflows_max)
+ WRITE_ONCE(msk->pm.accept_addr, false);
--- /dev/null
+From dce1c6d1e92535f165219695a826caedcca4e9b9 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:29 +0200
+Subject: mptcp: pm: reset MPC endp ID when re-added
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit dce1c6d1e92535f165219695a826caedcca4e9b9 upstream.
+
+The initial subflow has a special local ID: 0. It is specific per
+connection.
+
+When a global endpoint is deleted and re-added later, it can have a
+different ID -- most services managing the endpoints automatically don't
+force the ID to be the same as before. It is then important to track
+these modifications to be consistent with the ID being used for the
+address used by the initial subflow, not to confuse the other peer or to
+send the ID 0 for the wrong address.
+
+Now when removing an endpoint, msk->mpc_endpoint_id is reset if it
+corresponds to this endpoint. When adding a new endpoint, the same
+variable is updated if the address matches the one of the initial subflow.
+
+Fixes: 3ad14f54bd74 ("mptcp: more accurate MPC endpoint tracking")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1351,20 +1351,27 @@ static struct pm_nl_pernet *genl_info_pm
+ return pm_nl_get_pernet(genl_info_net(info));
+ }
+
+-static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
++static int mptcp_nl_add_subflow_or_signal_addr(struct net *net,
++ struct mptcp_addr_info *addr)
+ {
+ struct mptcp_sock *msk;
+ long s_slot = 0, s_num = 0;
+
+ while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
+ struct sock *sk = (struct sock *)msk;
++ struct mptcp_addr_info mpc_addr;
+
+ if (!READ_ONCE(msk->fully_established) ||
+ mptcp_pm_is_userspace(msk))
+ goto next;
+
++ /* if the endp linked to the init sf is re-added with a != ID */
++ mptcp_local_address((struct sock_common *)msk, &mpc_addr);
++
+ lock_sock(sk);
+ spin_lock_bh(&msk->pm.lock);
++ if (mptcp_addresses_equal(addr, &mpc_addr, addr->port))
++ msk->mpc_endpoint_id = addr->id;
+ mptcp_pm_create_subflow_or_signal_addr(msk);
+ spin_unlock_bh(&msk->pm.lock);
+ release_sock(sk);
+@@ -1437,7 +1444,7 @@ static int mptcp_nl_cmd_add_addr(struct
+ goto out_free;
+ }
+
+- mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk));
++ mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr);
+ return 0;
+
+ out_free:
+@@ -1553,6 +1560,8 @@ static int mptcp_nl_remove_subflow_and_s
+ spin_unlock_bh(&msk->pm.lock);
+ }
+
++ if (msk->mpc_endpoint_id == entry->addr.id)
++ msk->mpc_endpoint_id = 0;
+ release_sock(sk);
+
+ next:
--- /dev/null
+From 8b8ed1b429f8fa7ebd5632555e7b047bc0620075 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:24 +0200
+Subject: mptcp: pm: reuse ID 0 after delete and re-add
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 8b8ed1b429f8fa7ebd5632555e7b047bc0620075 upstream.
+
+When the endpoint used by the initial subflow is removed and re-added
+later, the PM has to force the ID 0: it is a special case imposed by the
+MPTCP specs.
+
+Note that the endpoint should then be re-added reusing the same ID.
+
+Fixes: 3ad14f54bd74 ("mptcp: more accurate MPC endpoint tracking")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -593,6 +593,11 @@ static void mptcp_pm_create_subflow_or_s
+
+ __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
+ msk->pm.add_addr_signaled++;
++
++ /* Special case for ID0: set the correct ID */
++ if (local.addr.id == msk->mpc_endpoint_id)
++ local.addr.id = 0;
++
+ mptcp_pm_announce_addr(msk, &local.addr, false);
+ mptcp_pm_nl_addr_send_ack(msk);
+
+@@ -617,6 +622,11 @@ subflow:
+
+ msk->pm.local_addr_used++;
+ __clear_bit(local.addr.id, msk->pm.id_avail_bitmap);
++
++ /* Special case for ID0: set the correct ID */
++ if (local.addr.id == msk->mpc_endpoint_id)
++ local.addr.id = 0;
++
+ nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs);
+ if (nr == 0)
+ continue;
--- /dev/null
+From c07cc3ed895f9bfe0c53b5ed6be710c133b4271c Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:27 +0200
+Subject: mptcp: pm: send ACK on an active subflow
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit c07cc3ed895f9bfe0c53b5ed6be710c133b4271c upstream.
+
+Taking the first one on the list doesn't work in some cases, e.g. if the
+initial subflow is being removed. Pick another one instead of not
+sending anything.
+
+Fixes: 84dfe3677a6f ("mptcp: send out dedicated ADD_ADDR packet")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -776,9 +776,12 @@ void mptcp_pm_nl_addr_send_ack(struct mp
+ !mptcp_pm_should_rm_signal(msk))
+ return;
+
+- subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
+- if (subflow)
+- mptcp_pm_send_ack(msk, subflow, false, false);
++ mptcp_for_each_subflow(msk, subflow) {
++ if (__mptcp_subflow_active(subflow)) {
++ mptcp_pm_send_ack(msk, subflow, false, false);
++ break;
++ }
++ }
+ }
+
+ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
--- /dev/null
+From bc19ff57637ff563d2bdf2b385b48c41e6509e0d Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:28 +0200
+Subject: mptcp: pm: skip connecting to already established sf
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit bc19ff57637ff563d2bdf2b385b48c41e6509e0d upstream.
+
+The lookup_subflow_by_daddr() helper checks if there is already a
+subflow connected to this address. But there could be a subflow that is
+closing, but taking time due to some reasons: latency, losses, data to
+process, etc.
+
+If an ADD_ADDR is received while the endpoint is being closed, it is
+better to try connecting to it, instead of rejecting it: the peer which
+has sent the ADD_ADDR will not be notified that the ADD_ADDR has been
+rejected for this reason, and the expected subflow will not be created
+at the end.
+
+This helper should then only look for subflows that are established, or
+going to be, but not the ones being closed.
+
+Fixes: d84ad04941c3 ("mptcp: skip connecting the connected address")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm_netlink.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -135,12 +135,15 @@ static bool lookup_subflow_by_daddr(cons
+ {
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_addr_info cur;
+- struct sock_common *skc;
+
+ list_for_each_entry(subflow, list, node) {
+- skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+- remote_address(skc, &cur);
++ if (!((1 << inet_sk_state_load(ssk)) &
++ (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV)))
++ continue;
++
++ remote_address((struct sock_common *)ssk, &cur);
+ if (mptcp_addresses_equal(&cur, daddr, daddr->port))
+ return true;
+ }
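The new check above relies on the usual kernel idiom that TCPF_* is simply 1 << TCP_*, so "(1 << state) & (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV)" is a set-membership test over socket states. A stand-alone sketch of the idiom, with the relevant constants copied from the TCP state definitions and a made-up helper name:

	#include <stdio.h>

	/* Values as in the kernel's TCP state enum; each TCPF_* is 1 << TCP_*. */
	enum {
		TCP_ESTABLISHED = 1,
		TCP_SYN_SENT    = 2,
		TCP_SYN_RECV    = 3,
		TCP_CLOSE_WAIT  = 8,
	};
	#define TCPF_ESTABLISHED (1 << TCP_ESTABLISHED)
	#define TCPF_SYN_SENT    (1 << TCP_SYN_SENT)
	#define TCPF_SYN_RECV    (1 << TCP_SYN_RECV)

	/* Mirrors the new check: only subflows that are (or will be) established count. */
	static int usable_for_lookup(int state)
	{
		return !!((1 << state) &
			  (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV));
	}

	int main(void)
	{
		printf("ESTABLISHED usable: %d\n", usable_for_lookup(TCP_ESTABLISHED));
		printf("CLOSE_WAIT usable:  %d\n", usable_for_lookup(TCP_CLOSE_WAIT));
		return 0;
	}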
--- /dev/null
+From 2a1f596ebb23eadc0f9b95a8012e18ef76295fc8 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Mon, 26 Aug 2024 19:11:20 +0200
+Subject: mptcp: sched: check both backup in retrans
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 2a1f596ebb23eadc0f9b95a8012e18ef76295fc8 upstream.
+
+The 'mptcp_subflow_context' structure has two items related to the
+backup flags:
+
+ - 'backup': the subflow has been marked as backup by the other peer
+
+ - 'request_bkup': the backup flag has been set by the host
+
+Looking only at the 'backup' flag can make sense in some cases, but it
+is not the behaviour of the default packet scheduler when selecting
+paths.
+
+As explained in the commit b6a66e521a20 ("mptcp: sched: check both
+directions for backup"), the packet scheduler should look at both flags,
+because that was the behaviour from the beginning: the 'backup' flag was
+set by accident instead of the 'request_bkup' one. Now that the latter
+has been fixed, get_retrans() needs to be adapted as well.
+
+Fixes: b6a66e521a20 ("mptcp: sched: check both directions for backup")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240826-net-mptcp-close-extra-sf-fin-v1-3-905199fe1172@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/protocol.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2289,7 +2289,7 @@ struct sock *mptcp_subflow_get_retrans(s
+ continue;
+ }
+
+- if (subflow->backup) {
++ if (subflow->backup || subflow->request_bkup) {
+ if (!backup)
+ backup = ssk;
+ continue;
--- /dev/null
+From 8af174ea863c72f25ce31cee3baad8a301c0cf0f Mon Sep 17 00:00:00 2001
+From: Haiyang Zhang <haiyangz@microsoft.com>
+Date: Wed, 21 Aug 2024 13:42:29 -0700
+Subject: net: mana: Fix race of mana_hwc_post_rx_wqe and new hwc response
+
+From: Haiyang Zhang <haiyangz@microsoft.com>
+
+commit 8af174ea863c72f25ce31cee3baad8a301c0cf0f upstream.
+
+The mana_hwc_rx_event_handler() / mana_hwc_handle_resp() calls
+complete(&ctx->comp_event) before posting the wqe back. It's
+possible that other callers, like mana_create_txq(), start the
+next round of mana_hwc_send_request() before the wqe is posted back.
+And if the HW is fast enough to respond, it can hit a no_wqe error
+on the HW channel, and then the response message is lost. The mana
+driver may then fail to create queues and open the device, because it
+times out waiting for the HW response.
+Sample dmesg:
+[ 528.610840] mana 39d4:00:02.0: HWC: Request timed out!
+[ 528.614452] mana 39d4:00:02.0: Failed to send mana message: -110, 0x0
+[ 528.618326] mana 39d4:00:02.0 enP14804s2: Failed to create WQ object: -110
+
+To fix it, move posting of rx wqe before complete(&ctx->comp_event).
+
+Cc: stable@vger.kernel.org
+Fixes: ca9c54d2d6a5 ("net: mana: Add a driver for Microsoft Azure Network Adapter (MANA)")
+Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
+Reviewed-by: Long Li <longli@microsoft.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/microsoft/mana/hw_channel.c | 62 ++++++++++++-----------
+ 1 file changed, 34 insertions(+), 28 deletions(-)
+
+--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
++++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
+@@ -51,9 +51,33 @@ static int mana_hwc_verify_resp_msg(cons
+ return 0;
+ }
+
++static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
++ struct hwc_work_request *req)
++{
++ struct device *dev = hwc_rxq->hwc->dev;
++ struct gdma_sge *sge;
++ int err;
++
++ sge = &req->sge;
++ sge->address = (u64)req->buf_sge_addr;
++ sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
++ sge->size = req->buf_len;
++
++ memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
++ req->wqe_req.sgl = sge;
++ req->wqe_req.num_sge = 1;
++ req->wqe_req.client_data_unit = 0;
++
++ err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
++ if (err)
++ dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
++ return err;
++}
++
+ static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
+- const struct gdma_resp_hdr *resp_msg)
++ struct hwc_work_request *rx_req)
+ {
++ const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
+ struct hwc_caller_ctx *ctx;
+ int err;
+
+@@ -61,6 +85,7 @@ static void mana_hwc_handle_resp(struct
+ hwc->inflight_msg_res.map)) {
+ dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
+ resp_msg->response.hwc_msg_id);
++ mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
+ return;
+ }
+
+@@ -74,30 +99,13 @@ static void mana_hwc_handle_resp(struct
+ memcpy(ctx->output_buf, resp_msg, resp_len);
+ out:
+ ctx->error = err;
+- complete(&ctx->comp_event);
+-}
+-
+-static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
+- struct hwc_work_request *req)
+-{
+- struct device *dev = hwc_rxq->hwc->dev;
+- struct gdma_sge *sge;
+- int err;
+-
+- sge = &req->sge;
+- sge->address = (u64)req->buf_sge_addr;
+- sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
+- sge->size = req->buf_len;
+
+- memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
+- req->wqe_req.sgl = sge;
+- req->wqe_req.num_sge = 1;
+- req->wqe_req.client_data_unit = 0;
++ /* Must post rx wqe before complete(), otherwise the next rx may
++ * hit no_wqe error.
++ */
++ mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
+
+- err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
+- if (err)
+- dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
+- return err;
++ complete(&ctx->comp_event);
+ }
+
+ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
+@@ -234,14 +242,12 @@ static void mana_hwc_rx_event_handler(vo
+ return;
+ }
+
+- mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
++ mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);
+
+- /* Do no longer use 'resp', because the buffer is posted to the HW
+- * in the below mana_hwc_post_rx_wqe().
++ /* Can no longer use 'resp', because the buffer is posted to the HW
++ * in mana_hwc_handle_resp() above.
+ */
+ resp = NULL;
+-
+- mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
+ }
+
+ static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
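The core of the fix is an ordering rule: the receive buffer has to be back on the hardware queue before the waiter is woken, so the waiter's next request can never reach the HW while the RQ has no free WQE. A compilable stand-alone sketch of that rule, with stand-in types and names (none of them belong to the mana driver):

	#include <stdio.h>

	struct completion { int done; };
	struct rx_req     { char buf[64]; };
	struct caller_ctx { struct completion comp; char out[64]; };

	static void repost_rx_buffer(struct rx_req *rx)
	{
		/* stands in for "post the rx wqe back to the hardware queue" */
		(void)rx;
		puts("rx buffer back on the queue");
	}

	static void complete(struct completion *c)
	{
		c->done = 1;
		puts("waiter woken");
	}

	static void handle_resp(struct caller_ctx *ctx, struct rx_req *rx)
	{
		/* consume the response while the buffer is still ours */
		snprintf(ctx->out, sizeof(ctx->out), "%s", rx->buf);

		/* post the buffer back *before* signalling completion ... */
		repost_rx_buffer(rx);

		/* ... so the next request cannot race with an empty RQ */
		complete(&ctx->comp);
	}

	int main(void)
	{
		struct caller_ctx ctx = { 0 };
		struct rx_req rx = { .buf = "response" };

		handle_resp(&ctx, &rx);
		return ctx.comp.done ? 0 : 1;
	}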
--- /dev/null
+From d397d7246c11ca36c33c932bc36d38e3a79e9aa0 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:34 +0200
+Subject: selftests: mptcp: join: check re-re-adding ID 0 endp
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit d397d7246c11ca36c33c932bc36d38e3a79e9aa0 upstream.
+
+This test extends "delete and re-add" to validate the previous commit:
+when the endpoint linked to the initial subflow (ID 0) was re-added
+multiple times, it was no longer being used, because the internal linked
+counters are not decremented for this special endpoint: it is not an
+additional endpoint.
+
+Here, the "del/add id 0" steps are done 3 times to ensure this case is
+validated.
+
+The 'Fixes' tag here below is the same as the one from the previous
+commit: this patch here is not fixing anything wrong in the selftests,
+but it validates the previous fix for an issue introduced by this commit
+ID.
+
+Fixes: 3ad14f54bd74 ("mptcp: more accurate MPC endpoint tracking")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 27 +++++++++++++-----------
+ 1 file changed, 15 insertions(+), 12 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3582,7 +3582,7 @@ endpoint_tests()
+ pm_nl_set_limits $ns2 0 3
+ pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+- test_linkfail=4 speed=20 \
++ test_linkfail=4 speed=5 \
+ run_tests $ns1 $ns2 10.0.1.1 &
+ local tests_pid=$!
+
+@@ -3614,20 +3614,23 @@ endpoint_tests()
+ chk_subflow_nr "after no reject" 3
+ chk_mptcp_info subflows 2 subflows 2
+
+- pm_nl_del_endpoint $ns2 1 10.0.1.2
+- sleep 0.5
+- chk_subflow_nr "after delete id 0" 2
+- chk_mptcp_info subflows 2 subflows 2 # only decr for additional sf
+-
+- pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
+- wait_mpj $ns2
+- chk_subflow_nr "after re-add id 0" 3
+- chk_mptcp_info subflows 3 subflows 3
++ local i
++ for i in $(seq 3); do
++ pm_nl_del_endpoint $ns2 1 10.0.1.2
++ sleep 0.5
++ chk_subflow_nr "after delete id 0 ($i)" 2
++ chk_mptcp_info subflows 2 subflows 2 # only decr for additional sf
++
++ pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
++ wait_mpj $ns2
++ chk_subflow_nr "after re-add id 0 ($i)" 3
++ chk_mptcp_info subflows 3 subflows 3
++ done
+
+ mptcp_lib_kill_wait $tests_pid
+
+- chk_join_nr 4 4 4
+- chk_rm_nr 2 2
++ chk_join_nr 6 6 6
++ chk_rm_nr 4 4
+ fi
+ }
+
--- /dev/null
+From 5f94b08c001290acda94d9d8868075590931c198 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:26 +0200
+Subject: selftests: mptcp: join: check removing ID 0 endpoint
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 5f94b08c001290acda94d9d8868075590931c198 upstream.
+
+Removing the endpoint linked to the initial subflow should trigger a
+RM_ADDR for the right ID, and the removal of the subflow. That's what is
+now being verified in the "delete and re-add" test.
+
+Note that removing the initial subflow will not decrement the 'subflows'
+counter, which corresponds to the *additional* subflows. On the other
+hand, when the same endpoint is re-added, it will increment this
+counter, as it will be seen as an additional subflow this time.
+
+The 'Fixes' tag here below is the same as the one from the previous
+commit: this patch here is not fixing anything wrong in the selftests,
+but it validates the previous fix for an issue introduced by this commit
+ID.
+
+Fixes: 3ad14f54bd74 ("mptcp: more accurate MPC endpoint tracking")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 25 +++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3578,8 +3578,9 @@ endpoint_tests()
+
+ if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT &&
+ mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+- pm_nl_set_limits $ns1 0 2
+- pm_nl_set_limits $ns2 0 2
++ pm_nl_set_limits $ns1 0 3
++ pm_nl_set_limits $ns2 0 3
++ pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+ test_linkfail=4 speed=20 \
+ run_tests $ns1 $ns2 10.0.1.1 &
+@@ -3588,17 +3589,17 @@ endpoint_tests()
+ wait_mpj $ns2
+ pm_nl_check_endpoint "creation" \
+ $ns2 10.0.2.2 id 2 flags subflow dev ns2eth2
+- chk_subflow_nr "before delete" 2
++ chk_subflow_nr "before delete id 2" 2
+ chk_mptcp_info subflows 1 subflows 1
+
+ pm_nl_del_endpoint $ns2 2 10.0.2.2
+ sleep 0.5
+- chk_subflow_nr "after delete" 1
++ chk_subflow_nr "after delete id 2" 1
+ chk_mptcp_info subflows 0 subflows 0
+
+ pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+ wait_mpj $ns2
+- chk_subflow_nr "after re-add" 2
++ chk_subflow_nr "after re-add id 2" 2
+ chk_mptcp_info subflows 1 subflows 1
+
+ pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
+@@ -3613,10 +3614,20 @@ endpoint_tests()
+ chk_subflow_nr "after no reject" 3
+ chk_mptcp_info subflows 2 subflows 2
+
++ pm_nl_del_endpoint $ns2 1 10.0.1.2
++ sleep 0.5
++ chk_subflow_nr "after delete id 0" 2
++ chk_mptcp_info subflows 2 subflows 2 # only decr for additional sf
++
++ pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
++ wait_mpj $ns2
++ chk_subflow_nr "after re-add id 0" 3
++ chk_mptcp_info subflows 3 subflows 3
++
+ mptcp_lib_kill_wait $tests_pid
+
+- chk_join_nr 3 3 3
+- chk_rm_nr 1 1
++ chk_join_nr 4 4 4
++ chk_rm_nr 2 2
+ fi
+ }
+
--- /dev/null
+From 76a2d8394cc183df872adf04bf636eaf42746449 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Wed, 28 Aug 2024 08:14:31 +0200
+Subject: selftests: mptcp: join: no extra msg if no counter
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 76a2d8394cc183df872adf04bf636eaf42746449 upstream.
+
+The checksum and fail counters might not be available. Then no need to
+display an extra message with missing info.
+
+While at it, fix the surrounding indentation, which has been wrong since
+the same commit.
+
+Fixes: 47867f0a7e83 ("selftests: mptcp: join: skip check if MIB counter not supported")
+Cc: stable@vger.kernel.org
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -1256,26 +1256,26 @@ chk_csum_nr()
+
+ print_check "sum"
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtDataCsumErr")
+- if [ "$count" != "$csum_ns1" ]; then
++ if [ -n "$count" ] && [ "$count" != "$csum_ns1" ]; then
+ extra_msg+=" ns1=$count"
+ fi
+ if [ -z "$count" ]; then
+ print_skip
+ elif { [ "$count" != $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 0 ]; } ||
+- { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
++ { [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
+ fail_test "got $count data checksum error[s] expected $csum_ns1"
+ else
+ print_ok
+ fi
+ print_check "csum"
+ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtDataCsumErr")
+- if [ "$count" != "$csum_ns2" ]; then
++ if [ -n "$count" ] && [ "$count" != "$csum_ns2" ]; then
+ extra_msg+=" ns2=$count"
+ fi
+ if [ -z "$count" ]; then
+ print_skip
+ elif { [ "$count" != $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 0 ]; } ||
+- { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
++ { [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
+ fail_test "got $count data checksum error[s] expected $csum_ns2"
+ else
+ print_ok
+@@ -1313,13 +1313,13 @@ chk_fail_nr()
+
+ print_check "ftx"
+ count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFailTx")
+- if [ "$count" != "$fail_tx" ]; then
++ if [ -n "$count" ] && [ "$count" != "$fail_tx" ]; then
+ extra_msg+=",tx=$count"
+ fi
+ if [ -z "$count" ]; then
+ print_skip
+ elif { [ "$count" != "$fail_tx" ] && [ $allow_tx_lost -eq 0 ]; } ||
+- { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
++ { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
+ fail_test "got $count MP_FAIL[s] TX expected $fail_tx"
+ else
+ print_ok
+@@ -1327,13 +1327,13 @@ chk_fail_nr()
+
+ print_check "failrx"
+ count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFailRx")
+- if [ "$count" != "$fail_rx" ]; then
++ if [ -n "$count" ] && [ "$count" != "$fail_rx" ]; then
+ extra_msg+=",rx=$count"
+ fi
+ if [ -z "$count" ]; then
+ print_skip
+ elif { [ "$count" != "$fail_rx" ] && [ $allow_rx_lost -eq 0 ]; } ||
+- { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
++ { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
+ fail_test "got $count MP_FAIL[s] RX expected $fail_rx"
+ else
+ print_ok
pinctrl-rockchip-correct-rk3328-iomux-width-flag-for-gpio2-b-pins.patch
pinctrl-single-fix-potential-null-dereference-in-pcs_get_function.patch
of-add-cleanup.h-based-auto-release-via-__free-device_node-markings.patch
+wifi-wfx-repair-open-network-ap-mode.patch
+wifi-mwifiex-duplicate-static-structs-used-in-driver-instances.patch
+net-mana-fix-race-of-mana_hwc_post_rx_wqe-and-new-hwc-response.patch
+mptcp-close-subflow-when-receiving-tcp-fin.patch
+mptcp-sched-check-both-backup-in-retrans.patch
+mptcp-pm-reuse-id-0-after-delete-and-re-add.patch
+mptcp-pm-skip-connecting-to-already-established-sf.patch
+mptcp-pm-reset-mpc-endp-id-when-re-added.patch
+mptcp-pm-send-ack-on-an-active-subflow.patch
+mptcp-pm-do-not-remove-already-closed-subflows.patch
+mptcp-pm-fix-id-0-endp-usage-after-multiple-re-creations.patch
+mptcp-pm-add_addr-0-is-not-a-new-address.patch
+selftests-mptcp-join-check-removing-id-0-endpoint.patch
+selftests-mptcp-join-no-extra-msg-if-no-counter.patch
+selftests-mptcp-join-check-re-re-adding-id-0-endp.patch
+drm-amdgpu-align-pp_power_profile_mode-with-kernel-docs.patch
+drm-amdgpu-swsmu-always-force-a-state-reprogram-on-init.patch
+drm-vmwgfx-fix-prime-with-external-buffers.patch
--- /dev/null
+From 27ec3c57fcadb43c79ed05b2ea31bc18c72d798a Mon Sep 17 00:00:00 2001
+From: Sascha Hauer <s.hauer@pengutronix.de>
+Date: Fri, 9 Aug 2024 10:11:33 +0200
+Subject: wifi: mwifiex: duplicate static structs used in driver instances
+
+From: Sascha Hauer <s.hauer@pengutronix.de>
+
+commit 27ec3c57fcadb43c79ed05b2ea31bc18c72d798a upstream.
+
+mwifiex_band_2ghz and mwifiex_band_5ghz are statically allocated, but
+used and modified in driver instances. Duplicate them before use so
+that different driver instances do not influence each other.
+
+This was observed on a board which has one PCIe and one SDIO mwifiex
+adapter. It blew up in mwifiex_setup_ht_caps(). This was called with
+the statically allocated struct which is modified in this function.
+
+Cc: stable@vger.kernel.org
+Fixes: d6bffe8bb520 ("mwifiex: support for creation of AP interface")
+Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
+Reviewed-by: Francesco Dolcini <francesco.dolcini@toradex.com>
+Acked-by: Brian Norris <briannorris@chromium.org>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://patch.msgid.link/20240809-mwifiex-duplicate-static-structs-v1-1-6837b903b1a4@pengutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/marvell/mwifiex/cfg80211.c | 32 +++++++++++++++++++-----
+ 1 file changed, 26 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -4362,11 +4362,27 @@ int mwifiex_register_cfg80211(struct mwi
+ if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+
+- wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
+- if (adapter->config_bands & BAND_A)
+- wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
+- else
++ wiphy->bands[NL80211_BAND_2GHZ] = devm_kmemdup(adapter->dev,
++ &mwifiex_band_2ghz,
++ sizeof(mwifiex_band_2ghz),
++ GFP_KERNEL);
++ if (!wiphy->bands[NL80211_BAND_2GHZ]) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ if (adapter->config_bands & BAND_A) {
++ wiphy->bands[NL80211_BAND_5GHZ] = devm_kmemdup(adapter->dev,
++ &mwifiex_band_5ghz,
++ sizeof(mwifiex_band_5ghz),
++ GFP_KERNEL);
++ if (!wiphy->bands[NL80211_BAND_5GHZ]) {
++ ret = -ENOMEM;
++ goto err;
++ }
++ } else {
+ wiphy->bands[NL80211_BAND_5GHZ] = NULL;
++ }
+
+ if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+ wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
+@@ -4460,8 +4476,7 @@ int mwifiex_register_cfg80211(struct mwi
+ if (ret < 0) {
+ mwifiex_dbg(adapter, ERROR,
+ "%s: wiphy_register failed: %d\n", __func__, ret);
+- wiphy_free(wiphy);
+- return ret;
++ goto err;
+ }
+
+ if (!adapter->regd) {
+@@ -4503,4 +4518,9 @@ int mwifiex_register_cfg80211(struct mwi
+
+ adapter->wiphy = wiphy;
+ return ret;
++
++err:
++ wiphy_free(wiphy);
++
++ return ret;
+ }
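The underlying pattern is generic: a mutable static template must not be handed to several device instances; each instance gets its own copy first (devm_kmemdup() in the fix above). A stand-alone sketch with made-up names, just to illustrate the idea:

	#include <stdlib.h>
	#include <string.h>

	struct band_caps { int n_channels; int ht_enabled; };

	/* shared, read-only template */
	static const struct band_caps band_2ghz_template = { .n_channels = 14 };

	static struct band_caps *instance_band_caps(void)
	{
		struct band_caps *caps = malloc(sizeof(*caps));

		if (!caps)
			return NULL;
		/* same idea as devm_kmemdup() in the fix: copy, then modify freely */
		memcpy(caps, &band_2ghz_template, sizeof(*caps));
		caps->ht_enabled = 1;	/* per-instance tweak no longer leaks to others */
		return caps;
	}

	int main(void)
	{
		struct band_caps *a = instance_band_caps();
		struct band_caps *b = instance_band_caps();

		free(a);
		free(b);
		return 0;
	}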
--- /dev/null
+From 6d30bb88f623526197c0e18a366e68a4254a2c83 Mon Sep 17 00:00:00 2001
+From: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+Date: Fri, 23 Aug 2024 15:15:20 +0200
+Subject: wifi: wfx: repair open network AP mode
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+
+commit 6d30bb88f623526197c0e18a366e68a4254a2c83 upstream.
+
+A missing RSN IE in the beacon is normal for open networks.
+Avoid returning -EINVAL in this case.
+
+Steps to reproduce:
+
+$ cat /etc/wpa_supplicant.conf
+network={
+ ssid="testNet"
+ mode=2
+ key_mgmt=NONE
+}
+
+$ wpa_supplicant -iwlan0 -c /etc/wpa_supplicant.conf
+nl80211: Beacon set failed: -22 (Invalid argument)
+Failed to set beacon parameters
+Interface initialization failed
+wlan0: interface state UNINITIALIZED->DISABLED
+wlan0: AP-DISABLED
+wlan0: Unable to setup interface.
+Failed to initialize AP interface
+
+After the change:
+
+$ wpa_supplicant -iwlan0 -c /etc/wpa_supplicant.conf
+Successfully initialized wpa_supplicant
+wlan0: interface state UNINITIALIZED->ENABLED
+wlan0: AP-ENABLED
+
+Cc: stable@vger.kernel.org
+Fixes: fe0a7776d4d1 ("wifi: wfx: fix possible NULL pointer dereference in wfx_set_mfp_ap()")
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@siemens.com>
+Reviewed-by: Jérôme Pouiller <jerome.pouiller@silabs.com>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://patch.msgid.link/20240823131521.3309073-1-alexander.sverdlin@siemens.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/silabs/wfx/sta.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/silabs/wfx/sta.c
++++ b/drivers/net/wireless/silabs/wfx/sta.c
+@@ -370,8 +370,11 @@ static int wfx_set_mfp_ap(struct wfx_vif
+
+ ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
+ skb->len - ieoffset);
+- if (unlikely(!ptr))
++ if (!ptr) {
++ /* No RSN IE is fine in open networks */
++ ret = 0;
+ goto free_skb;
++ }
+
+ ptr += pairwise_cipher_suite_count_offset;
+ if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))