git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.7
author Sasha Levin <sashal@kernel.org>
Sat, 24 Feb 2024 03:37:42 +0000 (22:37 -0500)
committer Sasha Levin <sashal@kernel.org>
Sat, 24 Feb 2024 03:37:42 +0000 (22:37 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-6.7/drm-amd-display-add-dpia-display-mode-validation-log.patch [new file with mode: 0644]
queue-6.7/drm-amd-display-fixed-integer-types-and-null-check-l.patch [new file with mode: 0644]
queue-6.7/drm-amd-display-request-usb4-bw-for-mst-streams.patch [new file with mode: 0644]
queue-6.7/mptcp-add-currestab-mib-counter-support.patch [new file with mode: 0644]
queue-6.7/mptcp-corner-case-locking-for-rx-path-fields-initial.patch [new file with mode: 0644]
queue-6.7/mptcp-fix-more-tx-path-fields-initialization.patch [new file with mode: 0644]
queue-6.7/mptcp-use-mptcp_set_state.patch [new file with mode: 0644]
queue-6.7/series

diff --git a/queue-6.7/drm-amd-display-add-dpia-display-mode-validation-log.patch b/queue-6.7/drm-amd-display-add-dpia-display-mode-validation-log.patch
new file mode 100644 (file)
index 0000000..389c0fd
--- /dev/null
@@ -0,0 +1,313 @@
+From 35e725c08655e87c294a85a62735c055fd8f14ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Dec 2023 00:01:15 -0500
+Subject: drm/amd/display: Add dpia display mode validation logic
+
+From: Meenakshikumar Somasundaram <meenakshikumar.somasundaram@amd.com>
+
+[ Upstream commit 59f1622a5f05d948a7c665a458a3dd76ba73015e ]
+
+[Why]
+If bandwidth allocation feature is enabled, connection manager won't
+limit the dp tunnel bandwidth. So, need to do display mode validation
+for streams on dpia links to avoid oversubscription of dp tunnel
+bandwidth.
+
+[How]
+- To read non reduced link rate and lane count and update
+  reported link capability.
+- To calculate the bandwidth required for streams of dpia links
+  per host router and validate against the allocated bandwidth for
+  the host router.
+
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Reviewed-by: PeiChen Huang <peichen.huang@amd.com>
+Reviewed-by: Aric Cyr <aric.cyr@amd.com>
+Acked-by: Rodrigo Siqueira <rodrigo.siqueira@amd.com>
+Signed-off-by: Meenakshikumar Somasundaram <meenakshikumar.somasundaram@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 0484e05d048b ("drm/amd/display: fixed integer types and null check locations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../drm/amd/display/dc/core/dc_link_exports.c |   2 +-
+ drivers/gpu/drm/amd/display/dc/dc.h           |   4 +-
+ drivers/gpu/drm/amd/display/dc/dc_dp_types.h  |   6 +
+ drivers/gpu/drm/amd/display/dc/dc_types.h     |   2 +
+ .../dc/link/protocols/link_dp_dpia_bw.c       | 130 +++++++++++++-----
+ 5 files changed, 104 insertions(+), 40 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+index ed94187c2afa2..f365773d57148 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+@@ -497,7 +497,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
+       link->dc->link_srv->enable_hpd_filter(link, enable);
+ }
+-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
++bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count)
+ {
+       return dc->link_srv->validate_dpia_bandwidth(streams, count);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 2cafd644baff8..8164a534048c4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -2187,11 +2187,11 @@ int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
+  *
+  * @dc: pointer to dc struct
+  * @stream: pointer to all possible streams
+- * @num_streams: number of valid DPIA streams
++ * @count: number of valid DPIA streams
+  *
+  * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE
+  */
+-bool dc_link_validate(struct dc *dc, const struct dc_stream_state *streams,
++bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams,
+               const unsigned int count);
+ /* Sink Interfaces - A sink corresponds to a display output device */
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+index eeeeeef4d7173..1cb7765f593aa 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+@@ -1377,6 +1377,12 @@ struct dp_trace {
+ #ifndef DP_TUNNELING_STATUS
+ #define DP_TUNNELING_STATUS                           0xE0025 /* 1.4a */
+ #endif
++#ifndef DP_TUNNELING_MAX_LINK_RATE
++#define DP_TUNNELING_MAX_LINK_RATE                    0xE0028 /* 1.4a */
++#endif
++#ifndef DP_TUNNELING_MAX_LANE_COUNT
++#define DP_TUNNELING_MAX_LANE_COUNT                   0xE0029 /* 1.4a */
++#endif
+ #ifndef DPTX_BW_ALLOCATION_MODE_CONTROL
+ #define DPTX_BW_ALLOCATION_MODE_CONTROL                       0xE0030 /* 1.4a */
+ #endif
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index 35d146217aef0..ab91504529c65 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -1111,6 +1111,8 @@ struct dc_dpia_bw_alloc {
+       int bw_granularity;    // BW Granularity
+       bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3:  DP-Tx & Dpia & CM
+       bool response_ready;   // Response ready from the CM side
++      uint8_t nrd_max_lane_count; // Non-reduced max lane count
++      uint8_t nrd_max_link_rate; // Non-reduced max link rate
+ };
+ #define MAX_SINKS_PER_LINK 4
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+index d6e1f969bfd52..a7aa8c9da868f 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+@@ -59,6 +59,7 @@ static void reset_bw_alloc_struct(struct dc_link *link)
+       link->dpia_bw_alloc_config.estimated_bw = 0;
+       link->dpia_bw_alloc_config.bw_granularity = 0;
+       link->dpia_bw_alloc_config.response_ready = false;
++      link->dpia_bw_alloc_config.sink_allocated_bw = 0;
+ }
+ #define BW_GRANULARITY_0 4 // 0.25 Gbps
+@@ -104,6 +105,32 @@ static int get_estimated_bw(struct dc_link *link)
+       return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ }
++static int get_non_reduced_max_link_rate(struct dc_link *link)
++{
++      uint8_t nrd_max_link_rate = 0;
++
++      core_link_read_dpcd(
++                      link,
++                      DP_TUNNELING_MAX_LINK_RATE,
++                      &nrd_max_link_rate,
++                      sizeof(uint8_t));
++
++      return nrd_max_link_rate;
++}
++
++static int get_non_reduced_max_lane_count(struct dc_link *link)
++{
++      uint8_t nrd_max_lane_count = 0;
++
++      core_link_read_dpcd(
++                      link,
++                      DP_TUNNELING_MAX_LANE_COUNT,
++                      &nrd_max_lane_count,
++                      sizeof(uint8_t));
++
++      return nrd_max_lane_count;
++}
++
+ /*
+  * Read all New BW alloc configuration ex: estimated_bw, allocated_bw,
+  * granuality, Driver_ID, CM_Group, & populate the BW allocation structs
+@@ -111,13 +138,20 @@ static int get_estimated_bw(struct dc_link *link)
+  */
+ static void init_usb4_bw_struct(struct dc_link *link)
+ {
+-      // Init the known values
++      reset_bw_alloc_struct(link);
++
++      /* init the known values */
+       link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
+       link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
++      link->dpia_bw_alloc_config.nrd_max_link_rate = get_non_reduced_max_link_rate(link);
++      link->dpia_bw_alloc_config.nrd_max_lane_count = get_non_reduced_max_lane_count(link);
+       DC_LOG_DEBUG("%s: bw_granularity(%d), estimated_bw(%d)\n",
+               __func__, link->dpia_bw_alloc_config.bw_granularity,
+               link->dpia_bw_alloc_config.estimated_bw);
++      DC_LOG_DEBUG("%s: nrd_max_link_rate(%d), nrd_max_lane_count(%d)\n",
++              __func__, link->dpia_bw_alloc_config.nrd_max_link_rate,
++              link->dpia_bw_alloc_config.nrd_max_lane_count);
+ }
+ static uint8_t get_lowest_dpia_index(struct dc_link *link)
+@@ -142,39 +176,50 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link)
+ }
+ /*
+- * Get the Max Available BW or Max Estimated BW for each Host Router
++ * Get the maximum dp tunnel banwidth of host router
+  *
+- * @link: pointer to the dc_link struct instance
+- * @type: ESTIMATD BW or MAX AVAILABLE BW
++ * @dc: pointer to the dc struct instance
++ * @hr_index: host router index
+  *
+- * return: response_ready flag from dc_link struct
++ * return: host router maximum dp tunnel bandwidth
+  */
+-static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
++static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index)
+ {
+-      const struct dc *dc_struct = link->dc;
+-      uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
+-      uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
+-      struct dc_link *link_temp;
++      uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]);
++      uint8_t hr_index_temp = 0;
++      struct dc_link *link_dpia_primary, *link_dpia_secondary;
+       int total_bw = 0;
+-      int i;
+-
+-      for (i = 0; i < MAX_PIPES * 2; ++i) {
+-              if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+-                      continue;
++      for (uint8_t i = 0; i < MAX_PIPES * 2; ++i) {
+-              link_temp = dc_struct->links[i];
+-              if (!link_temp || !link_temp->hpd_status)
++              if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+                       continue;
+-              idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;
+-
+-              if (idx_temp == idx) {
+-
+-                      if (type == HOST_ROUTER_BW_ESTIMATED)
+-                              total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
+-                      else if (type == HOST_ROUTER_BW_ALLOCATED)
+-                              total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
++              hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2;
++
++              if (hr_index_temp == hr_index) {
++                      link_dpia_primary = dc->links[i];
++                      link_dpia_secondary = dc->links[i + 1];
++
++                      /**
++                       * If BW allocation enabled on both DPIAs, then
++                       * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary)
++                       * otherwise HR BW = Estimated(bw alloc enabled dpia)
++                       */
++                      if ((link_dpia_primary->hpd_status &&
++                              link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
++                              (link_dpia_secondary->hpd_status &&
++                              link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
++                              total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
++                                      link_dpia_secondary->dpia_bw_alloc_config.sink_allocated_bw;
++                      } else if (link_dpia_primary->hpd_status &&
++                                      link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
++                              total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
++                      } else if (link_dpia_secondary->hpd_status &&
++                              link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) {
++                              total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw;
++                      }
++                      break;
+               }
+       }
+@@ -194,7 +239,6 @@ static void dpia_bw_alloc_unplug(struct dc_link *link)
+       if (link) {
+               DC_LOG_DEBUG("%s: resetting bw alloc config for link(%d)\n",
+                       __func__, link->link_index);
+-              link->dpia_bw_alloc_config.sink_allocated_bw = 0;
+               reset_bw_alloc_struct(link);
+       }
+ }
+@@ -397,7 +441,7 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
+               if (!timeout)
+                       ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
+               else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
+-                      ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
++                      ret = link->dpia_bw_alloc_config.sink_allocated_bw;
+       }
+       //2. Cold Unplug
+       else if (!link->hpd_status)
+@@ -439,29 +483,41 @@ bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
+ bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias)
+ {
+       bool ret = true;
+-      int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 };
+-      uint8_t lowest_dpia_index = 0, dpia_index = 0;
+-      uint8_t i;
++      int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0;
++      uint8_t lowest_dpia_index, i, hr_index;
+       if (!num_dpias || num_dpias > MAX_DPIA_NUM)
+               return ret;
+-      //Get total Host Router BW & Validate against each Host Router max BW
++      lowest_dpia_index = get_lowest_dpia_index(link[0]);
++
++      /* get total Host Router BW with granularity for the given modes */
+       for (i = 0; i < num_dpias; ++i) {
++              int granularity_Gbps = 0;
++              int bw_granularity = 0;
+               if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled)
+                       continue;
+-              lowest_dpia_index = get_lowest_dpia_index(link[i]);
+               if (link[i]->link_index < lowest_dpia_index)
+                       continue;
+-              dpia_index = (link[i]->link_index - lowest_dpia_index) / 2;
+-              bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i];
+-              if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) {
++              granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity);
++              bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps +
++                              ((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0);
+-                      ret = false;
+-                      break;
++              hr_index = (link[i]->link_index - lowest_dpia_index) / 2;
++              bw_needed_per_hr[hr_index] += bw_granularity;
++      }
++
++      /* validate against each Host Router max BW */
++      for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) {
++              if (bw_needed_per_hr[hr_index]) {
++                      host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index);
++                      if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) {
++                              ret = false;
++                              break;
++                      }
+               }
+       }
+-- 
+2.43.0
+
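The validation added above rounds each DPIA stream's required bandwidth up to the link's allocation granularity before summing the demand per host router and comparing it against the host router's total DP tunnel bandwidth. A minimal standalone sketch of that rounding, using an illustrative macro and helper name rather than the kernel's own symbols:

#define KBPS_PER_GBPS (1000 * 1000)

/* bw_granularity expresses the allocation unit as 1/bw_granularity Gbps */
static int round_bw_to_granularity(int bw_needed_kbps, int bw_granularity)
{
	int granularity_kbps = KBPS_PER_GBPS / bw_granularity;

	/* round the requested bandwidth up to the next granularity multiple */
	return (bw_needed_kbps / granularity_kbps) * granularity_kbps +
	       ((bw_needed_kbps % granularity_kbps) ? granularity_kbps : 0);
}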
diff --git a/queue-6.7/drm-amd-display-fixed-integer-types-and-null-check-l.patch b/queue-6.7/drm-amd-display-fixed-integer-types-and-null-check-l.patch
new file mode 100644 (file)
index 0000000..b7185ab
--- /dev/null
@@ -0,0 +1,114 @@
+From b235346b80482dac289d32b3c34c8594f8f63302 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Jan 2024 16:40:37 -0500
+Subject: drm/amd/display: fixed integer types and null check locations
+
+From: Sohaib Nadeem <sohaib.nadeem@amd.com>
+
+[ Upstream commit 0484e05d048b66d01d1f3c1d2306010bb57d8738 ]
+
+[why]:
+issues fixed:
+- comparison with wider integer type in loop condition which can cause
+infinite loops
+- pointer dereference before null check
+
+Cc: Mario Limonciello <mario.limonciello@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Josip Pavic <josip.pavic@amd.com>
+Acked-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Signed-off-by: Sohaib Nadeem <sohaib.nadeem@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/amd/display/dc/bios/bios_parser2.c   | 16 ++++++++++------
+ .../drm/amd/display/dc/link/link_validation.c    |  2 +-
+ 2 files changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+index b5b29451d2db8..bc7a375f43c0c 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+@@ -1850,19 +1850,21 @@ static enum bp_result get_firmware_info_v3_2(
+               /* Vega12 */
+               smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
+                                                       DATA_TABLES(smu_info));
+-              DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
+               if (!smu_info_v3_2)
+                       return BP_RESULT_BADBIOSTABLE;
++              DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
++
+               info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
+       } else if (revision.minor == 3) {
+               /* Vega20 */
+               smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
+                                                       DATA_TABLES(smu_info));
+-              DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
+               if (!smu_info_v3_3)
+                       return BP_RESULT_BADBIOSTABLE;
++              DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
++
+               info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
+       }
+@@ -2423,10 +2425,11 @@ static enum bp_result get_integrated_info_v11(
+       info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
+                                       DATA_TABLES(integratedsysteminfo));
+-      DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
+       if (info_v11 == NULL)
+               return BP_RESULT_BADBIOSTABLE;
++      DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
++
+       info->gpu_cap_info =
+       le32_to_cpu(info_v11->gpucapinfo);
+       /*
+@@ -2638,11 +2641,12 @@ static enum bp_result get_integrated_info_v2_1(
+       info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
+                                       DATA_TABLES(integratedsysteminfo));
+-      DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
+       if (info_v2_1 == NULL)
+               return BP_RESULT_BADBIOSTABLE;
++      DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
++
+       info->gpu_cap_info =
+       le32_to_cpu(info_v2_1->gpucapinfo);
+       /*
+@@ -2800,11 +2804,11 @@ static enum bp_result get_integrated_info_v2_2(
+       info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
+                                       DATA_TABLES(integratedsysteminfo));
+-      DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
+-
+       if (info_v2_2 == NULL)
+               return BP_RESULT_BADBIOSTABLE;
++      DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
++
+       info->gpu_cap_info =
+       le32_to_cpu(info_v2_2->gpucapinfo);
+       /*
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+index 8fe66c3678508..5b0bc7f6a188c 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+@@ -361,7 +361,7 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
+       struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
+       int num_dpias = 0;
+-      for (uint8_t i = 0; i < num_streams; ++i) {
++      for (unsigned int i = 0; i < num_streams; ++i) {
+               if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
+                       /* new dpia sst stream, check whether it exceeds max dpia */
+                       if (num_dpias >= MAX_DPIA_NUM)
+-- 
+2.43.0
+
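The second fix above is about ordering: the value returned by GET_IMAGE() was dereferenced by a debug log before the NULL check ran. A reduced illustration of the corrected pattern, using placeholder names rather than the bios_parser2.c symbols:

#include <stdio.h>

struct smu_info {
	int gpuclk_ss_percentage;
};

static int parse_smu_info(const struct smu_info *info)
{
	/* check the pointer first; the original code logged the field,
	 * dereferencing 'info', before this check
	 */
	if (!info)
		return -1;

	printf("gpuclk_ss_percentage: %d\n", info->gpuclk_ss_percentage);
	return 0;
}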
diff --git a/queue-6.7/drm-amd-display-request-usb4-bw-for-mst-streams.patch b/queue-6.7/drm-amd-display-request-usb4-bw-for-mst-streams.patch
new file mode 100644 (file)
index 0000000..eeea0f9
--- /dev/null
@@ -0,0 +1,373 @@
+From 037912cc4ef5064b62c074febfab9eaea41a2af0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Dec 2023 23:16:34 +0800
+Subject: drm/amd/display: Request usb4 bw for mst streams
+
+From: Peichen Huang <peichen.huang@amd.com>
+
+[ Upstream commit 5f3bce13266e6fe2f7a46f94d8bc94d5274e276b ]
+
+[WHY]
+When usb4 bandwidth allocation mode is enabled, driver need to request
+bandwidth from connection manager. For mst link,  the requested
+bandwidth should be big enough for all remote streams.
+
+[HOW]
+- If mst link, the requested bandwidth should be the sum of all mst
+  streams bandwidth added with dp MTPH overhead.
+- Allocate/deallocate usb4 bandwidth when setting dpms on/off.
+- When doing display mode validation, driver also need to consider total
+  bandwidth of all mst streams for mst link.
+
+Reviewed-by: Cruise Hung <cruise.hung@amd.com>
+Acked-by: Rodrigo Siqueira <rodrigo.siqueira@amd.com>
+Signed-off-by: Peichen Huang <peichen.huang@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 0484e05d048b ("drm/amd/display: fixed integer types and null check locations")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dc_types.h     | 12 ++--
+ .../gpu/drm/amd/display/dc/link/link_dpms.c   | 42 ++++++++++---
+ .../drm/amd/display/dc/link/link_validation.c | 60 +++++++++++++++----
+ .../dc/link/protocols/link_dp_dpia_bw.c       | 59 +++++++++++++-----
+ .../dc/link/protocols/link_dp_dpia_bw.h       |  9 +++
+ 5 files changed, 144 insertions(+), 38 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index ab91504529c65..66d0774bef527 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -1100,23 +1100,25 @@ struct dc_panel_config {
+       } ilr;
+ };
++#define MAX_SINKS_PER_LINK 4
++
+ /*
+  *  USB4 DPIA BW ALLOCATION STRUCTS
+  */
+ struct dc_dpia_bw_alloc {
+-      int sink_verified_bw;  // The Verified BW that sink can allocated and use that has been verified already
+-      int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
+-      int sink_max_bw;       // The Max BW that sink can require/support
++      int remote_sink_req_bw[MAX_SINKS_PER_LINK]; // BW requested by remote sinks
++      int link_verified_bw;  // The Verified BW that link can allocated and use that has been verified already
++      int link_max_bw;       // The Max BW that link can require/support
++      int allocated_bw;      // The Actual Allocated BW for this DPIA
+       int estimated_bw;      // The estimated available BW for this DPIA
+       int bw_granularity;    // BW Granularity
++      int dp_overhead;       // DP overhead in dp tunneling
+       bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3:  DP-Tx & Dpia & CM
+       bool response_ready;   // Response ready from the CM side
+       uint8_t nrd_max_lane_count; // Non-reduced max lane count
+       uint8_t nrd_max_link_rate; // Non-reduced max link rate
+ };
+-#define MAX_SINKS_PER_LINK 4
+-
+ enum dc_hpd_enable_select {
+       HPD_EN_FOR_ALL_EDP = 0,
+       HPD_EN_FOR_PRIMARY_EDP_ONLY,
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+index a08ae59c1ea9f..007ee32c202e8 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+@@ -2098,17 +2098,11 @@ static enum dc_status enable_link_dp(struct dc_state *state,
+               }
+       }
+-      /*
+-       * If the link is DP-over-USB4 do the following:
+-       * - Train with fallback when enabling DPIA link. Conventional links are
++      /* Train with fallback when enabling DPIA link. Conventional links are
+        * trained with fallback during sink detection.
+-       * - Allocate only what the stream needs for bw in Gbps. Inform the CM
+-       * in case stream needs more or less bw from what has been allocated
+-       * earlier at plug time.
+        */
+-      if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
++      if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+               do_fallback = true;
+-      }
+       /*
+        * Temporary w/a to get DP2.0 link rates to work with SST.
+@@ -2290,6 +2284,32 @@ static enum dc_status enable_link(
+       return status;
+ }
++static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)
++{
++      return true;
++}
++
++static bool allocate_usb4_bandwidth(struct dc_stream_state *stream)
++{
++      bool ret;
++
++      int bw = dc_bandwidth_in_kbps_from_timing(&stream->timing,
++                      dc_link_get_highest_encoding_format(stream->sink->link));
++
++      ret = allocate_usb4_bandwidth_for_stream(stream, bw);
++
++      return ret;
++}
++
++static bool deallocate_usb4_bandwidth(struct dc_stream_state *stream)
++{
++      bool ret;
++
++      ret = allocate_usb4_bandwidth_for_stream(stream, 0);
++
++      return ret;
++}
++
+ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
+ {
+       struct dc  *dc = pipe_ctx->stream->ctx->dc;
+@@ -2325,6 +2345,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
+       update_psp_stream_config(pipe_ctx, true);
+       dc->hwss.blank_stream(pipe_ctx);
++      if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
++              deallocate_usb4_bandwidth(pipe_ctx->stream);
++
+       if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+               deallocate_mst_payload(pipe_ctx);
+       else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+@@ -2567,6 +2590,9 @@ void link_set_dpms_on(
+               }
+       }
++      if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
++              allocate_usb4_bandwidth(pipe_ctx->stream);
++
+       if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+               allocate_mst_payload(pipe_ctx);
+       else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+index b45fda96eaf64..8fe66c3678508 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
++++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+@@ -346,23 +346,61 @@ enum dc_status link_validate_mode_timing(
+       return DC_OK;
+ }
++/*
++ * This function calculates the bandwidth required for the stream timing
++ * and aggregates the stream bandwidth for the respective dpia link
++ *
++ * @stream: pointer to the dc_stream_state struct instance
++ * @num_streams: number of streams to be validated
++ *
++ * return: true if validation is succeeded
++ */
+ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams)
+ {
+-      bool ret = true;
+-      int bw_needed[MAX_DPIA_NUM];
+-      struct dc_link *link[MAX_DPIA_NUM];
+-
+-      if (!num_streams || num_streams > MAX_DPIA_NUM)
+-              return ret;
++      int bw_needed[MAX_DPIA_NUM] = {0};
++      struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
++      int num_dpias = 0;
+       for (uint8_t i = 0; i < num_streams; ++i) {
++              if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
++                      /* new dpia sst stream, check whether it exceeds max dpia */
++                      if (num_dpias >= MAX_DPIA_NUM)
++                              return false;
+-              link[i] = stream[i].link;
+-              bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
+-                              dc_link_get_highest_encoding_format(link[i]));
++                      dpia_link[num_dpias] = stream[i].link;
++                      bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
++                                      dc_link_get_highest_encoding_format(dpia_link[num_dpias]));
++                      num_dpias++;
++              } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
++                      uint8_t j = 0;
++                      /* check whether its a known dpia link */
++                      for (; j < num_dpias; ++j) {
++                              if (dpia_link[j] == stream[i].link)
++                                      break;
++                      }
++
++                      if (j == num_dpias) {
++                              /* new dpia mst stream, check whether it exceeds max dpia */
++                              if (num_dpias >= MAX_DPIA_NUM)
++                                      return false;
++                              else {
++                                      dpia_link[j] = stream[i].link;
++                                      num_dpias++;
++                              }
++                      }
++
++                      bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing,
++                              dc_link_get_highest_encoding_format(dpia_link[j]));
++              }
+       }
+-      ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
++      /* Include dp overheads */
++      for (uint8_t i = 0; i < num_dpias; ++i) {
++              int dp_overhead = 0;
++
++              dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]);
++              bw_needed[i] += dp_overhead;
++      }
+-      return ret;
++      return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+index a7aa8c9da868f..4ef1a6a1d1295 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+@@ -54,12 +54,18 @@ static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
+ static void reset_bw_alloc_struct(struct dc_link *link)
+ {
+       link->dpia_bw_alloc_config.bw_alloc_enabled = false;
+-      link->dpia_bw_alloc_config.sink_verified_bw = 0;
+-      link->dpia_bw_alloc_config.sink_max_bw = 0;
++      link->dpia_bw_alloc_config.link_verified_bw = 0;
++      link->dpia_bw_alloc_config.link_max_bw = 0;
++      link->dpia_bw_alloc_config.allocated_bw = 0;
+       link->dpia_bw_alloc_config.estimated_bw = 0;
+       link->dpia_bw_alloc_config.bw_granularity = 0;
++      link->dpia_bw_alloc_config.dp_overhead = 0;
+       link->dpia_bw_alloc_config.response_ready = false;
+-      link->dpia_bw_alloc_config.sink_allocated_bw = 0;
++      link->dpia_bw_alloc_config.nrd_max_lane_count = 0;
++      link->dpia_bw_alloc_config.nrd_max_link_rate = 0;
++      for (int i = 0; i < MAX_SINKS_PER_LINK; i++)
++              link->dpia_bw_alloc_config.remote_sink_req_bw[i] = 0;
++      DC_LOG_DEBUG("reset usb4 bw alloc of link(%d)\n", link->link_index);
+ }
+ #define BW_GRANULARITY_0 4 // 0.25 Gbps
+@@ -210,8 +216,8 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in
+                               link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) &&
+                               (link_dpia_secondary->hpd_status &&
+                               link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) {
+-                              total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
+-                                      link_dpia_secondary->dpia_bw_alloc_config.sink_allocated_bw;
++                                      total_bw += link_dpia_primary->dpia_bw_alloc_config.estimated_bw +
++                                              link_dpia_secondary->dpia_bw_alloc_config.allocated_bw;
+                       } else if (link_dpia_primary->hpd_status &&
+                                       link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) {
+                               total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw;
+@@ -264,7 +270,7 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
+       /* Error check whether requested and allocated are equal */
+       req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+-      if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw) {
++      if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
+               DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
+                       __func__, link->link_index);
+       }
+@@ -387,9 +393,9 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
+               DC_LOG_DEBUG("%s: BW REQ SUCCESS for DP-TX Request for link(%d)\n",
+                       __func__, link->link_index);
+               DC_LOG_DEBUG("%s: current allocated_bw(%d), new allocated_bw(%d)\n",
+-                      __func__, link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed);
++                      __func__, link->dpia_bw_alloc_config.allocated_bw, bw_needed);
+-              link->dpia_bw_alloc_config.sink_allocated_bw = bw_needed;
++              link->dpia_bw_alloc_config.allocated_bw = bw_needed;
+               link->dpia_bw_alloc_config.response_ready = true;
+               break;
+@@ -427,8 +433,8 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
+       if (link->hpd_status && peak_bw > 0) {
+               // If DP over USB4 then we need to check BW allocation
+-              link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
+-              set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
++              link->dpia_bw_alloc_config.link_max_bw = peak_bw;
++              set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.link_max_bw);
+               do {
+                       if (timeout > 0)
+@@ -440,8 +446,8 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
+               if (!timeout)
+                       ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
+-              else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
+-                      ret = link->dpia_bw_alloc_config.sink_allocated_bw;
++              else if (link->dpia_bw_alloc_config.allocated_bw > 0)
++                      ret = link->dpia_bw_alloc_config.allocated_bw;
+       }
+       //2. Cold Unplug
+       else if (!link->hpd_status)
+@@ -450,7 +456,6 @@ int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pea
+ out:
+       return ret;
+ }
+-
+ bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
+ {
+       bool ret = false;
+@@ -458,7 +463,7 @@ bool link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r
+       DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n",
+               __func__, link->link_index, link->hpd_status,
+-              link->dpia_bw_alloc_config.sink_allocated_bw, req_bw);
++              link->dpia_bw_alloc_config.allocated_bw, req_bw);
+       if (!get_bw_alloc_proceed_flag(link))
+               goto out;
+@@ -523,3 +528,29 @@ bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const
+       return ret;
+ }
++
++int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link)
++{
++      int dp_overhead = 0, link_mst_overhead = 0;
++
++      if (!get_bw_alloc_proceed_flag((link)))
++              return dp_overhead;
++
++      /* if its mst link, add MTPH overhead */
++      if ((link->type == dc_connection_mst_branch) &&
++              !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
++              /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
++               * MST overhead is 1/64 of link bandwidth (excluding any overhead)
++               */
++              const struct dc_link_settings *link_cap =
++                      dc_link_get_link_cap(link);
++              uint32_t link_bw_in_kbps =
++                      link_cap->link_rate * link_cap->lane_count * LINK_RATE_REF_FREQ_IN_KHZ * 8;
++              link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0);
++      }
++
++      /* add all the overheads */
++      dp_overhead = link_mst_overhead;
++
++      return dp_overhead;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+index 981bc4eb6120e..3b6d8494f9d5d 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+@@ -99,4 +99,13 @@ void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t res
+  */
+ bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias);
++/*
++ * Obtain all the DP overheads in dp tunneling for the dpia link
++ *
++ * @link: pointer to the dc_link struct instance
++ *
++ * return: DP overheads in DP tunneling
++ */
++int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link);
++
+ #endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
+-- 
+2.43.0
+
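For MST links the change above adds an MTP header (MTPH) overhead to the bandwidth requested over the tunnel: with 8b/10b coding, one of the 64 MTP time slots carries the header, so the overhead is 1/64 of the link bandwidth, rounded up. A standalone sketch of that arithmetic, with an illustrative helper name:

static unsigned int mst_mtph_overhead_kbps(unsigned int link_bw_kbps)
{
	/* 1 of 64 time slots carries the header; round up to a whole kbps */
	return link_bw_kbps / 64 + (link_bw_kbps % 64 ? 1 : 0);
}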
diff --git a/queue-6.7/mptcp-add-currestab-mib-counter-support.patch b/queue-6.7/mptcp-add-currestab-mib-counter-support.patch
new file mode 100644 (file)
index 0000000..1efe480
--- /dev/null
@@ -0,0 +1,115 @@
+From bd3cab630c0e7a43430cd31f2bc5af941508f22e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Dec 2023 13:47:22 +0100
+Subject: mptcp: add CurrEstab MIB counter support
+
+From: Geliang Tang <geliang.tang@linux.dev>
+
+[ Upstream commit d9cd27b8cd191133e287e5de107f971136abe8a2 ]
+
+Add a new MIB counter named MPTCP_MIB_CURRESTAB to count current
+established MPTCP connections, similar to TCP_MIB_CURRESTAB. This is
+useful to quickly list the number of MPTCP connections without having to
+iterate over all of them.
+
+This patch adds a new helper function mptcp_set_state(): if the state
+switches from or to ESTABLISHED state, this newly added counter is
+incremented. This helper is going to be used in the following patch.
+
+Similar to MPTCP_INC_STATS(), a new helper called MPTCP_DEC_STATS() is
+also needed to decrement a MIB counter.
+
+Signed-off-by: Geliang Tang <geliang.tang@linux.dev>
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts <matttbe@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: e4a0fa47e816 ("mptcp: corner case locking for rx path fields initialization")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/mib.c      |  1 +
+ net/mptcp/mib.h      |  8 ++++++++
+ net/mptcp/protocol.c | 18 ++++++++++++++++++
+ net/mptcp/protocol.h |  1 +
+ 4 files changed, 28 insertions(+)
+
+diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
+index a0990c365a2ea..c30405e768337 100644
+--- a/net/mptcp/mib.c
++++ b/net/mptcp/mib.c
+@@ -66,6 +66,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
+       SNMP_MIB_ITEM("RcvWndShared", MPTCP_MIB_RCVWNDSHARED),
+       SNMP_MIB_ITEM("RcvWndConflictUpdate", MPTCP_MIB_RCVWNDCONFLICTUPDATE),
+       SNMP_MIB_ITEM("RcvWndConflict", MPTCP_MIB_RCVWNDCONFLICT),
++      SNMP_MIB_ITEM("MPCurrEstab", MPTCP_MIB_CURRESTAB),
+       SNMP_MIB_SENTINEL
+ };
+diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
+index cae71d9472529..dd7fd1f246b5f 100644
+--- a/net/mptcp/mib.h
++++ b/net/mptcp/mib.h
+@@ -65,6 +65,7 @@ enum linux_mptcp_mib_field {
+                                        * conflict with another subflow while updating msk rcv wnd
+                                        */
+       MPTCP_MIB_RCVWNDCONFLICT,       /* Conflict with while updating msk rcv wnd */
++      MPTCP_MIB_CURRESTAB,            /* Current established MPTCP connections */
+       __MPTCP_MIB_MAX
+ };
+@@ -95,4 +96,11 @@ static inline void __MPTCP_INC_STATS(struct net *net,
+               __SNMP_INC_STATS(net->mib.mptcp_statistics, field);
+ }
++static inline void MPTCP_DEC_STATS(struct net *net,
++                                 enum linux_mptcp_mib_field field)
++{
++      if (likely(net->mib.mptcp_statistics))
++              SNMP_DEC_STATS(net->mib.mptcp_statistics, field);
++}
++
+ bool mptcp_mib_alloc(struct net *net);
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 0b42ce7de45cc..a30ad00470bc7 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2888,6 +2888,24 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+       release_sock(ssk);
+ }
++void mptcp_set_state(struct sock *sk, int state)
++{
++      int oldstate = sk->sk_state;
++
++      switch (state) {
++      case TCP_ESTABLISHED:
++              if (oldstate != TCP_ESTABLISHED)
++                      MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
++              break;
++
++      default:
++              if (oldstate == TCP_ESTABLISHED)
++                      MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
++      }
++
++      inet_sk_state_store(sk, state);
++}
++
+ static const unsigned char new_state[16] = {
+       /* current state:     new state:      action:   */
+       [0 /* (Invalid) */] = TCP_CLOSE,
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 27f3fedb9c366..0aae9acef80f7 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -641,6 +641,7 @@ bool __mptcp_close(struct sock *sk, long timeout);
+ void mptcp_cancel_work(struct sock *sk);
+ void __mptcp_unaccepted_force_close(struct sock *sk);
+ void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk);
++void mptcp_set_state(struct sock *sk, int state);
+ bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
+                          const struct mptcp_addr_info *b, bool use_port);
+-- 
+2.43.0
+
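The rule mptcp_set_state() introduces above only touches the counter on transitions into or out of the established state, so moves between other states leave it unchanged. A self-contained sketch of that rule, with a local enum and counter standing in for the kernel's TCP states and MIB machinery:

enum conn_state { CLOSED, SYN_SENT, ESTABLISHED, FIN_WAIT };

static int curr_estab;	/* stands in for the CurrEstab MIB counter */

static void set_state(enum conn_state *cur, enum conn_state next)
{
	if (next == ESTABLISHED && *cur != ESTABLISHED)
		curr_estab++;		/* entering established */
	else if (next != ESTABLISHED && *cur == ESTABLISHED)
		curr_estab--;		/* leaving established */
	*cur = next;
}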
diff --git a/queue-6.7/mptcp-corner-case-locking-for-rx-path-fields-initial.patch b/queue-6.7/mptcp-corner-case-locking-for-rx-path-fields-initial.patch
new file mode 100644 (file)
index 0000000..9b92155
--- /dev/null
@@ -0,0 +1,280 @@
+From 85f4a91ce18e409a0d50911e75172e9dc60d3a35 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Feb 2024 19:03:52 +0100
+Subject: mptcp: corner case locking for rx path fields initialization
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit e4a0fa47e816e186f6b4c0055d07eeec42d11871 ]
+
+Most MPTCP-level related fields are under the mptcp data lock
+protection, but are written one-off without such lock at MPC
+complete time, both for the client and the server
+
+Leverage the mptcp_propagate_state() infrastructure to move such
+initialization under the proper lock client-wise.
+
+The server side critical init steps are done by
+mptcp_subflow_fully_established(): ensure the caller properly held the
+relevant lock, and avoid acquiring the same lock in the nested scopes.
+
+There are no real potential races, as write access to such fields
+is implicitly serialized by the MPTCP state machine; the primary
+goal is consistency.
+
+Fixes: d22f4988ffec ("mptcp: process MP_CAPABLE data option")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/fastopen.c |  6 ++---
+ net/mptcp/options.c  |  9 +++----
+ net/mptcp/protocol.c |  9 ++++---
+ net/mptcp/protocol.h |  9 +++----
+ net/mptcp/subflow.c  | 56 +++++++++++++++++++++++++-------------------
+ 5 files changed, 50 insertions(+), 39 deletions(-)
+
+diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
+index 74698582a2859..ad28da655f8bc 100644
+--- a/net/mptcp/fastopen.c
++++ b/net/mptcp/fastopen.c
+@@ -59,13 +59,12 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
+       mptcp_data_unlock(sk);
+ }
+-void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
+-                                 const struct mptcp_options_received *mp_opt)
++void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
++                                   const struct mptcp_options_received *mp_opt)
+ {
+       struct sock *sk = (struct sock *)msk;
+       struct sk_buff *skb;
+-      mptcp_data_lock(sk);
+       skb = skb_peek_tail(&sk->sk_receive_queue);
+       if (skb) {
+               WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq);
+@@ -77,5 +76,4 @@ void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_
+       }
+       pr_debug("msk=%p ack_seq=%llx", msk, msk->ack_seq);
+-      mptcp_data_unlock(sk);
+ }
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index d2527d189a799..e3e96a49f9229 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -962,9 +962,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+               /* subflows are fully established as soon as we get any
+                * additional ack, including ADD_ADDR.
+                */
+-              subflow->fully_established = 1;
+-              WRITE_ONCE(msk->fully_established, true);
+-              goto check_notify;
++              goto set_fully_established;
+       }
+       /* If the first established packet does not contain MP_CAPABLE + data
+@@ -986,7 +984,10 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+ set_fully_established:
+       if (unlikely(!READ_ONCE(msk->pm.server_side)))
+               pr_warn_once("bogus mpc option on established client sk");
+-      mptcp_subflow_fully_established(subflow, mp_opt);
++
++      mptcp_data_lock((struct sock *)msk);
++      __mptcp_subflow_fully_established(msk, subflow, mp_opt);
++      mptcp_data_unlock((struct sock *)msk);
+ check_notify:
+       /* if the subflow is not already linked into the conn_list, we can't
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 436a6164b2724..fcd09afb98823 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -3200,6 +3200,7 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+ {
+       struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+       struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
++      struct mptcp_subflow_context *subflow;
+       struct mptcp_sock *msk;
+       if (!nsk)
+@@ -3240,7 +3241,8 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+       /* The msk maintain a ref to each subflow in the connections list */
+       WRITE_ONCE(msk->first, ssk);
+-      list_add(&mptcp_subflow_ctx(ssk)->node, &msk->conn_list);
++      subflow = mptcp_subflow_ctx(ssk);
++      list_add(&subflow->node, &msk->conn_list);
+       sock_hold(ssk);
+       /* new mpc subflow takes ownership of the newly
+@@ -3255,6 +3257,9 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+       __mptcp_propagate_sndbuf(nsk, ssk);
+       mptcp_rcv_space_init(msk, ssk);
++
++      if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK)
++              __mptcp_subflow_fully_established(msk, subflow, mp_opt);
+       bh_unlock_sock(nsk);
+       /* note: the newly allocated socket refcount is 2 now */
+@@ -3530,8 +3535,6 @@ void mptcp_finish_connect(struct sock *ssk)
+        * accessing the field below
+        */
+       WRITE_ONCE(msk->local_key, subflow->local_key);
+-      WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
+-      WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
+       mptcp_pm_new_connection(msk, ssk, 0);
+ }
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 0aae9acef80f7..8ab87d16b1b70 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -622,8 +622,9 @@ unsigned int mptcp_stale_loss_cnt(const struct net *net);
+ unsigned int mptcp_close_timeout(const struct sock *sk);
+ int mptcp_get_pm_type(const struct net *net);
+ const char *mptcp_get_scheduler(const struct net *net);
+-void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
+-                                   const struct mptcp_options_received *mp_opt);
++void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
++                                     struct mptcp_subflow_context *subflow,
++                                     const struct mptcp_options_received *mp_opt);
+ bool __mptcp_retransmit_pending_data(struct sock *sk);
+ void mptcp_check_and_set_pending(struct sock *sk);
+ void __mptcp_push_pending(struct sock *sk, unsigned int flags);
+@@ -952,8 +953,8 @@ void mptcp_event_pm_listener(const struct sock *ssk,
+                            enum mptcp_event_type event);
+ bool mptcp_userspace_pm_active(const struct mptcp_sock *msk);
+-void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
+-                                 const struct mptcp_options_received *mp_opt);
++void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
++                                   const struct mptcp_options_received *mp_opt);
+ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
+                                             struct request_sock *req);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index c2df34ebcf284..c34ecadee1200 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -441,20 +441,6 @@ void __mptcp_sync_state(struct sock *sk, int state)
+       }
+ }
+-static void mptcp_propagate_state(struct sock *sk, struct sock *ssk)
+-{
+-      struct mptcp_sock *msk = mptcp_sk(sk);
+-
+-      mptcp_data_lock(sk);
+-      if (!sock_owned_by_user(sk)) {
+-              __mptcp_sync_state(sk, ssk->sk_state);
+-      } else {
+-              msk->pending_state = ssk->sk_state;
+-              __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
+-      }
+-      mptcp_data_unlock(sk);
+-}
+-
+ static void subflow_set_remote_key(struct mptcp_sock *msk,
+                                  struct mptcp_subflow_context *subflow,
+                                  const struct mptcp_options_received *mp_opt)
+@@ -476,6 +462,31 @@ static void subflow_set_remote_key(struct mptcp_sock *msk,
+       atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
+ }
++static void mptcp_propagate_state(struct sock *sk, struct sock *ssk,
++                                struct mptcp_subflow_context *subflow,
++                                const struct mptcp_options_received *mp_opt)
++{
++      struct mptcp_sock *msk = mptcp_sk(sk);
++
++      mptcp_data_lock(sk);
++      if (mp_opt) {
++              /* Options are available only in the non fallback cases
++               * avoid updating rx path fields otherwise
++               */
++              WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
++              WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
++              subflow_set_remote_key(msk, subflow, mp_opt);
++      }
++
++      if (!sock_owned_by_user(sk)) {
++              __mptcp_sync_state(sk, ssk->sk_state);
++      } else {
++              msk->pending_state = ssk->sk_state;
++              __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
++      }
++      mptcp_data_unlock(sk);
++}
++
+ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ {
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+@@ -510,10 +521,9 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+               if (mp_opt.deny_join_id0)
+                       WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
+               subflow->mp_capable = 1;
+-              subflow_set_remote_key(msk, subflow, &mp_opt);
+               MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
+               mptcp_finish_connect(sk);
+-              mptcp_propagate_state(parent, sk);
++              mptcp_propagate_state(parent, sk, subflow, &mp_opt);
+       } else if (subflow->request_join) {
+               u8 hmac[SHA256_DIGEST_SIZE];
+@@ -556,7 +566,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+               }
+       } else if (mptcp_check_fallback(sk)) {
+ fallback:
+-              mptcp_propagate_state(parent, sk);
++              mptcp_propagate_state(parent, sk, subflow, NULL);
+       }
+       return;
+@@ -741,17 +751,16 @@ void mptcp_subflow_drop_ctx(struct sock *ssk)
+       kfree_rcu(ctx, rcu);
+ }
+-void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
+-                                   const struct mptcp_options_received *mp_opt)
++void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
++                                     struct mptcp_subflow_context *subflow,
++                                     const struct mptcp_options_received *mp_opt)
+ {
+-      struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+-
+       subflow_set_remote_key(msk, subflow, mp_opt);
+       subflow->fully_established = 1;
+       WRITE_ONCE(msk->fully_established, true);
+       if (subflow->is_mptfo)
+-              mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
++              __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
+ }
+ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+@@ -844,7 +853,6 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+                        * mpc option
+                        */
+                       if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
+-                              mptcp_subflow_fully_established(ctx, &mp_opt);
+                               mptcp_pm_fully_established(owner, child);
+                               ctx->pm_notified = 1;
+                       }
+@@ -1756,7 +1764,7 @@ static void subflow_state_change(struct sock *sk)
+               mptcp_do_fallback(sk);
+               pr_fallback(msk);
+               subflow->conn_finished = 1;
+-              mptcp_propagate_state(parent, sk);
++              mptcp_propagate_state(parent, sk, subflow, NULL);
+       }
+       /* as recvmsg() does not acquire the subflow socket for ssk selection
+-- 
+2.43.0
+
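The refactor above follows the common kernel convention that a leading double underscore marks a helper whose caller already holds the relevant lock, so the lock is taken once at the outermost call site instead of being re-acquired in nested scopes. A generic userspace sketch of that convention, with a pthread mutex and invented names standing in for mptcp_data_lock() and the msk fields:

#include <pthread.h>

static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;
static int rx_field;

/* __ variant: the caller must already hold data_lock */
static void __init_rx_field(int v)
{
	rx_field = v;
}

/* wrapper for call sites that do not yet hold the lock */
static void init_rx_field(int v)
{
	pthread_mutex_lock(&data_lock);
	__init_rx_field(v);
	pthread_mutex_unlock(&data_lock);
}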
diff --git a/queue-6.7/mptcp-fix-more-tx-path-fields-initialization.patch b/queue-6.7/mptcp-fix-more-tx-path-fields-initialization.patch
new file mode 100644 (file)
index 0000000..9fe959f
--- /dev/null
@@ -0,0 +1,79 @@
+From c6bd3680cfc68299340b3f1f59aeeaaf81fc5934 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 8 Feb 2024 19:03:51 +0100
+Subject: mptcp: fix more tx path fields initialization
+
+From: Paolo Abeni <pabeni@redhat.com>
+
+[ Upstream commit 3f83d8a77eeeb47011b990fd766a421ee64f1d73 ]
+
+The 'msk->write_seq' and 'msk->snd_nxt' are always updated under
+the msk socket lock, except at MPC handshake completion time.
+
+Builds-up on the previous commit to move such init under the relevant
+lock.
+
+There are no known problems caused by the potential race, the
+primary goal is consistency.
+
+Fixes: 6d0060f600ad ("mptcp: Write MPTCP DSS headers to outgoing data packets")
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: e4a0fa47e816 ("mptcp: corner case locking for rx path fields initialization")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/protocol.c |  6 ++----
+ net/mptcp/subflow.c  | 13 +++++++++++--
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 0acb1881e0b5e..436a6164b2724 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -3530,10 +3530,8 @@ void mptcp_finish_connect(struct sock *ssk)
+        * accessing the field below
+        */
+       WRITE_ONCE(msk->local_key, subflow->local_key);
+-      WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
+-      WRITE_ONCE(msk->snd_nxt, msk->write_seq);
+-      WRITE_ONCE(msk->snd_una, msk->write_seq);
+-      WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
++      WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
++      WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
+       mptcp_pm_new_connection(msk, ssk, 0);
+ }
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 56b2ac2f2f22d..c2df34ebcf284 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -421,12 +421,21 @@ static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct soc
+ void __mptcp_sync_state(struct sock *sk, int state)
+ {
++      struct mptcp_subflow_context *subflow;
+       struct mptcp_sock *msk = mptcp_sk(sk);
++      struct sock *ssk = msk->first;
+-      __mptcp_propagate_sndbuf(sk, msk->first);
++      subflow = mptcp_subflow_ctx(ssk);
++      __mptcp_propagate_sndbuf(sk, ssk);
+       if (!msk->rcvspace_init)
+-              mptcp_rcv_space_init(msk, msk->first);
++              mptcp_rcv_space_init(msk, ssk);
++
+       if (sk->sk_state == TCP_SYN_SENT) {
++              /* subflow->idsn is always available in TCP_SYN_SENT state,
++               * even for the FASTOPEN scenarios
++               */
++              WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
++              WRITE_ONCE(msk->snd_nxt, msk->write_seq);
+               mptcp_set_state(sk, state);
+               sk->sk_state_change(sk);
+       }
+-- 
+2.43.0
+
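The hunk above moves the msk->write_seq/msk->snd_nxt initialization out of mptcp_finish_connect() and into __mptcp_sync_state(), which runs under the msk socket lock and derives both values from subflow->idsn. The pattern it relies on is the usual lockless-reader pairing: the writer publishes the fields with WRITE_ONCE() while holding the lock, and any lockless reader pairs that with READ_ONCE(). A minimal illustration follows; the helper names are hypothetical and this is not upstream code.

static void msk_publish_tx_seq(struct mptcp_sock *msk, u64 idsn)
{
        /* caller holds the msk socket lock */
        WRITE_ONCE(msk->write_seq, idsn + 1);
        WRITE_ONCE(msk->snd_nxt, msk->write_seq);
}

static u64 msk_peek_write_seq(struct mptcp_sock *msk)
{
        /* may be called without the msk socket lock */
        return READ_ONCE(msk->write_seq);
}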
diff --git a/queue-6.7/mptcp-use-mptcp_set_state.patch b/queue-6.7/mptcp-use-mptcp_set_state.patch
new file mode 100644 (file)
index 0000000..f4c7397
--- /dev/null
@@ -0,0 +1,217 @@
+From 522e9de9cd84ddf75285efb28cd444f3899f9b19 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Dec 2023 13:47:23 +0100
+Subject: mptcp: use mptcp_set_state
+
+From: Geliang Tang <geliang.tang@linux.dev>
+
+[ Upstream commit c693a8516429908da3ea111b0caa3c042ab1e6e9 ]
+
+This patch replaces all the 'inet_sk_state_store()' calls under net/mptcp
+with the new helper mptcp_set_state().
+
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/460
+Signed-off-by: Geliang Tang <geliang.tang@linux.dev>
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Reviewed-by: Matthieu Baerts <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts <matttbe@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: e4a0fa47e816 ("mptcp: corner case locking for rx path fields initialization")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/pm_netlink.c |  5 +++++
+ net/mptcp/protocol.c   | 38 +++++++++++++++++++-------------------
+ net/mptcp/subflow.c    |  2 +-
+ 3 files changed, 25 insertions(+), 20 deletions(-)
+
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index bf4d96f6f99a6..661c226dad18d 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -1048,6 +1048,11 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+       if (err)
+               return err;
++      /* We don't use mptcp_set_state() here because it needs to be called
++       * under the msk socket lock. For the moment, that will not bring
++       * anything more than only calling inet_sk_state_store(), because the
++       * old status is known (TCP_CLOSE).
++       */
+       inet_sk_state_store(newsk, TCP_LISTEN);
+       lock_sock(ssk);
+       err = __inet_listen_sk(ssk, backlog);
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index a30ad00470bc7..0acb1881e0b5e 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -443,11 +443,11 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
+               switch (sk->sk_state) {
+               case TCP_FIN_WAIT1:
+-                      inet_sk_state_store(sk, TCP_FIN_WAIT2);
++                      mptcp_set_state(sk, TCP_FIN_WAIT2);
+                       break;
+               case TCP_CLOSING:
+               case TCP_LAST_ACK:
+-                      inet_sk_state_store(sk, TCP_CLOSE);
++                      mptcp_set_state(sk, TCP_CLOSE);
+                       break;
+               }
+@@ -608,13 +608,13 @@ static bool mptcp_check_data_fin(struct sock *sk)
+               switch (sk->sk_state) {
+               case TCP_ESTABLISHED:
+-                      inet_sk_state_store(sk, TCP_CLOSE_WAIT);
++                      mptcp_set_state(sk, TCP_CLOSE_WAIT);
+                       break;
+               case TCP_FIN_WAIT1:
+-                      inet_sk_state_store(sk, TCP_CLOSING);
++                      mptcp_set_state(sk, TCP_CLOSING);
+                       break;
+               case TCP_FIN_WAIT2:
+-                      inet_sk_state_store(sk, TCP_CLOSE);
++                      mptcp_set_state(sk, TCP_CLOSE);
+                       break;
+               default:
+                       /* Other states not expected */
+@@ -789,7 +789,7 @@ static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk)
+        */
+       ssk_state = inet_sk_state_load(ssk);
+       if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
+-              inet_sk_state_store(sk, ssk_state);
++              mptcp_set_state(sk, ssk_state);
+       WRITE_ONCE(sk->sk_err, -err);
+       /* This barrier is coupled with smp_rmb() in mptcp_poll() */
+@@ -2480,7 +2480,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+           inet_sk_state_load(msk->first) == TCP_CLOSE) {
+               if (sk->sk_state != TCP_ESTABLISHED ||
+                   msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
+-                      inet_sk_state_store(sk, TCP_CLOSE);
++                      mptcp_set_state(sk, TCP_CLOSE);
+                       mptcp_close_wake_up(sk);
+               } else {
+                       mptcp_start_tout_timer(sk);
+@@ -2575,7 +2575,7 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
+               WRITE_ONCE(sk->sk_err, ECONNRESET);
+       }
+-      inet_sk_state_store(sk, TCP_CLOSE);
++      mptcp_set_state(sk, TCP_CLOSE);
+       WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
+       smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
+       set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
+@@ -2710,7 +2710,7 @@ static void mptcp_do_fastclose(struct sock *sk)
+       struct mptcp_subflow_context *subflow, *tmp;
+       struct mptcp_sock *msk = mptcp_sk(sk);
+-      inet_sk_state_store(sk, TCP_CLOSE);
++      mptcp_set_state(sk, TCP_CLOSE);
+       mptcp_for_each_subflow_safe(msk, subflow, tmp)
+               __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
+                                 subflow, MPTCP_CF_FASTCLOSE);
+@@ -2928,7 +2928,7 @@ static int mptcp_close_state(struct sock *sk)
+       int next = (int)new_state[sk->sk_state];
+       int ns = next & TCP_STATE_MASK;
+-      inet_sk_state_store(sk, ns);
++      mptcp_set_state(sk, ns);
+       return next & TCP_ACTION_FIN;
+ }
+@@ -3039,7 +3039,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
+       if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
+               mptcp_check_listen_stop(sk);
+-              inet_sk_state_store(sk, TCP_CLOSE);
++              mptcp_set_state(sk, TCP_CLOSE);
+               goto cleanup;
+       }
+@@ -3082,7 +3082,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
+        * state, let's not keep resources busy for no reasons
+        */
+       if (subflows_alive == 0)
+-              inet_sk_state_store(sk, TCP_CLOSE);
++              mptcp_set_state(sk, TCP_CLOSE);
+       sock_hold(sk);
+       pr_debug("msk=%p state=%d", sk, sk->sk_state);
+@@ -3148,7 +3148,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
+               return -EBUSY;
+       mptcp_check_listen_stop(sk);
+-      inet_sk_state_store(sk, TCP_CLOSE);
++      mptcp_set_state(sk, TCP_CLOSE);
+       mptcp_stop_rtx_timer(sk);
+       mptcp_stop_tout_timer(sk);
+@@ -3236,7 +3236,7 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
+       /* this can't race with mptcp_close(), as the msk is
+        * not yet exposed to user-space
+        */
+-      inet_sk_state_store(nsk, TCP_ESTABLISHED);
++      mptcp_set_state(nsk, TCP_ESTABLISHED);
+       /* The msk maintain a ref to each subflow in the connections list */
+       WRITE_ONCE(msk->first, ssk);
+@@ -3691,7 +3691,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+       if (IS_ERR(ssk))
+               return PTR_ERR(ssk);
+-      inet_sk_state_store(sk, TCP_SYN_SENT);
++      mptcp_set_state(sk, TCP_SYN_SENT);
+       subflow = mptcp_subflow_ctx(ssk);
+ #ifdef CONFIG_TCP_MD5SIG
+       /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
+@@ -3741,7 +3741,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+       if (unlikely(err)) {
+               /* avoid leaving a dangling token in an unconnected socket */
+               mptcp_token_destroy(msk);
+-              inet_sk_state_store(sk, TCP_CLOSE);
++              mptcp_set_state(sk, TCP_CLOSE);
+               return err;
+       }
+@@ -3831,13 +3831,13 @@ static int mptcp_listen(struct socket *sock, int backlog)
+               goto unlock;
+       }
+-      inet_sk_state_store(sk, TCP_LISTEN);
++      mptcp_set_state(sk, TCP_LISTEN);
+       sock_set_flag(sk, SOCK_RCU_FREE);
+       lock_sock(ssk);
+       err = __inet_listen_sk(ssk, backlog);
+       release_sock(ssk);
+-      inet_sk_state_store(sk, inet_sk_state_load(ssk));
++      mptcp_set_state(sk, inet_sk_state_load(ssk));
+       if (!err) {
+               sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+@@ -3897,7 +3897,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+                       __mptcp_close_ssk(newsk, msk->first,
+                                         mptcp_subflow_ctx(msk->first), 0);
+                       if (unlikely(list_is_singular(&msk->conn_list)))
+-                              inet_sk_state_store(newsk, TCP_CLOSE);
++                              mptcp_set_state(newsk, TCP_CLOSE);
+               }
+       }
+       release_sock(newsk);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 5155ce5b71088..56b2ac2f2f22d 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -427,7 +427,7 @@ void __mptcp_sync_state(struct sock *sk, int state)
+       if (!msk->rcvspace_init)
+               mptcp_rcv_space_init(msk, msk->first);
+       if (sk->sk_state == TCP_SYN_SENT) {
+-              inet_sk_state_store(sk, state);
++              mptcp_set_state(sk, state);
+               sk->sk_state_change(sk);
+       }
+ }
+-- 
+2.43.0
+
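mptcp_set_state() is introduced by mptcp-add-currestab-mib-counter-support.patch, queued just before this one in the series: it wraps inet_sk_state_store() and keeps a per-netns counter of established MPTCP connections in sync, which is why it has to run under the msk socket lock (see the pm_netlink comment in the hunk above). A plausible shape for the helper is sketched below; the MPTCP_MIB_CURRESTAB and MPTCP_DEC_STATS names are assumed to match that queued patch, and this is a sketch, not the verbatim upstream body.

static void mptcp_set_state(struct sock *sk, int state)
{
        int oldstate = sk->sk_state;

        switch (state) {
        case TCP_ESTABLISHED:
                if (oldstate != TCP_ESTABLISHED)
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
                break;
        default:
                if (oldstate == TCP_ESTABLISHED)
                        MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
                break;
        }

        /* state change itself is still the plain inet store */
        inet_sk_state_store(sk, state);
}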
diff --git a/queue-6.7/series b/queue-6.7/series
index 0cee9d2a865d8a0ce90c2eceec2ccd8b494a9af3..337fb1e69f03be1a08f65732d7256e84a5e4f01d 100644 (file)
@@ -131,3 +131,10 @@ virtio-blk-ensure-no-requests-in-virtqueues-before-d.patch
 cifs-change-tcon-status-when-need_reconnect-is-set-o.patch
 cifs-handle-cases-where-multiple-sessions-share-conn.patch
 smb3-clarify-mount-warning.patch
+mptcp-add-currestab-mib-counter-support.patch
+mptcp-use-mptcp_set_state.patch
+mptcp-fix-more-tx-path-fields-initialization.patch
+mptcp-corner-case-locking-for-rx-path-fields-initial.patch
+drm-amd-display-add-dpia-display-mode-validation-log.patch
+drm-amd-display-request-usb4-bw-for-mst-streams.patch
+drm-amd-display-fixed-integer-types-and-null-check-l.patch