git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.6-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 2 Oct 2024 09:56:30 +0000 (11:56 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 2 Oct 2024 09:56:30 +0000 (11:56 +0200)
added patches:
thunderbolt-add-support-for-asymmetric-link.patch
thunderbolt-change-bandwidth-reservations-to-comply-usb4-v2.patch
thunderbolt-configure-asymmetric-link-if-needed-and-bandwidth-allows.patch
thunderbolt-create-multiple-displayport-tunnels-if-there-are-more-dp-in-out-pairs.patch
thunderbolt-expose-tb_tunnel_xxx-log-macros-to-the-rest-of-the-driver.patch
thunderbolt-fix-debug-log-when-displayport-adapter-not-available-for-pairing.patch
thunderbolt-improve-displayport-tunnel-setup-process-to-be-more-robust.patch
thunderbolt-introduce-tb_for_each_upstream_port_on_path.patch
thunderbolt-introduce-tb_port_path_direction_downstream.patch
thunderbolt-introduce-tb_switch_depth.patch
thunderbolt-make-is_gen4_link-available-to-the-rest-of-the-driver.patch
thunderbolt-use-constants-for-path-weight-and-priority.patch
thunderbolt-use-tb_tunnel_dbg-where-possible-to-make-logging-more-consistent.patch
thunderbolt-use-weight-constants-in-tb_usb3_consumed_bandwidth.patch

15 files changed:
queue-6.6/series
queue-6.6/thunderbolt-add-support-for-asymmetric-link.patch [new file with mode: 0644]
queue-6.6/thunderbolt-change-bandwidth-reservations-to-comply-usb4-v2.patch [new file with mode: 0644]
queue-6.6/thunderbolt-configure-asymmetric-link-if-needed-and-bandwidth-allows.patch [new file with mode: 0644]
queue-6.6/thunderbolt-create-multiple-displayport-tunnels-if-there-are-more-dp-in-out-pairs.patch [new file with mode: 0644]
queue-6.6/thunderbolt-expose-tb_tunnel_xxx-log-macros-to-the-rest-of-the-driver.patch [new file with mode: 0644]
queue-6.6/thunderbolt-fix-debug-log-when-displayport-adapter-not-available-for-pairing.patch [new file with mode: 0644]
queue-6.6/thunderbolt-improve-displayport-tunnel-setup-process-to-be-more-robust.patch [new file with mode: 0644]
queue-6.6/thunderbolt-introduce-tb_for_each_upstream_port_on_path.patch [new file with mode: 0644]
queue-6.6/thunderbolt-introduce-tb_port_path_direction_downstream.patch [new file with mode: 0644]
queue-6.6/thunderbolt-introduce-tb_switch_depth.patch [new file with mode: 0644]
queue-6.6/thunderbolt-make-is_gen4_link-available-to-the-rest-of-the-driver.patch [new file with mode: 0644]
queue-6.6/thunderbolt-use-constants-for-path-weight-and-priority.patch [new file with mode: 0644]
queue-6.6/thunderbolt-use-tb_tunnel_dbg-where-possible-to-make-logging-more-consistent.patch [new file with mode: 0644]
queue-6.6/thunderbolt-use-weight-constants-in-tb_usb3_consumed_bandwidth.patch [new file with mode: 0644]

diff --git a/queue-6.6/series b/queue-6.6/series
index 41c965c31dedcd465e8ad4e5c96ded987da49ecc..b7da06efda70bf8269fbf23d6fb929eb72f18c71 100644
@@ -499,3 +499,17 @@ btrfs-update-comment-for-struct-btrfs_inode-lock.patch
 btrfs-fix-race-setting-file-private-on-concurrent-ls.patch
 dt-bindings-spi-nxp-fspi-support-i.mx93-and-i.mx95.patch
 dt-bindings-spi-nxp-fspi-add-imx8ulp-support.patch
+thunderbolt-fix-debug-log-when-displayport-adapter-not-available-for-pairing.patch
+thunderbolt-use-tb_tunnel_dbg-where-possible-to-make-logging-more-consistent.patch
+thunderbolt-expose-tb_tunnel_xxx-log-macros-to-the-rest-of-the-driver.patch
+thunderbolt-create-multiple-displayport-tunnels-if-there-are-more-dp-in-out-pairs.patch
+thunderbolt-use-constants-for-path-weight-and-priority.patch
+thunderbolt-use-weight-constants-in-tb_usb3_consumed_bandwidth.patch
+thunderbolt-make-is_gen4_link-available-to-the-rest-of-the-driver.patch
+thunderbolt-change-bandwidth-reservations-to-comply-usb4-v2.patch
+thunderbolt-introduce-tb_port_path_direction_downstream.patch
+thunderbolt-introduce-tb_for_each_upstream_port_on_path.patch
+thunderbolt-introduce-tb_switch_depth.patch
+thunderbolt-add-support-for-asymmetric-link.patch
+thunderbolt-configure-asymmetric-link-if-needed-and-bandwidth-allows.patch
+thunderbolt-improve-displayport-tunnel-setup-process-to-be-more-robust.patch
diff --git a/queue-6.6/thunderbolt-add-support-for-asymmetric-link.patch b/queue-6.6/thunderbolt-add-support-for-asymmetric-link.patch
new file mode 100644
index 0000000..a52e46a
--- /dev/null
@@ -0,0 +1,701 @@
+From stable+bounces-78576-greg=kroah.com@vger.kernel.org Tue Oct  1 19:35:25 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:31:07 +0000
+Subject: thunderbolt: Add support for asymmetric link
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Gil Fine <gil.fine@linux.intel.com>, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-13-alexandru.gagniuc@hp.com>
+
+From: Gil Fine <gil.fine@linux.intel.com>
+
+[ Upstream commit 81af2952e60603d12415e1a6fd200f8073a2ad8b ]
+
+USB4 v2 spec defines a Gen 4 link that can operate as an aggregated
+symmetric (80/80G) or asymmetric (120/40G). When the link is asymmetric,
+the USB4 port on one side of the link operates with three TX lanes and
+one RX lane, while the USB4 port on the opposite side of the link
+operates with three RX lanes and one TX lane.
+
+Add support for the asymmetric link and provide functions that can be
+used to transition the link to asymmetric and back.
+
+Signed-off-by: Gil Fine <gil.fine@linux.intel.com>
+Co-developed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/switch.c  |  294 ++++++++++++++++++++++++++++++++++++------
+ drivers/thunderbolt/tb.c      |   11 +
+ drivers/thunderbolt/tb.h      |   16 +-
+ drivers/thunderbolt/tb_regs.h |    9 +
+ drivers/thunderbolt/usb4.c    |  106 +++++++++++++++
+ 5 files changed, 381 insertions(+), 55 deletions(-)
+
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -947,6 +947,22 @@ int tb_port_get_link_generation(struct t
+       }
+ }
++static const char *width_name(enum tb_link_width width)
++{
++      switch (width) {
++      case TB_LINK_WIDTH_SINGLE:
++              return "symmetric, single lane";
++      case TB_LINK_WIDTH_DUAL:
++              return "symmetric, dual lanes";
++      case TB_LINK_WIDTH_ASYM_TX:
++              return "asymmetric, 3 transmitters, 1 receiver";
++      case TB_LINK_WIDTH_ASYM_RX:
++              return "asymmetric, 3 receivers, 1 transmitter";
++      default:
++              return "unknown";
++      }
++}
++
+ /**
+  * tb_port_get_link_width() - Get current link width
+  * @port: Port to check (USB4 or CIO)
+@@ -972,8 +988,15 @@ int tb_port_get_link_width(struct tb_por
+               LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
+ }
+-static bool tb_port_is_width_supported(struct tb_port *port,
+-                                     unsigned int width_mask)
++/**
++ * tb_port_width_supported() - Is the given link width supported
++ * @port: Port to check
++ * @width: Widths to check (bitmask)
++ *
++ * Can be called for any lane adapter. Checks if the given @width is
++ * supported by the hardware and returns %true if it is.
++ */
++bool tb_port_width_supported(struct tb_port *port, unsigned int width)
+ {
+       u32 phy, widths;
+       int ret;
+@@ -981,15 +1004,23 @@ static bool tb_port_is_width_supported(s
+       if (!port->cap_phy)
+               return false;
++      if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) {
++              if (tb_port_get_link_generation(port) < 4 ||
++                  !usb4_port_asym_supported(port))
++                      return false;
++      }
++
+       ret = tb_port_read(port, &phy, TB_CFG_PORT,
+                          port->cap_phy + LANE_ADP_CS_0, 1);
+       if (ret)
+               return false;
+-      widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
+-              LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
+-
+-      return widths & width_mask;
++      /*
++       * The field encoding is the same as &enum tb_link_width (which is
++       * passed to @width).
++       */
++      widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy);
++      return widths & width;
+ }
+ /**
+@@ -1024,10 +1055,18 @@ int tb_port_set_link_width(struct tb_por
+               val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
+                       LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+               break;
++
+       case TB_LINK_WIDTH_DUAL:
++              if (tb_port_get_link_generation(port) >= 4)
++                      return usb4_port_asym_set_link_width(port, width);
+               val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
+                       LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+               break;
++
++      case TB_LINK_WIDTH_ASYM_TX:
++      case TB_LINK_WIDTH_ASYM_RX:
++              return usb4_port_asym_set_link_width(port, width);
++
+       default:
+               return -EINVAL;
+       }
+@@ -1152,7 +1191,7 @@ void tb_port_lane_bonding_disable(struct
+ /**
+  * tb_port_wait_for_link_width() - Wait until link reaches specific width
+  * @port: Port to wait for
+- * @width_mask: Expected link width mask
++ * @width: Expected link width (bitmask)
+  * @timeout_msec: Timeout in ms how long to wait
+  *
+  * Should be used after both ends of the link have been bonded (or
+@@ -1161,14 +1200,14 @@ void tb_port_lane_bonding_disable(struct
+  * within the given timeout, %0 if it did. Can be passed a mask of
+  * expected widths and succeeds if any of the widths is reached.
+  */
+-int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
++int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
+                               int timeout_msec)
+ {
+       ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+       int ret;
+       /* Gen 4 link does not support single lane */
+-      if ((width_mask & TB_LINK_WIDTH_SINGLE) &&
++      if ((width & TB_LINK_WIDTH_SINGLE) &&
+           tb_port_get_link_generation(port) >= 4)
+               return -EOPNOTSUPP;
+@@ -1182,7 +1221,7 @@ int tb_port_wait_for_link_width(struct t
+                        */
+                       if (ret != -EACCES)
+                               return ret;
+-              } else if (ret & width_mask) {
++              } else if (ret & width) {
+                       return 0;
+               }
+@@ -2821,6 +2860,38 @@ static int tb_switch_update_link_attribu
+       return 0;
+ }
++/* Must be called after tb_switch_update_link_attributes() */
++static void tb_switch_link_init(struct tb_switch *sw)
++{
++      struct tb_port *up, *down;
++      bool bonded;
++
++      if (!tb_route(sw) || tb_switch_is_icm(sw))
++              return;
++
++      tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed);
++      tb_sw_dbg(sw, "current link width %s\n", width_name(sw->link_width));
++
++      bonded = sw->link_width >= TB_LINK_WIDTH_DUAL;
++
++      /*
++       * Gen 4 links come up as bonded so update the port structures
++       * accordingly.
++       */
++      up = tb_upstream_port(sw);
++      down = tb_switch_downstream_port(sw);
++
++      up->bonded = bonded;
++      if (up->dual_link_port)
++              up->dual_link_port->bonded = bonded;
++      tb_port_update_credits(up);
++
++      down->bonded = bonded;
++      if (down->dual_link_port)
++              down->dual_link_port->bonded = bonded;
++      tb_port_update_credits(down);
++}
++
+ /**
+  * tb_switch_lane_bonding_enable() - Enable lane bonding
+  * @sw: Switch to enable lane bonding
+@@ -2829,24 +2900,20 @@ static int tb_switch_update_link_attribu
+  * switch. If conditions are correct and both switches support the feature,
+  * lanes are bonded. It is safe to call this to any switch.
+  */
+-int tb_switch_lane_bonding_enable(struct tb_switch *sw)
++static int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+ {
+       struct tb_port *up, *down;
+-      u64 route = tb_route(sw);
+-      unsigned int width_mask;
++      unsigned int width;
+       int ret;
+-      if (!route)
+-              return 0;
+-
+       if (!tb_switch_lane_bonding_possible(sw))
+               return 0;
+       up = tb_upstream_port(sw);
+       down = tb_switch_downstream_port(sw);
+-      if (!tb_port_is_width_supported(up, TB_LINK_WIDTH_DUAL) ||
+-          !tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL))
++      if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) ||
++          !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL))
+               return 0;
+       /*
+@@ -2870,21 +2937,10 @@ int tb_switch_lane_bonding_enable(struct
+       }
+       /* Any of the widths are all bonded */
+-      width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
+-                   TB_LINK_WIDTH_ASYM_RX;
++      width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
++              TB_LINK_WIDTH_ASYM_RX;
+-      ret = tb_port_wait_for_link_width(down, width_mask, 100);
+-      if (ret) {
+-              tb_port_warn(down, "timeout enabling lane bonding\n");
+-              return ret;
+-      }
+-
+-      tb_port_update_credits(down);
+-      tb_port_update_credits(up);
+-      tb_switch_update_link_attributes(sw);
+-
+-      tb_sw_dbg(sw, "lane bonding enabled\n");
+-      return ret;
++      return tb_port_wait_for_link_width(down, width, 100);
+ }
+ /**
+@@ -2894,20 +2950,27 @@ int tb_switch_lane_bonding_enable(struct
+  * Disables lane bonding between @sw and parent. This can be called even
+  * if lanes were not bonded originally.
+  */
+-void tb_switch_lane_bonding_disable(struct tb_switch *sw)
++static int tb_switch_lane_bonding_disable(struct tb_switch *sw)
+ {
+       struct tb_port *up, *down;
+       int ret;
+-      if (!tb_route(sw))
+-              return;
+-
+       up = tb_upstream_port(sw);
+       if (!up->bonded)
+-              return;
++              return 0;
+-      down = tb_switch_downstream_port(sw);
++      /*
++       * If the link is Gen 4 there is no way to switch the link to
++       * two single lane links so avoid that here. Also don't bother
++       * if the link is not up anymore (sw is unplugged).
++       */
++      ret = tb_port_get_link_generation(up);
++      if (ret < 0)
++              return ret;
++      if (ret >= 4)
++              return -EOPNOTSUPP;
++      down = tb_switch_downstream_port(sw);
+       tb_port_lane_bonding_disable(up);
+       tb_port_lane_bonding_disable(down);
+@@ -2915,15 +2978,160 @@ void tb_switch_lane_bonding_disable(stru
+        * It is fine if we get other errors as the router might have
+        * been unplugged.
+        */
+-      ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
+-      if (ret == -ETIMEDOUT)
+-              tb_sw_warn(sw, "timeout disabling lane bonding\n");
++      return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
++}
++
++static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width)
++{
++      struct tb_port *up, *down, *port;
++      enum tb_link_width down_width;
++      int ret;
++
++      up = tb_upstream_port(sw);
++      down = tb_switch_downstream_port(sw);
++
++      if (width == TB_LINK_WIDTH_ASYM_TX) {
++              down_width = TB_LINK_WIDTH_ASYM_RX;
++              port = down;
++      } else {
++              down_width = TB_LINK_WIDTH_ASYM_TX;
++              port = up;
++      }
++
++      ret = tb_port_set_link_width(up, width);
++      if (ret)
++              return ret;
++
++      ret = tb_port_set_link_width(down, down_width);
++      if (ret)
++              return ret;
++
++      /*
++       * Initiate the change in the router that one of its TX lanes is
++       * changing to RX but do so only if there is an actual change.
++       */
++      if (sw->link_width != width) {
++              ret = usb4_port_asym_start(port);
++              if (ret)
++                      return ret;
++
++              ret = tb_port_wait_for_link_width(up, width, 100);
++              if (ret)
++                      return ret;
++      }
++
++      sw->link_width = width;
++      return 0;
++}
++
++static int tb_switch_asym_disable(struct tb_switch *sw)
++{
++      struct tb_port *up, *down;
++      int ret;
++
++      up = tb_upstream_port(sw);
++      down = tb_switch_downstream_port(sw);
++
++      ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL);
++      if (ret)
++              return ret;
++
++      ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL);
++      if (ret)
++              return ret;
++
++      /*
++       * Initiate the change in the router that has three TX lanes and
++       * is changing one of its TX lanes to RX but only if there is a
++       * change in the link width.
++       */
++      if (sw->link_width > TB_LINK_WIDTH_DUAL) {
++              if (sw->link_width == TB_LINK_WIDTH_ASYM_TX)
++                      ret = usb4_port_asym_start(up);
++              else
++                      ret = usb4_port_asym_start(down);
++              if (ret)
++                      return ret;
++
++              ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100);
++              if (ret)
++                      return ret;
++      }
++
++      sw->link_width = TB_LINK_WIDTH_DUAL;
++      return 0;
++}
++
++/**
++ * tb_switch_set_link_width() - Configure router link width
++ * @sw: Router to configure
++ * @width: The new link width
++ *
++ * Set device router link width to @width from router upstream port
++ * perspective. Also supports asymmetric links if the routers on both
++ * sides of the link support it.
++ *
++ * Does nothing for host router.
++ *
++ * Returns %0 in case of success, negative errno otherwise.
++ */
++int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
++{
++      struct tb_port *up, *down;
++      int ret = 0;
++
++      if (!tb_route(sw))
++              return 0;
++
++      up = tb_upstream_port(sw);
++      down = tb_switch_downstream_port(sw);
++
++      switch (width) {
++      case TB_LINK_WIDTH_SINGLE:
++              ret = tb_switch_lane_bonding_disable(sw);
++              break;
++
++      case TB_LINK_WIDTH_DUAL:
++              if (sw->link_width == TB_LINK_WIDTH_ASYM_TX ||
++                  sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
++                      ret = tb_switch_asym_disable(sw);
++                      if (ret)
++                              break;
++              }
++              ret = tb_switch_lane_bonding_enable(sw);
++              break;
++
++      case TB_LINK_WIDTH_ASYM_TX:
++      case TB_LINK_WIDTH_ASYM_RX:
++              ret = tb_switch_asym_enable(sw, width);
++              break;
++      }
++
++      switch (ret) {
++      case 0:
++              break;
++
++      case -ETIMEDOUT:
++              tb_sw_warn(sw, "timeout changing link width\n");
++              return ret;
++
++      case -ENOTCONN:
++      case -EOPNOTSUPP:
++      case -ENODEV:
++              return ret;
++
++      default:
++              tb_sw_dbg(sw, "failed to change link width: %d\n", ret);
++              return ret;
++      }
+       tb_port_update_credits(down);
+       tb_port_update_credits(up);
++
+       tb_switch_update_link_attributes(sw);
+-      tb_sw_dbg(sw, "lane bonding disabled\n");
++      tb_sw_dbg(sw, "link width set to %s\n", width_name(width));
++      return ret;
+ }
+ /**
+@@ -3090,6 +3298,8 @@ int tb_switch_add(struct tb_switch *sw)
+               if (ret)
+                       return ret;
++              tb_switch_link_init(sw);
++
+               ret = tb_switch_clx_init(sw);
+               if (ret)
+                       return ret;
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -985,7 +985,7 @@ static void tb_scan_port(struct tb_port
+       }
+       /* Enable lane bonding if supported */
+-      tb_switch_lane_bonding_enable(sw);
++      tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
+       /* Set the link configured */
+       tb_switch_configure_link(sw);
+       /*
+@@ -1103,7 +1103,8 @@ static void tb_free_unplugged_children(s
+                       tb_retimer_remove_all(port);
+                       tb_remove_dp_resources(port->remote->sw);
+                       tb_switch_unconfigure_link(port->remote->sw);
+-                      tb_switch_lane_bonding_disable(port->remote->sw);
++                      tb_switch_set_link_width(port->remote->sw,
++                                               TB_LINK_WIDTH_SINGLE);
+                       tb_switch_remove(port->remote->sw);
+                       port->remote = NULL;
+                       if (port->dual_link_port)
+@@ -1766,7 +1767,8 @@ static void tb_handle_hotplug(struct wor
+                       tb_remove_dp_resources(port->remote->sw);
+                       tb_switch_tmu_disable(port->remote->sw);
+                       tb_switch_unconfigure_link(port->remote->sw);
+-                      tb_switch_lane_bonding_disable(port->remote->sw);
++                      tb_switch_set_link_width(port->remote->sw,
++                                               TB_LINK_WIDTH_SINGLE);
+                       tb_switch_remove(port->remote->sw);
+                       port->remote = NULL;
+                       if (port->dual_link_port)
+@@ -2258,7 +2260,8 @@ static void tb_restore_children(struct t
+                       continue;
+               if (port->remote) {
+-                      tb_switch_lane_bonding_enable(port->remote->sw);
++                      tb_switch_set_link_width(port->remote->sw,
++                                               port->remote->sw->link_width);
+                       tb_switch_configure_link(port->remote->sw);
+                       tb_restore_children(port->remote->sw);
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -164,11 +164,6 @@ struct tb_switch_tmu {
+  * switches) you need to have domain lock held.
+  *
+  * In USB4 terminology this structure represents a router.
+- *
+- * Note @link_width is not the same as whether link is bonded or not.
+- * For Gen 4 links the link is also bonded when it is asymmetric. The
+- * correct way to find out whether the link is bonded or not is to look
+- * @bonded field of the upstream port.
+  */
+ struct tb_switch {
+       struct device dev;
+@@ -969,8 +964,7 @@ static inline bool tb_switch_is_icm(cons
+       return !sw->config.enabled;
+ }
+-int tb_switch_lane_bonding_enable(struct tb_switch *sw);
+-void tb_switch_lane_bonding_disable(struct tb_switch *sw);
++int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width);
+ int tb_switch_configure_link(struct tb_switch *sw);
+ void tb_switch_unconfigure_link(struct tb_switch *sw);
+@@ -1103,10 +1097,11 @@ static inline bool tb_port_use_credit_al
+ int tb_port_get_link_speed(struct tb_port *port);
+ int tb_port_get_link_generation(struct tb_port *port);
+ int tb_port_get_link_width(struct tb_port *port);
++bool tb_port_width_supported(struct tb_port *port, unsigned int width);
+ int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width);
+ int tb_port_lane_bonding_enable(struct tb_port *port);
+ void tb_port_lane_bonding_disable(struct tb_port *port);
+-int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
++int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
+                               int timeout_msec);
+ int tb_port_update_credits(struct tb_port *port);
+@@ -1304,6 +1299,11 @@ int usb4_port_router_online(struct tb_po
+ int usb4_port_enumerate_retimers(struct tb_port *port);
+ bool usb4_port_clx_supported(struct tb_port *port);
+ int usb4_port_margining_caps(struct tb_port *port, u32 *caps);
++
++bool usb4_port_asym_supported(struct tb_port *port);
++int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width);
++int usb4_port_asym_start(struct tb_port *port);
++
+ int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
+                       unsigned int ber_level, bool timing, bool right_high,
+                       u32 *results);
+--- a/drivers/thunderbolt/tb_regs.h
++++ b/drivers/thunderbolt/tb_regs.h
+@@ -348,10 +348,14 @@ struct tb_regs_port_header {
+ #define LANE_ADP_CS_1                         0x01
+ #define LANE_ADP_CS_1_TARGET_SPEED_MASK               GENMASK(3, 0)
+ #define LANE_ADP_CS_1_TARGET_SPEED_GEN3               0xc
+-#define LANE_ADP_CS_1_TARGET_WIDTH_MASK               GENMASK(9, 4)
++#define LANE_ADP_CS_1_TARGET_WIDTH_MASK               GENMASK(5, 4)
+ #define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT      4
+ #define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE     0x1
+ #define LANE_ADP_CS_1_TARGET_WIDTH_DUAL               0x3
++#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK  GENMASK(7, 6)
++#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX    0x1
++#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX    0x2
++#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL  0x0
+ #define LANE_ADP_CS_1_CL0S_ENABLE             BIT(10)
+ #define LANE_ADP_CS_1_CL1_ENABLE              BIT(11)
+ #define LANE_ADP_CS_1_CL2_ENABLE              BIT(12)
+@@ -384,6 +388,8 @@ struct tb_regs_port_header {
+ #define PORT_CS_18_WOCS                               BIT(16)
+ #define PORT_CS_18_WODS                               BIT(17)
+ #define PORT_CS_18_WOU4S                      BIT(18)
++#define PORT_CS_18_CSA                                BIT(22)
++#define PORT_CS_18_TIP                                BIT(24)
+ #define PORT_CS_19                            0x13
+ #define PORT_CS_19_DPR                                BIT(0)
+ #define PORT_CS_19_PC                         BIT(3)
+@@ -391,6 +397,7 @@ struct tb_regs_port_header {
+ #define PORT_CS_19_WOC                                BIT(16)
+ #define PORT_CS_19_WOD                                BIT(17)
+ #define PORT_CS_19_WOU4                               BIT(18)
++#define PORT_CS_19_START_ASYM                 BIT(24)
+ /* Display Port adapter registers */
+ #define ADP_DP_CS_0                           0x00
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -1495,6 +1495,112 @@ bool usb4_port_clx_supported(struct tb_p
+ }
+ /**
++ * usb4_port_asym_supported() - If the port supports asymmetric link
++ * @port: USB4 port
++ *
++ * Checks if the port and the cable support asymmetric link and returns
++ * %true in that case.
++ */
++bool usb4_port_asym_supported(struct tb_port *port)
++{
++      u32 val;
++
++      if (!port->cap_usb4)
++              return false;
++
++      if (tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_18, 1))
++              return false;
++
++      return !!(val & PORT_CS_18_CSA);
++}
++
++/**
++ * usb4_port_asym_set_link_width() - Set link width to asymmetric or symmetric
++ * @port: USB4 port
++ * @width: Asymmetric width to configure
++ *
++ * Sets USB4 port link width to @width. Can be called for widths where
++ * usb4_port_asym_width_supported() returned @true.
++ */
++int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width)
++{
++      u32 val;
++      int ret;
++
++      if (!port->cap_phy)
++              return -EINVAL;
++
++      ret = tb_port_read(port, &val, TB_CFG_PORT,
++                         port->cap_phy + LANE_ADP_CS_1, 1);
++      if (ret)
++              return ret;
++
++      val &= ~LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK;
++      switch (width) {
++      case TB_LINK_WIDTH_DUAL:
++              val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
++                                LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL);
++              break;
++      case TB_LINK_WIDTH_ASYM_TX:
++              val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
++                                LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX);
++              break;
++      case TB_LINK_WIDTH_ASYM_RX:
++              val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
++                                LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX);
++              break;
++      default:
++              return -EINVAL;
++      }
++
++      return tb_port_write(port, &val, TB_CFG_PORT,
++                           port->cap_phy + LANE_ADP_CS_1, 1);
++}
++
++/**
++ * usb4_port_asym_start() - Start symmetry change and wait for completion
++ * @port: USB4 port
++ *
++ * Start symmetry change of the link to asymmetric or symmetric
++ * (according to what was previously set in tb_port_set_link_width()).
++ * Wait for completion of the change.
++ *
++ * Returns %0 in case of success, %-ETIMEDOUT in case of timeout or
++ * a negative errno in case of a failure.
++ */
++int usb4_port_asym_start(struct tb_port *port)
++{
++      int ret;
++      u32 val;
++
++      ret = tb_port_read(port, &val, TB_CFG_PORT,
++                         port->cap_usb4 + PORT_CS_19, 1);
++      if (ret)
++              return ret;
++
++      val &= ~PORT_CS_19_START_ASYM;
++      val |= FIELD_PREP(PORT_CS_19_START_ASYM, 1);
++
++      ret = tb_port_write(port, &val, TB_CFG_PORT,
++                          port->cap_usb4 + PORT_CS_19, 1);
++      if (ret)
++              return ret;
++
++      /*
++       * Wait for PORT_CS_19_START_ASYM to be 0. This means the USB4
++       * port started the symmetry transition.
++       */
++      ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_19,
++                                   PORT_CS_19_START_ASYM, 0, 1000);
++      if (ret)
++              return ret;
++
++      /* Then wait for the transition to be completed */
++      return usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_18,
++                                    PORT_CS_18_TIP, 0, 5000);
++}
++
++/**
++ * usb4_port_margining_caps() - Read USB4 port margining capabilities
+  * @port: USB4 port
+  * @caps: Array with at least two elements to hold the results
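
As an aside to the commit message above (a minimal illustrative sketch, not part of the queued patch): the asymmetric widths boil down to lane counts per direction. The helper below is hypothetical; it assumes only enum tb_link_width from the driver headers and the Gen 4 rate of 40 Gb/s per lane, which is what makes TB_LINK_WIDTH_ASYM_TX a 120/40 Gb/s link and TB_LINK_WIDTH_ASYM_RX a 40/120 Gb/s link from the router's own perspective.

/* Hypothetical helper, not a driver API: lane counts per direction. */
static void asym_lanes(enum tb_link_width width, int *tx_lanes, int *rx_lanes)
{
	switch (width) {
	case TB_LINK_WIDTH_SINGLE:	/* unbonded single lane */
		*tx_lanes = *rx_lanes = 1;
		break;
	case TB_LINK_WIDTH_DUAL:	/* symmetric, 80/80 Gb/s on Gen 4 */
		*tx_lanes = *rx_lanes = 2;
		break;
	case TB_LINK_WIDTH_ASYM_TX:	/* asymmetric, 3 transmitters, 1 receiver */
		*tx_lanes = 3;
		*rx_lanes = 1;
		break;
	case TB_LINK_WIDTH_ASYM_RX:	/* asymmetric, 3 receivers, 1 transmitter */
		*tx_lanes = 1;
		*rx_lanes = 3;
		break;
	default:
		*tx_lanes = *rx_lanes = 0;
		break;
	}
}
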
diff --git a/queue-6.6/thunderbolt-change-bandwidth-reservations-to-comply-usb4-v2.patch b/queue-6.6/thunderbolt-change-bandwidth-reservations-to-comply-usb4-v2.patch
new file mode 100644
index 0000000..1d71c9d
--- /dev/null
@@ -0,0 +1,183 @@
+From linux-usb+bounces-15625-greg=kroah.com@vger.kernel.org Tue Oct  1 19:34:14 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:31:03 +0000
+Subject: thunderbolt: Change bandwidth reservations to comply USB4 v2
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Gil Fine <gil.fine@linux.intel.com>, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-9-alexandru.gagniuc@hp.com>
+
+From: Gil Fine <gil.fine@linux.intel.com>
+
+[ Upstream commit 582e70b0d3a412d15389a3c9c07a44791b311715 ]
+
+USB4 v2 Connection Manager guide (section 6.1.2.3) suggests reserving
+bandwidth in a slightly different manner. It suggests keeping a minimum
+of 1500 Mb/s for each path that carries bulk traffic. Here we change the
+bandwidth reservations to comply with the above for the USB 3.x and PCIe
+protocols over a Gen 4 link, taking weights into account (that's 1500 Mb/s
+for PCIe and 3000 Mb/s for USB 3.x).
+
+For Gen 3 and below we use the existing reservation.
+
+Signed-off-by: Gil Fine <gil.fine@linux.intel.com>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/tb.c     |   11 +++++++
+ drivers/thunderbolt/tunnel.c |   66 +++++++++++++++++++++++++++++++++++++++++--
+ drivers/thunderbolt/tunnel.h |    2 +
+ 3 files changed, 76 insertions(+), 3 deletions(-)
+
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -602,6 +602,7 @@ static int tb_available_bandwidth(struct
+       /* Find the minimum available bandwidth over all links */
+       tb_for_each_port_on_path(src_port, dst_port, port) {
+               int link_speed, link_width, up_bw, down_bw;
++              int pci_reserved_up, pci_reserved_down;
+               if (!tb_port_is_null(port))
+                       continue;
+@@ -695,6 +696,16 @@ static int tb_available_bandwidth(struct
+               up_bw -= usb3_consumed_up;
+               down_bw -= usb3_consumed_down;
++              /*
++               * If there is anything reserved for PCIe bulk traffic
++               * take it into account here too.
++               */
++              if (tb_tunnel_reserved_pci(port, &pci_reserved_up,
++                                         &pci_reserved_down)) {
++                      up_bw -= pci_reserved_up;
++                      down_bw -= pci_reserved_down;
++              }
++
+               if (up_bw < *available_up)
+                       *available_up = up_bw;
+               if (down_bw < *available_down)
+--- a/drivers/thunderbolt/tunnel.c
++++ b/drivers/thunderbolt/tunnel.c
+@@ -31,7 +31,7 @@
+ #define TB_USB3_PATH_UP                       1
+ #define TB_USB3_PRIORITY              3
+-#define TB_USB3_WEIGHT                        3
++#define TB_USB3_WEIGHT                        2
+ /* DP adapters use HopID 8 for AUX and 9 for Video */
+ #define TB_DP_AUX_TX_HOPID            8
+@@ -61,6 +61,15 @@
+ #define TB_DMA_PRIORITY                       5
+ #define TB_DMA_WEIGHT                 1
++/*
++ * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
++ * according to USB4 v2 Connection Manager guide. This ends up reserving
++ * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
++ * account.
++ */
++#define USB4_V2_PCI_MIN_BANDWIDTH     (1500 * TB_PCI_WEIGHT)
++#define USB4_V2_USB3_MIN_BANDWIDTH    (1500 * TB_USB3_WEIGHT)
++
+ static unsigned int dma_credits = TB_DMA_CREDITS;
+ module_param(dma_credits, uint, 0444);
+ MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
+@@ -150,11 +159,11 @@ static struct tb_tunnel *tb_tunnel_alloc
+ static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
+ {
++      struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
+       int ret;
+      /* Only supported if both routers are at least USB4 v2 */
+-      if (usb4_switch_version(tunnel->src_port->sw) < 2 ||
+-          usb4_switch_version(tunnel->dst_port->sw) < 2)
++      if (tb_port_get_link_generation(port) < 4)
+               return 0;
+       ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
+@@ -370,6 +379,51 @@ err_free:
+       return NULL;
+ }
++/**
++ * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
++ * @port: Lane 0 adapter
++ * @reserved_up: Upstream bandwidth in Mb/s to reserve
++ * @reserved_down: Downstream bandwidth in Mb/s to reserve
++ *
++ * Can be called for any connected lane 0 adapter to find out how much
++ * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
++ * Returns true if there is something to be reserved and writes the
++ * amount to @reserved_down/@reserved_up. Otherwise returns false and
++ * does not touch the parameters.
++ */
++bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
++                          int *reserved_down)
++{
++      if (WARN_ON_ONCE(!port->remote))
++              return false;
++
++      if (!tb_acpi_may_tunnel_pcie())
++              return false;
++
++      if (tb_port_get_link_generation(port) < 4)
++              return false;
++
++      /* Must have PCIe adapters */
++      if (tb_is_upstream_port(port)) {
++              if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
++                      return false;
++              if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
++                      return false;
++      } else {
++              if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
++                      return false;
++              if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
++                      return false;
++      }
++
++      *reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
++      *reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
++
++      tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
++                  *reserved_down);
++      return true;
++}
++
+ static bool tb_dp_is_usb4(const struct tb_switch *sw)
+ {
+       /* Titan Ridge DP adapters need the same treatment as USB4 */
+@@ -1747,6 +1801,7 @@ static int tb_usb3_activate(struct tb_tu
+ static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
+               int *consumed_up, int *consumed_down)
+ {
++      struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
+       int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
+       /*
+@@ -1758,6 +1813,11 @@ static int tb_usb3_consumed_bandwidth(st
+       *consumed_down = tunnel->allocated_down *
+               (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
++      if (tb_port_get_link_generation(port) >= 4) {
++              *consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
++              *consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
++      }
++
+       return 0;
+ }
+--- a/drivers/thunderbolt/tunnel.h
++++ b/drivers/thunderbolt/tunnel.h
+@@ -80,6 +80,8 @@ struct tb_tunnel *tb_tunnel_discover_pci
+                                        bool alloc_hopid);
+ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
+                                     struct tb_port *down);
++bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
++                          int *reserved_down);
+ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
+                                       bool alloc_hopid);
+ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
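
To put the numbers from the commit message above together, here is an illustrative sketch (not code from the patch; the 10% guard band figure comes from the link bandwidth calculation elsewhere in this series): a symmetric Gen 4 link offers 80 Gb/s per direction, 72000 Mb/s after the guard band, from which the USB4 v2 minimums keep 3000 Mb/s for USB 3.x (1500 Mb/s times the new TB_USB3_WEIGHT of 2) and 1500 Mb/s for PCIe.

/* Hypothetical worked example of the reservations described above. */
static int example_available_down_gen4(void)
{
	int link_bw = 80000;			/* symmetric Gen 4 link, Mb/s */
	int usable = link_bw - link_bw / 10;	/* 10% guard band -> 72000 Mb/s */
	int usb3_min = 1500 * 2;		/* USB4_V2_USB3_MIN_BANDWIDTH */
	int pci_min = 1500;			/* USB4_V2_PCI_MIN_BANDWIDTH */

	return usable - usb3_min - pci_min;	/* 67500 Mb/s left, e.g. for DP */
}
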
diff --git a/queue-6.6/thunderbolt-configure-asymmetric-link-if-needed-and-bandwidth-allows.patch b/queue-6.6/thunderbolt-configure-asymmetric-link-if-needed-and-bandwidth-allows.patch
new file mode 100644
index 0000000..c5fd536
--- /dev/null
@@ -0,0 +1,882 @@
+From stable+bounces-78577-greg=kroah.com@vger.kernel.org Tue Oct  1 19:35:36 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:31:08 +0000
+Subject: thunderbolt: Configure asymmetric link if needed and bandwidth allows
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Gil Fine <gil.fine@linux.intel.com>, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-14-alexandru.gagniuc@hp.com>
+
+From: Gil Fine <gil.fine@linux.intel.com>
+
+[ Upstream commit 3e36528c1127b20492ffaea53930bcc3df46a718 ]
+
+USB4 v2 spec defines a Gen 4 link that can operate as an asymmetric
+120/40G. When the link is asymmetric, the USB4 port on one side of the
+link operates with three TX lanes and one RX lane, while the USB4 port
+on the opposite side of the link operates with three RX lanes and one TX
+lane. Using asymmetric link we can get much more bandwidth from one
+direction and that allows us to support the new Ultra High Bit Rate
+DisplayPort modes (that consume up to 77.37 Gb/s).
+
+Add the basic logic for changing Gen 4 links to asymmetric and back
+following the below rules:
+
+  1) The default threshold is 45 Gb/s (tunable by asym_threshold)
+  2) When DisplayPort tunnel is established, or when there is bandwidth
+     request through bandwidth allocation mode, the links can be
+     transitioned to asymmetric or symmetric (depending on the
+     required bandwidth).
+  3) Only DisplayPort bandwidth on a link is taken into account when
+     deciding whether a link is transitioned to asymmetric or symmetric
+  4) If bandwidth on a link is >= asym_threshold transition the link to
+     asymmetric
+  5) If bandwidth on a link < asym_threshold transition the link to
+     symmetric (unless the bandwidth request is above currently
+     allocated on a tunnel).
+  6) If a USB4 v2 device router with symmetric link is connected,
+     transition all the links above it to symmetric if the bandwidth
+     allows.
+
+Signed-off-by: Gil Fine <gil.fine@linux.intel.com>
+Co-developed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/tb.c |  679 ++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 557 insertions(+), 122 deletions(-)
+
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -16,8 +16,31 @@
+ #include "tb_regs.h"
+ #include "tunnel.h"
+-#define TB_TIMEOUT    100     /* ms */
+-#define MAX_GROUPS    7       /* max Group_ID is 7 */
++#define TB_TIMEOUT            100     /* ms */
++
++/*
++ * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
++ * direction. This is 40G - 10% guard band bandwidth.
++ */
++#define TB_ASYM_MIN           (40000 * 90 / 100)
++
++/*
++ * Threshold bandwidth (in Mb/s) that is used to switch the links to
++ * asymmetric and back. This is selected as 45G which means when the
++ * request is higher than this, we switch the link to asymmetric, and
++ * when it is less than this we switch it back. The 45G is selected so
++ * that we still have 27G (of the total 72G) for bulk PCIe traffic when
++ * switching back to symmetric.
++ */
++#define TB_ASYM_THRESHOLD     45000
++
++#define MAX_GROUPS            7       /* max Group_ID is 7 */
++
++static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
++module_param_named(asym_threshold, asym_threshold, uint, 0444);
++MODULE_PARM_DESC(asym_threshold,
++              "threshold (Mb/s) when to switch Gen 4 link symmetry. 0 disables. (default: "
++              __MODULE_STRING(TB_ASYM_THRESHOLD) ")");
+ /**
+  * struct tb_cm - Simple Thunderbolt connection manager
+@@ -285,14 +308,32 @@ static int tb_enable_clx(struct tb_switc
+       return ret == -EOPNOTSUPP ? 0 : ret;
+ }
+-/* Disables CL states up to the host router */
+-static void tb_disable_clx(struct tb_switch *sw)
++/**
++ * tb_disable_clx() - Disable CL states up to host router
++ * @sw: Router to start
++ *
++ * Disables CL states from @sw up to the host router. Returns true if
++ * any CL states were disabled. This can be used to figure out whether
++ * the link was set up by us or the boot firmware so we don't
++ * accidentally enable them if they were not enabled during discovery.
++ */
++static bool tb_disable_clx(struct tb_switch *sw)
+ {
++      bool disabled = false;
++
+       do {
+-              if (tb_switch_clx_disable(sw) < 0)
++              int ret;
++
++              ret = tb_switch_clx_disable(sw);
++              if (ret > 0)
++                      disabled = true;
++              else if (ret < 0)
+                       tb_sw_warn(sw, "failed to disable CL states\n");
++
+               sw = tb_switch_parent(sw);
+       } while (sw);
++
++      return disabled;
+ }
+ static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
+@@ -572,144 +613,294 @@ static struct tb_tunnel *tb_find_first_u
+       return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
+ }
+-static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
+-      struct tb_port *dst_port, int *available_up, int *available_down)
++/**
++ * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
++ * @tb: Domain structure
++ * @src_port: Source protocol adapter
++ * @dst_port: Destination protocol adapter
++ * @port: USB4 port at which the consumed bandwidth is calculated
++ * @consumed_up: Consumed upstream bandwidth (Mb/s)
++ * @consumed_down: Consumed downstream bandwidth (Mb/s)
++ *
++ * Calculates consumed USB3 and PCIe bandwidth at @port on the path
++ * from @src_port to @dst_port. Does not take the tunnel starting from
++ * @src_port and ending at @dst_port into account.
++ */
++static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
++                                         struct tb_port *src_port,
++                                         struct tb_port *dst_port,
++                                         struct tb_port *port,
++                                         int *consumed_up,
++                                         int *consumed_down)
+ {
+-      int usb3_consumed_up, usb3_consumed_down, ret;
+-      struct tb_cm *tcm = tb_priv(tb);
++      int pci_consumed_up, pci_consumed_down;
+       struct tb_tunnel *tunnel;
+-      struct tb_port *port;
+-      tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
+-             tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
+-             dst_port->port);
++      *consumed_up = *consumed_down = 0;
+       tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+       if (tunnel && tunnel->src_port != src_port &&
+           tunnel->dst_port != dst_port) {
+-              ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
+-                                                 &usb3_consumed_down);
++              int ret;
++
++              ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
++                                                 consumed_down);
+               if (ret)
+                       return ret;
+-      } else {
+-              usb3_consumed_up = 0;
+-              usb3_consumed_down = 0;
+       }
+-      /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
+-      *available_up = *available_down = 120000;
++      /*
++       * If there is anything reserved for PCIe bulk traffic take it
++       * into account here too.
++       */
++      if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
++              *consumed_up += pci_consumed_up;
++              *consumed_down += pci_consumed_down;
++      }
+-      /* Find the minimum available bandwidth over all links */
+-      tb_for_each_port_on_path(src_port, dst_port, port) {
+-              int link_speed, link_width, up_bw, down_bw;
+-              int pci_reserved_up, pci_reserved_down;
++      return 0;
++}
+-              if (!tb_port_is_null(port))
++/**
++ * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
++ * @tb: Domain structure
++ * @src_port: Source protocol adapter
++ * @dst_port: Destination protocol adapter
++ * @port: USB4 port at which the consumed bandwidth is calculated
++ * @consumed_up: Consumed upstream bandwidth (Mb/s)
++ * @consumed_down: Consumed downstream bandwidth (Mb/s)
++ *
++ * Calculates consumed DP bandwidth at @port on the path from @src_port
++ * to @dst_port. Does not take the tunnel starting from @src_port and
++ * ending at @dst_port into account.
++ */
++static int tb_consumed_dp_bandwidth(struct tb *tb,
++                                  struct tb_port *src_port,
++                                  struct tb_port *dst_port,
++                                  struct tb_port *port,
++                                  int *consumed_up,
++                                  int *consumed_down)
++{
++      struct tb_cm *tcm = tb_priv(tb);
++      struct tb_tunnel *tunnel;
++      int ret;
++
++      *consumed_up = *consumed_down = 0;
++
++      /*
++       * Find all DP tunnels that cross the port and reduce
++       * their consumed bandwidth from the available.
++       */
++      list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
++              int dp_consumed_up, dp_consumed_down;
++
++              if (tb_tunnel_is_invalid(tunnel))
+                       continue;
+-              if (tb_is_upstream_port(port)) {
+-                      link_speed = port->sw->link_speed;
++              if (!tb_tunnel_is_dp(tunnel))
++                      continue;
++
++              if (!tb_tunnel_port_on_path(tunnel, port))
++                      continue;
++
++              /*
++               * Ignore the DP tunnel between src_port and dst_port
++               * because it is the same tunnel and we may be
++               * re-calculating estimated bandwidth.
++               */
++              if (tunnel->src_port == src_port &&
++                  tunnel->dst_port == dst_port)
++                      continue;
++
++              ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
++                                                 &dp_consumed_down);
++              if (ret)
++                      return ret;
++
++              *consumed_up += dp_consumed_up;
++              *consumed_down += dp_consumed_down;
++      }
++
++      return 0;
++}
++
++static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
++                            struct tb_port *port)
++{
++      bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
++      enum tb_link_width width;
++
++      if (tb_is_upstream_port(port))
++              width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
++      else
++              width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;
++
++      return tb_port_width_supported(port, width);
++}
++
++/**
++ * tb_maximum_bandwidth() - Maximum bandwidth over a single link
++ * @tb: Domain structure
++ * @src_port: Source protocol adapter
++ * @dst_port: Destination protocol adapter
++ * @port: USB4 port at which the total bandwidth is calculated
++ * @max_up: Maximum upstream bandwidth (Mb/s)
++ * @max_down: Maximum downstream bandwidth (Mb/s)
++ * @include_asym: Include bandwidth if the link is switched from
++ *              symmetric to asymmetric
++ *
++ * Returns maximum possible bandwidth in @max_up and @max_down over a
++ * single link at @port. If @include_asym is set then includes the
++ * additional bandwidth if the links are transitioned into asymmetric in
++ * the direction from @src_port to @dst_port.
++ */
++static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
++                              struct tb_port *dst_port, struct tb_port *port,
++                              int *max_up, int *max_down, bool include_asym)
++{
++      bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
++      int link_speed, link_width, up_bw, down_bw;
++
++      /*
++       * Can include asymmetric, only if it is actually supported by
++       * the lane adapter.
++       */
++      if (!tb_asym_supported(src_port, dst_port, port))
++              include_asym = false;
++
++      if (tb_is_upstream_port(port)) {
++              link_speed = port->sw->link_speed;
++              /*
++               * sw->link_width is from upstream perspective so we use
++               * the opposite for downstream of the host router.
++               */
++              if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
++                      up_bw = link_speed * 3 * 1000;
++                      down_bw = link_speed * 1 * 1000;
++              } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
++                      up_bw = link_speed * 1 * 1000;
++                      down_bw = link_speed * 3 * 1000;
++              } else if (include_asym) {
+                       /*
+-                       * sw->link_width is from upstream perspective
+-                       * so we use the opposite for downstream of the
+-                       * host router.
++                       * The link is symmetric at the moment but we
++                       * can switch it to asymmetric as needed. Report
++                       * this bandwidth as available (even though it
++                       * is not yet enabled).
+                        */
+-                      if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
+-                              up_bw = link_speed * 3 * 1000;
+-                              down_bw = link_speed * 1 * 1000;
+-                      } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
++                      if (downstream) {
+                               up_bw = link_speed * 1 * 1000;
+                               down_bw = link_speed * 3 * 1000;
+                       } else {
+-                              up_bw = link_speed * port->sw->link_width * 1000;
+-                              down_bw = up_bw;
++                              up_bw = link_speed * 3 * 1000;
++                              down_bw = link_speed * 1 * 1000;
+                       }
+               } else {
+-                      link_speed = tb_port_get_link_speed(port);
+-                      if (link_speed < 0)
+-                              return link_speed;
+-
+-                      link_width = tb_port_get_link_width(port);
+-                      if (link_width < 0)
+-                              return link_width;
+-
+-                      if (link_width == TB_LINK_WIDTH_ASYM_TX) {
++                      up_bw = link_speed * port->sw->link_width * 1000;
++                      down_bw = up_bw;
++              }
++      } else {
++              link_speed = tb_port_get_link_speed(port);
++              if (link_speed < 0)
++                      return link_speed;
++
++              link_width = tb_port_get_link_width(port);
++              if (link_width < 0)
++                      return link_width;
++
++              if (link_width == TB_LINK_WIDTH_ASYM_TX) {
++                      up_bw = link_speed * 1 * 1000;
++                      down_bw = link_speed * 3 * 1000;
++              } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
++                      up_bw = link_speed * 3 * 1000;
++                      down_bw = link_speed * 1 * 1000;
++              } else if (include_asym) {
++                      /*
++                       * The link is symmetric at the moment but we
++                       * can switch it to asymmetric as needed. Report
++                       * this bandwidth as available (even though it
++                       * is not yet enabled).
++                       */
++                      if (downstream) {
+                               up_bw = link_speed * 1 * 1000;
+                               down_bw = link_speed * 3 * 1000;
+-                      } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
++                      } else {
+                               up_bw = link_speed * 3 * 1000;
+                               down_bw = link_speed * 1 * 1000;
+-                      } else {
+-                              up_bw = link_speed * link_width * 1000;
+-                              down_bw = up_bw;
+                       }
++              } else {
++                      up_bw = link_speed * link_width * 1000;
++                      down_bw = up_bw;
+               }
++      }
+-              /* Leave 10% guard band */
+-              up_bw -= up_bw / 10;
+-              down_bw -= down_bw / 10;
+-
+-              tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
+-                          down_bw);
+-
+-              /*
+-               * Find all DP tunnels that cross the port and reduce
+-               * their consumed bandwidth from the available.
+-               */
+-              list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+-                      int dp_consumed_up, dp_consumed_down;
++      /* Leave 10% guard band */
++      *max_up = up_bw - up_bw / 10;
++      *max_down = down_bw - down_bw / 10;
+-                      if (tb_tunnel_is_invalid(tunnel))
+-                              continue;
++      tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
++      return 0;
++}
+-                      if (!tb_tunnel_is_dp(tunnel))
+-                              continue;
++/**
++ * tb_available_bandwidth() - Available bandwidth for tunneling
++ * @tb: Domain structure
++ * @src_port: Source protocol adapter
++ * @dst_port: Destination protocol adapter
++ * @available_up: Available bandwidth upstream (Mb/s)
++ * @available_down: Available bandwidth downstream (Mb/s)
++ * @include_asym: Include bandwidth if the link is switched from
++ *              symmetric to asymmetric
++ *
++ * Calculates maximum available bandwidth for protocol tunneling between
++ * @src_port and @dst_port at the moment. This is the minimum of the
++ * maximum link bandwidth across all links, reduced by the bandwidth
++ * currently consumed on each link.
++ *
++ * If @include_asym is true then includes also bandwidth that can be
++ * added when the links are transitioned into asymmetric (but does not
++ * transition the links).
++ */
++static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
++                               struct tb_port *dst_port, int *available_up,
++                               int *available_down, bool include_asym)
++{
++      struct tb_port *port;
++      int ret;
+-                      if (!tb_tunnel_port_on_path(tunnel, port))
+-                              continue;
++      /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
++      *available_up = *available_down = 120000;
+-                      /*
+-                       * Ignore the DP tunnel between src_port and
+-                       * dst_port because it is the same tunnel and we
+-                       * may be re-calculating estimated bandwidth.
+-                       */
+-                      if (tunnel->src_port == src_port &&
+-                          tunnel->dst_port == dst_port)
+-                              continue;
++      /* Find the minimum available bandwidth over all links */
++      tb_for_each_port_on_path(src_port, dst_port, port) {
++              int max_up, max_down, consumed_up, consumed_down;
+-                      ret = tb_tunnel_consumed_bandwidth(tunnel,
+-                                                         &dp_consumed_up,
+-                                                         &dp_consumed_down);
+-                      if (ret)
+-                              return ret;
++              if (!tb_port_is_null(port))
++                      continue;
+-                      up_bw -= dp_consumed_up;
+-                      down_bw -= dp_consumed_down;
+-              }
++              ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
++                                         &max_up, &max_down, include_asym);
++              if (ret)
++                      return ret;
+-              /*
+-               * If USB3 is tunneled from the host router down to the
+-               * branch leading to port we need to take USB3 consumed
+-               * bandwidth into account regardless whether it actually
+-               * crosses the port.
+-               */
+-              up_bw -= usb3_consumed_up;
+-              down_bw -= usb3_consumed_down;
++              ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
++                                                    port, &consumed_up,
++                                                    &consumed_down);
++              if (ret)
++                      return ret;
++              max_up -= consumed_up;
++              max_down -= consumed_down;
+-              /*
+-               * If there is anything reserved for PCIe bulk traffic
+-               * take it into account here too.
+-               */
+-              if (tb_tunnel_reserved_pci(port, &pci_reserved_up,
+-                                         &pci_reserved_down)) {
+-                      up_bw -= pci_reserved_up;
+-                      down_bw -= pci_reserved_down;
+-              }
++              ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
++                                             &consumed_up, &consumed_down);
++              if (ret)
++                      return ret;
++              max_up -= consumed_up;
++              max_down -= consumed_down;
+-              if (up_bw < *available_up)
+-                      *available_up = up_bw;
+-              if (down_bw < *available_down)
+-                      *available_down = down_bw;
++              if (max_up < *available_up)
++                      *available_up = max_up;
++              if (max_down < *available_down)
++                      *available_down = max_down;
+       }
+       if (*available_up < 0)
+@@ -747,7 +938,7 @@ static void tb_reclaim_usb3_bandwidth(st
+        * That determines the whole USB3 bandwidth for this branch.
+        */
+       ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
+-                                   &available_up, &available_down);
++                                   &available_up, &available_down, false);
+       if (ret) {
+               tb_warn(tb, "failed to calculate available bandwidth\n");
+               return;
+@@ -805,8 +996,8 @@ static int tb_tunnel_usb3(struct tb *tb,
+                       return ret;
+       }
+-      ret = tb_available_bandwidth(tb, down, up, &available_up,
+-                                   &available_down);
++      ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
++                                   false);
+       if (ret)
+               goto err_reclaim;
+@@ -867,6 +1058,225 @@ static int tb_create_usb3_tunnels(struct
+       return 0;
+ }
++/**
++ * tb_configure_asym() - Transition links to asymmetric if needed
++ * @tb: Domain structure
++ * @src_port: Source adapter to start the transition
++ * @dst_port: Destination adapter
++ * @requested_up: Additional bandwidth (Mb/s) required upstream
++ * @requested_down: Additional bandwidth (Mb/s) required downstream
++ *
++ * Transition links between @src_port and @dst_port into asymmetric, with
++ * three lanes in the direction from @src_port towards @dst_port and one lane
++ * in the opposite direction, if the bandwidth requirements
++ * (requested + currently consumed) on that link exceed @asym_threshold.
++ *
++ * Must be called with available >= requested over all links.
++ */
++static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
++                           struct tb_port *dst_port, int requested_up,
++                           int requested_down)
++{
++      struct tb_switch *sw;
++      bool clx, downstream;
++      struct tb_port *up;
++      int ret = 0;
++
++      if (!asym_threshold)
++              return 0;
++
++      /* Disable CL states before doing any transitions */
++      downstream = tb_port_path_direction_downstream(src_port, dst_port);
++      /* Pick up router deepest in the hierarchy */
++      if (downstream)
++              sw = dst_port->sw;
++      else
++              sw = src_port->sw;
++
++      clx = tb_disable_clx(sw);
++
++      tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
++              int consumed_up, consumed_down;
++              enum tb_link_width width;
++
++              ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
++                                             &consumed_up, &consumed_down);
++              if (ret)
++                      break;
++
++              if (downstream) {
++                      /*
++                       * Downstream, so make sure the upstream direction stays
++                       * within 36G (40G - 10% guard band) and that consumed +
++                       * requested is above the threshold.
++                       */
++                      if (consumed_up + requested_up >= TB_ASYM_MIN) {
++                              ret = -ENOBUFS;
++                              break;
++                      }
++                      /* Does consumed + requested exceed the threshold */
++                      if (consumed_down + requested_down < asym_threshold)
++                              continue;
++
++                      width = TB_LINK_WIDTH_ASYM_RX;
++              } else {
++                      /* Upstream, the opposite of above */
++                      if (consumed_down + requested_down >= TB_ASYM_MIN) {
++                              ret = -ENOBUFS;
++                              break;
++                      }
++                      if (consumed_up + requested_up < asym_threshold)
++                              continue;
++
++                      width = TB_LINK_WIDTH_ASYM_TX;
++              }
++
++              if (up->sw->link_width == width)
++                      continue;
++
++              if (!tb_port_width_supported(up, width))
++                      continue;
++
++              tb_sw_dbg(up->sw, "configuring asymmetric link\n");
++
++              /*
++               * Here requested + consumed > threshold so we need to
++               * transition the link into asymmetric now.
++               */
++              ret = tb_switch_set_link_width(up->sw, width);
++              if (ret) {
++                      tb_sw_warn(up->sw, "failed to set link width\n");
++                      break;
++              }
++      }
++
++      /* Re-enable CL states if they were previously enabled */
++      if (clx)
++              tb_enable_clx(sw);
++
++      return ret;
++}
++
++/**
++ * tb_configure_sym() - Transition links to symmetric if possible
++ * @tb: Domain structure
++ * @src_port: Source adapter to start the transition
++ * @dst_port: Destination adapter
++ * @requested_up: New lower bandwidth request upstream (Mb/s)
++ * @requested_down: New lower bandwidth request downstream (Mb/s)
++ *
++ * Goes over each link from @src_port to @dst_port and tries to
++ * transition the link to symmetric if the currently consumed bandwidth
++ * allows.
++ */
++static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
++                          struct tb_port *dst_port, int requested_up,
++                          int requested_down)
++{
++      struct tb_switch *sw;
++      bool clx, downstream;
++      struct tb_port *up;
++      int ret = 0;
++
++      if (!asym_threshold)
++              return 0;
++
++      /* Disable CL states before doing any transitions */
++      downstream = tb_port_path_direction_downstream(src_port, dst_port);
++      /* Pick up router deepest in the hierarchy */
++      if (downstream)
++              sw = dst_port->sw;
++      else
++              sw = src_port->sw;
++
++      clx = tb_disable_clx(sw);
++
++      tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
++              int consumed_up, consumed_down;
++
++              /* Already symmetric */
++              if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
++                      continue;
++              /* Unplugged, no need to switch */
++              if (up->sw->is_unplugged)
++                      continue;
++
++              ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
++                                             &consumed_up, &consumed_down);
++              if (ret)
++                      break;
++
++              if (downstream) {
++                      /*
++                       * Downstream so we want the consumed_down < threshold.
++                       * Upstream traffic should be less than 36G (40G - 10%
++                       * guard band) as the link was already configured
++                       * asymmetric.
++                       */
++                      if (consumed_down + requested_down >= asym_threshold)
++                              continue;
++              } else {
++                      if (consumed_up + requested_up >= asym_threshold)
++                              continue;
++              }
++
++              if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
++                      continue;
++
++              tb_sw_dbg(up->sw, "configuring symmetric link\n");
++
++              ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
++              if (ret) {
++                      tb_sw_warn(up->sw, "failed to set link width\n");
++                      break;
++              }
++      }
++
++      /* Re-enable CL states if they were previously enabled */
++      if (clx)
++              tb_enable_clx(sw);
++
++      return ret;
++}
++
++static void tb_configure_link(struct tb_port *down, struct tb_port *up,
++                            struct tb_switch *sw)
++{
++      struct tb *tb = sw->tb;
++
++      /* Link the routers using both links if available */
++      down->remote = up;
++      up->remote = down;
++      if (down->dual_link_port && up->dual_link_port) {
++              down->dual_link_port->remote = up->dual_link_port;
++              up->dual_link_port->remote = down->dual_link_port;
++      }
++
++      /*
++       * Enable lane bonding if the link is currently two single lane
++       * links.
++       */
++      if (sw->link_width < TB_LINK_WIDTH_DUAL)
++              tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
++
++      /*
++       * Device router that comes up as symmetric link is
++       * connected deeper in the hierarchy, we transition the links
++       * above into symmetric if bandwidth allows.
++       */
++      if (tb_switch_depth(sw) > 1 &&
++          tb_port_get_link_generation(up) >= 4 &&
++          up->sw->link_width == TB_LINK_WIDTH_DUAL) {
++              struct tb_port *host_port;
++
++              host_port = tb_port_at(tb_route(sw), tb->root_switch);
++              tb_configure_sym(tb, host_port, up, 0, 0);
++      }
++
++      /* Set the link configured */
++      tb_switch_configure_link(sw);
++}
++
+ static void tb_scan_port(struct tb_port *port);
+ /*
+@@ -975,19 +1385,9 @@ static void tb_scan_port(struct tb_port
+               goto out_rpm_put;
+       }
+-      /* Link the switches using both links if available */
+       upstream_port = tb_upstream_port(sw);
+-      port->remote = upstream_port;
+-      upstream_port->remote = port;
+-      if (port->dual_link_port && upstream_port->dual_link_port) {
+-              port->dual_link_port->remote = upstream_port->dual_link_port;
+-              upstream_port->dual_link_port->remote = port->dual_link_port;
+-      }
++      tb_configure_link(port, upstream_port, sw);
+-      /* Enable lane bonding if supported */
+-      tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
+-      /* Set the link configured */
+-      tb_switch_configure_link(sw);
+       /*
+        * CL0s and CL1 are enabled and supported together.
+        * Silently ignore CLx enabling in case CLx is not supported.
+@@ -1051,6 +1451,11 @@ static void tb_deactivate_and_free_tunne
+                * deallocated properly.
+                */
+               tb_switch_dealloc_dp_resource(src_port->sw, src_port);
++              /*
++               * If bandwidth on a link is < asym_threshold
++               * transition the link to symmetric.
++               */
++              tb_configure_sym(tb, src_port, dst_port, 0, 0);
+               /* Now we can allow the domain to runtime suspend again */
+               pm_runtime_mark_last_busy(&dst_port->sw->dev);
+               pm_runtime_put_autosuspend(&dst_port->sw->dev);
+@@ -1208,7 +1613,7 @@ tb_recalc_estimated_bandwidth_for_group(
+               out = tunnel->dst_port;
+               ret = tb_available_bandwidth(tb, in, out, &estimated_up,
+-                                           &estimated_down);
++                                           &estimated_down, true);
+               if (ret) {
+                       tb_port_warn(in,
+                               "failed to re-calculate estimated bandwidth\n");
+@@ -1299,6 +1704,7 @@ static bool tb_tunnel_one_dp(struct tb *
+       int available_up, available_down, ret, link_nr;
+       struct tb_cm *tcm = tb_priv(tb);
+       struct tb_port *port, *in, *out;
++      int consumed_up, consumed_down;
+       struct tb_tunnel *tunnel;
+       /*
+@@ -1375,7 +1781,8 @@ static bool tb_tunnel_one_dp(struct tb *
+               goto err_detach_group;
+       }
+-      ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
++      ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
++                                   true);
+       if (ret)
+               goto err_reclaim_usb;
+@@ -1397,6 +1804,13 @@ static bool tb_tunnel_one_dp(struct tb *
+       list_add_tail(&tunnel->list, &tcm->tunnel_list);
+       tb_reclaim_usb3_bandwidth(tb, in, out);
++      /*
++       * Transition the links to asymmetric if the consumption exceeds
++       * the threshold.
++       */
++      if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
++              tb_configure_asym(tb, in, out, consumed_up, consumed_down);
++
+       /* Update the domain with the new bandwidth estimation */
+       tb_recalc_estimated_bandwidth(tb);
+@@ -1904,6 +2318,11 @@ static int tb_alloc_dp_bandwidth(struct
+       if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
+           (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
+               /*
++               * If bandwidth on a link is < asym_threshold transition
++               * the link to symmetric.
++               */
++              tb_configure_sym(tb, in, out, *requested_up, *requested_down);
++              /*
+                * If requested bandwidth is less or equal than what is
+                * currently allocated to that tunnel we simply change
+                * the reservation of the tunnel. Since all the tunnels
+@@ -1928,7 +2347,8 @@ static int tb_alloc_dp_bandwidth(struct
+        * are also in the same group but we use the same function here
+        * that we use with the normal bandwidth allocation).
+        */
+-      ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
++      ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
++                                   true);
+       if (ret)
+               goto reclaim;
+@@ -1937,8 +2357,23 @@ static int tb_alloc_dp_bandwidth(struct
+       if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
+           (*requested_down >= 0 && available_down >= requested_down_corrected)) {
++              /*
++               * If bandwidth on a link is >= asym_threshold
++               * transition the link to asymmetric.
++               */
++              ret = tb_configure_asym(tb, in, out, *requested_up,
++                                      *requested_down);
++              if (ret) {
++                      tb_configure_sym(tb, in, out, 0, 0);
++                      return ret;
++              }
++
+               ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
+                                               requested_down);
++              if (ret) {
++                      tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
++                      tb_configure_sym(tb, in, out, 0, 0);
++              }
+       } else {
+               ret = -ENOBUFS;
+       }
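
For illustration only - not part of the patch itself - here is a minimal,
standalone C sketch of the threshold decision that tb_configure_asym() and
tb_configure_sym() make above, for a downstream-heavy DisplayPort load on a
Gen 4 link. The 36000 Mb/s lower-direction budget and the 25000 Mb/s
threshold are assumptions taken from the comments in the hunk, not values
read from the driver.

#include <stdio.h>

enum link_width { WIDTH_DUAL, WIDTH_ASYM_TX, WIDTH_ASYM_RX };

/*
 * Pick the target width of a Gen 4 link for a downstream-heavy load.
 * Mirrors the shape of the checks in tb_configure_asym(): the lighter
 * (upstream) direction must fit in the single-lane budget, and the heavier
 * (downstream) direction must exceed the threshold before going asymmetric.
 */
static int pick_width(int consumed_up, int requested_up,
		      int consumed_down, int requested_down,
		      int asym_threshold, int asym_min)
{
	if (consumed_up + requested_up >= asym_min)
		return -1;			/* would not fit, like -ENOBUFS */
	if (consumed_down + requested_down < asym_threshold)
		return WIDTH_DUAL;		/* stay (or go back to) symmetric */
	return WIDTH_ASYM_RX;			/* 3 lanes downstream, 1 upstream */
}

int main(void)
{
	/* e.g. 500 Mb/s AUX upstream, 17280 + 8640 Mb/s video downstream */
	int w = pick_width(500, 0, 17280, 8640, 25000, 36000);

	printf("target width: %d\n", w);
	return 0;
}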
diff --git a/queue-6.6/thunderbolt-create-multiple-displayport-tunnels-if-there-are-more-dp-in-out-pairs.patch b/queue-6.6/thunderbolt-create-multiple-displayport-tunnels-if-there-are-more-dp-in-out-pairs.patch
new file mode 100644 (file)
index 0000000..69a578b
--- /dev/null
@@ -0,0 +1,91 @@
+From stable+bounces-78568-greg=kroah.com@vger.kernel.org Tue Oct  1 19:32:55 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:30:59 +0000
+Subject: thunderbolt: Create multiple DisplayPort tunnels if there are more DP IN/OUT pairs
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Gil Fine <gil.fine@linux.intel.com>, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-5-alexandru.gagniuc@hp.com>
+
+From: Gil Fine <gil.fine@linux.intel.com>
+
+[ Upstream commit 8648c6465c025c488e2855c209c0dea1a1a15184 ]
+
+Currently we create only one DisplayPort tunnel even if more DP IN/OUT
+pairs are available. Specifically, this happens when a router is unplugged
+and we check whether a new DisplayPort tunnel can be created. To cover
+this, create tunnels as long as we find suitable DP IN/OUT pairs.
+
+Signed-off-by: Gil Fine <gil.fine@linux.intel.com>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/tb.c |   26 +++++++++++++++++---------
+ 1 file changed, 17 insertions(+), 9 deletions(-)
+
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -1282,18 +1282,13 @@ static struct tb_port *tb_find_dp_out(st
+       return NULL;
+ }
+-static void tb_tunnel_dp(struct tb *tb)
++static bool tb_tunnel_one_dp(struct tb *tb)
+ {
+       int available_up, available_down, ret, link_nr;
+       struct tb_cm *tcm = tb_priv(tb);
+       struct tb_port *port, *in, *out;
+       struct tb_tunnel *tunnel;
+-      if (!tb_acpi_may_tunnel_dp()) {
+-              tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
+-              return;
+-      }
+-
+       /*
+        * Find pair of inactive DP IN and DP OUT adapters and then
+        * establish a DP tunnel between them.
+@@ -1321,11 +1316,11 @@ static void tb_tunnel_dp(struct tb *tb)
+       if (!in) {
+               tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+-              return;
++              return false;
+       }
+       if (!out) {
+               tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
+-              return;
++              return false;
+       }
+       /*
+@@ -1398,7 +1393,7 @@ static void tb_tunnel_dp(struct tb *tb)
+        * TMU mode to HiFi for CL0s to work.
+        */
+       tb_increase_tmu_accuracy(tunnel);
+-      return;
++      return true;
+ err_free:
+       tb_tunnel_free(tunnel);
+@@ -1413,6 +1408,19 @@ err_rpm_put:
+       pm_runtime_put_autosuspend(&out->sw->dev);
+       pm_runtime_mark_last_busy(&in->sw->dev);
+       pm_runtime_put_autosuspend(&in->sw->dev);
++
++      return false;
++}
++
++static void tb_tunnel_dp(struct tb *tb)
++{
++      if (!tb_acpi_may_tunnel_dp()) {
++              tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
++              return;
++      }
++
++      while (tb_tunnel_one_dp(tb))
++              ;
+ }
+ static void tb_enter_redrive(struct tb_port *port)
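
For illustration only - not part of the patch itself - a small standalone
sketch of the reworked tb_tunnel_dp() control flow: keep attempting one
tunnel at a time until no suitable DP IN/OUT pair is left. try_one_pair()
is a hypothetical stand-in for tb_tunnel_one_dp(); the adapter counts are
made up for the example.

#include <stdbool.h>
#include <stdio.h>

static int free_dp_in = 2, free_dp_out = 3;	/* pretend adapter counts */

/* Stand-in for tb_tunnel_one_dp(): true means one more tunnel was set up */
static bool try_one_pair(void)
{
	if (!free_dp_in || !free_dp_out)
		return false;
	free_dp_in--;
	free_dp_out--;
	return true;
}

int main(void)
{
	int tunnels = 0;

	/* Same loop shape as the reworked tb_tunnel_dp() */
	while (try_one_pair())
		tunnels++;

	printf("established %d tunnels\n", tunnels);	/* prints 2 */
	return 0;
}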
diff --git a/queue-6.6/thunderbolt-expose-tb_tunnel_xxx-log-macros-to-the-rest-of-the-driver.patch b/queue-6.6/thunderbolt-expose-tb_tunnel_xxx-log-macros-to-the-rest-of-the-driver.patch
new file mode 100644 (file)
index 0000000..a72cd33
--- /dev/null
@@ -0,0 +1,94 @@
+From stable+bounces-78567-greg=kroah.com@vger.kernel.org Tue Oct  1 19:32:47 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:30:58 +0000
+Subject: thunderbolt: Expose tb_tunnel_xxx() log macros to the rest of the driver
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-4-alexandru.gagniuc@hp.com>
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+[ Upstream commit d27bd2c37d4666bce25ec4d9ac8c6b169992f0f0 ]
+
+In order to allow more consistent logging of tunnel-related information,
+make these logging macros available to the rest of the driver.
+
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/tunnel.c |   26 +++++---------------------
+ drivers/thunderbolt/tunnel.h |   24 +++++++++++++++++++++++-
+ 2 files changed, 28 insertions(+), 22 deletions(-)
+
+--- a/drivers/thunderbolt/tunnel.c
++++ b/drivers/thunderbolt/tunnel.c
+@@ -58,27 +58,6 @@ MODULE_PARM_DESC(bw_alloc_mode,
+ static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
+-#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
+-      do {                                                            \
+-              struct tb_tunnel *__tunnel = (tunnel);                  \
+-              level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt,   \
+-                    tb_route(__tunnel->src_port->sw),                 \
+-                    __tunnel->src_port->port,                         \
+-                    tb_route(__tunnel->dst_port->sw),                 \
+-                    __tunnel->dst_port->port,                         \
+-                    tb_tunnel_names[__tunnel->type],                  \
+-                    ## arg);                                          \
+-      } while (0)
+-
+-#define tb_tunnel_WARN(tunnel, fmt, arg...) \
+-      __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
+-#define tb_tunnel_warn(tunnel, fmt, arg...) \
+-      __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
+-#define tb_tunnel_info(tunnel, fmt, arg...) \
+-      __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
+-#define tb_tunnel_dbg(tunnel, fmt, arg...) \
+-      __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
+-
+ static inline unsigned int tb_usable_credits(const struct tb_port *port)
+ {
+       return port->total_credits - port->ctl_credits;
+@@ -2382,3 +2361,8 @@ void tb_tunnel_reclaim_available_bandwid
+               tunnel->reclaim_available_bandwidth(tunnel, available_up,
+                                                   available_down);
+ }
++
++const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
++{
++      return tb_tunnel_names[tunnel->type];
++}
+--- a/drivers/thunderbolt/tunnel.h
++++ b/drivers/thunderbolt/tunnel.h
+@@ -137,5 +137,27 @@ static inline bool tb_tunnel_is_usb3(con
+       return tunnel->type == TB_TUNNEL_USB3;
+ }
+-#endif
++const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);
++
++#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
++      do {                                                            \
++              struct tb_tunnel *__tunnel = (tunnel);                  \
++              level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt,   \
++                    tb_route(__tunnel->src_port->sw),                 \
++                    __tunnel->src_port->port,                         \
++                    tb_route(__tunnel->dst_port->sw),                 \
++                    __tunnel->dst_port->port,                         \
++                    tb_tunnel_type_name(__tunnel),                    \
++                    ## arg);                                          \
++      } while (0)
++#define tb_tunnel_WARN(tunnel, fmt, arg...) \
++      __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
++#define tb_tunnel_warn(tunnel, fmt, arg...) \
++      __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
++#define tb_tunnel_info(tunnel, fmt, arg...) \
++      __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
++#define tb_tunnel_dbg(tunnel, fmt, arg...) \
++      __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
++
++#endif
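
For illustration only - not part of the patch itself - the relocated
__TB_TUNNEL_PRINT() macro relies on two common kernel idioms: a
do { ... } while (0) wrapper so the macro behaves like a single statement,
and GNU-style named variadic arguments with ## arg so the format string can
be used with or without extra arguments. A compilable userspace sketch of
the same pattern, with printf standing in for the tb_* log helpers:

#include <stdio.h>

struct tunnel { int src, dst; const char *type; };

#define tunnel_dbg(tunnel, fmt, arg...)					\
	do {								\
		const struct tunnel *__t = (tunnel);			\
		printf("%d <-> %d (%s): " fmt, __t->src, __t->dst,	\
		       __t->type, ## arg);				\
	} while (0)

int main(void)
{
	struct tunnel t = { 1, 3, "DP" };

	tunnel_dbg(&t, "activated\n");			/* no extra arguments */
	tunnel_dbg(&t, "consumed %d Mb/s\n", 17280);	/* with an argument */
	return 0;
}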
diff --git a/queue-6.6/thunderbolt-fix-debug-log-when-displayport-adapter-not-available-for-pairing.patch b/queue-6.6/thunderbolt-fix-debug-log-when-displayport-adapter-not-available-for-pairing.patch
new file mode 100644 (file)
index 0000000..c64fefc
--- /dev/null
@@ -0,0 +1,44 @@
+From linux-usb+bounces-15617-greg=kroah.com@vger.kernel.org Tue Oct  1 19:32:19 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:30:56 +0000
+Subject: thunderbolt: Fix debug log when DisplayPort adapter not available for pairing
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Gil Fine <gil.fine@linux.intel.com>, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-2-alexandru.gagniuc@hp.com>
+
+From: Gil Fine <gil.fine@linux.intel.com>
+
+[ Upstream commit 6b8ac54f31f985d3abb0b4212187838dd8ea4227 ]
+
+Fix debug log when looking for a DisplayPort adapter pair of DP IN and
+DP OUT. In case of no DP adapter available, log the type of the DP
+adapter that is not available.
+
+Signed-off-by: Gil Fine <gil.fine@linux.intel.com>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/tb.c |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -1311,13 +1311,12 @@ static void tb_tunnel_dp(struct tb *tb)
+                       continue;
+               }
+-              tb_port_dbg(port, "DP IN available\n");
++              in = port;
++              tb_port_dbg(in, "DP IN available\n");
+               out = tb_find_dp_out(tb, port);
+-              if (out) {
+-                      in = port;
++              if (out)
+                       break;
+-              }
+       }
+       if (!in) {
diff --git a/queue-6.6/thunderbolt-improve-displayport-tunnel-setup-process-to-be-more-robust.patch b/queue-6.6/thunderbolt-improve-displayport-tunnel-setup-process-to-be-more-robust.patch
new file mode 100644 (file)
index 0000000..e026cb1
--- /dev/null
@@ -0,0 +1,160 @@
+From stable+bounces-78578-greg=kroah.com@vger.kernel.org Tue Oct  1 19:35:51 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:31:09 +0000
+Subject: thunderbolt: Improve DisplayPort tunnel setup process to be more robust
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Gil Fine <gil.fine@linux.intel.com>, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-15-alexandru.gagniuc@hp.com>
+
+From: Gil Fine <gil.fine@linux.intel.com>
+
+[ Upstream commit b4734507ac55cc7ea1380e20e83f60fcd7031955 ]
+
+After DisplayPort tunnel setup, add a verification that the DPRX
+capabilities read process completed. Otherwise, bail out, tear down the
+tunnel, and try to set up another DisplayPort tunnel using the next
+available DP IN adapter, until all DP IN adapters have been tried. This
+way we avoid allocating a DP IN adapter (and the bandwidth for it) for an
+unusable tunnel.
+
+Signed-off-by: Gil Fine <gil.fine@linux.intel.com>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/tb.c |   84 ++++++++++++++++++++++++-----------------------
+ 1 file changed, 43 insertions(+), 41 deletions(-)
+
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -1699,49 +1699,15 @@ static struct tb_port *tb_find_dp_out(st
+       return NULL;
+ }
+-static bool tb_tunnel_one_dp(struct tb *tb)
++static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
++                           struct tb_port *out)
+ {
+       int available_up, available_down, ret, link_nr;
+       struct tb_cm *tcm = tb_priv(tb);
+-      struct tb_port *port, *in, *out;
+       int consumed_up, consumed_down;
+       struct tb_tunnel *tunnel;
+       /*
+-       * Find pair of inactive DP IN and DP OUT adapters and then
+-       * establish a DP tunnel between them.
+-       */
+-      tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+-
+-      in = NULL;
+-      out = NULL;
+-      list_for_each_entry(port, &tcm->dp_resources, list) {
+-              if (!tb_port_is_dpin(port))
+-                      continue;
+-
+-              if (tb_port_is_enabled(port)) {
+-                      tb_port_dbg(port, "DP IN in use\n");
+-                      continue;
+-              }
+-
+-              in = port;
+-              tb_port_dbg(in, "DP IN available\n");
+-
+-              out = tb_find_dp_out(tb, port);
+-              if (out)
+-                      break;
+-      }
+-
+-      if (!in) {
+-              tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+-              return false;
+-      }
+-      if (!out) {
+-              tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
+-              return false;
+-      }
+-
+-      /*
+        * This is only applicable to links that are not bonded (so
+        * when Thunderbolt 1 hardware is involved somewhere in the
+        * topology). For these try to share the DP bandwidth between
+@@ -1801,15 +1767,19 @@ static bool tb_tunnel_one_dp(struct tb *
+               goto err_free;
+       }
++      /* If reading the tunnel's consumed bandwidth fails, tear it down */
++      ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
++      if (ret)
++              goto err_deactivate;
++
+       list_add_tail(&tunnel->list, &tcm->tunnel_list);
+-      tb_reclaim_usb3_bandwidth(tb, in, out);
++      tb_reclaim_usb3_bandwidth(tb, in, out);
+       /*
+        * Transition the links to asymmetric if the consumption exceeds
+        * the threshold.
+        */
+-      if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
+-              tb_configure_asym(tb, in, out, consumed_up, consumed_down);
++      tb_configure_asym(tb, in, out, consumed_up, consumed_down);
+       /* Update the domain with the new bandwidth estimation */
+       tb_recalc_estimated_bandwidth(tb);
+@@ -1821,6 +1791,8 @@ static bool tb_tunnel_one_dp(struct tb *
+       tb_increase_tmu_accuracy(tunnel);
+       return true;
++err_deactivate:
++      tb_tunnel_deactivate(tunnel);
+ err_free:
+       tb_tunnel_free(tunnel);
+ err_reclaim_usb:
+@@ -1840,13 +1812,43 @@ err_rpm_put:
+ static void tb_tunnel_dp(struct tb *tb)
+ {
++      struct tb_cm *tcm = tb_priv(tb);
++      struct tb_port *port, *in, *out;
++
+       if (!tb_acpi_may_tunnel_dp()) {
+               tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
+               return;
+       }
+-      while (tb_tunnel_one_dp(tb))
+-              ;
++      /*
++       * Find pair of inactive DP IN and DP OUT adapters and then
++       * establish a DP tunnel between them.
++       */
++      tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
++
++      in = NULL;
++      out = NULL;
++      list_for_each_entry(port, &tcm->dp_resources, list) {
++              if (!tb_port_is_dpin(port))
++                      continue;
++
++              if (tb_port_is_enabled(port)) {
++                      tb_port_dbg(port, "DP IN in use\n");
++                      continue;
++              }
++
++              in = port;
++              tb_port_dbg(in, "DP IN available\n");
++
++              out = tb_find_dp_out(tb, port);
++              if (out)
++                      tb_tunnel_one_dp(tb, in, out);
++              else
++                      tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
++      }
++
++      if (!in)
++              tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+ }
+ static void tb_enter_redrive(struct tb_port *port)
diff --git a/queue-6.6/thunderbolt-introduce-tb_for_each_upstream_port_on_path.patch b/queue-6.6/thunderbolt-introduce-tb_for_each_upstream_port_on_path.patch
new file mode 100644 (file)
index 0000000..2960e5e
--- /dev/null
@@ -0,0 +1,46 @@
+From stable+bounces-78574-greg=kroah.com@vger.kernel.org Tue Oct  1 19:34:43 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:31:05 +0000
+Subject: thunderbolt: Introduce tb_for_each_upstream_port_on_path()
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-11-alexandru.gagniuc@hp.com>
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+[ Upstream commit 956c3abe72fb6a651b8cf77c28462f7e5b6a48b1 ]
+
+This is useful when walking over upstream lane adapters on a given path.
+
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/tb.h |   15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -1076,6 +1076,21 @@ static inline bool tb_port_use_credit_al
+       for ((p) = tb_next_port_on_path((src), (dst), NULL); (p);       \
+            (p) = tb_next_port_on_path((src), (dst), (p)))
++/**
++ * tb_for_each_upstream_port_on_path() - Iterate over each upstream port on path
++ * @src: Source port
++ * @dst: Destination port
++ * @p: Port used as iterator
++ *
++ * Walks over each upstream lane adapter on path from @src to @dst.
++ */
++#define tb_for_each_upstream_port_on_path(src, dst, p)                        \
++      for ((p) = tb_next_port_on_path((src), (dst), NULL); (p);       \
++           (p) = tb_next_port_on_path((src), (dst), (p)))             \
++              if (!tb_port_is_null((p)) || !tb_is_upstream_port((p))) {\
++                      continue;                                       \
++              } else
++
+ int tb_port_get_link_speed(struct tb_port *port);
+ int tb_port_get_link_generation(struct tb_port *port);
+ int tb_port_get_link_width(struct tb_port *port);
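
For illustration only - not part of the patch itself - the filter inside
tb_for_each_upstream_port_on_path() uses a for loop followed by an if/else
so that non-matching ports hit "continue" while matching ports fall through
to the caller's ordinary loop body. A small standalone example of the same
construction, iterating only over even numbers:

#include <stdio.h>

#define for_each_even(i, n)						\
	for ((i) = 0; (i) < (n); (i)++)					\
		if ((i) % 2) {						\
			continue;	/* skip odd values */		\
		} else

int main(void)
{
	int i;

	for_each_even(i, 10)
		printf("%d ", i);	/* prints: 0 2 4 6 8 */
	printf("\n");
	return 0;
}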
diff --git a/queue-6.6/thunderbolt-introduce-tb_port_path_direction_downstream.patch b/queue-6.6/thunderbolt-introduce-tb_port_path_direction_downstream.patch
new file mode 100644 (file)
index 0000000..1bc51a4
--- /dev/null
@@ -0,0 +1,147 @@
+From linux-usb+bounces-15626-greg=kroah.com@vger.kernel.org Tue Oct  1 19:34:26 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:31:04 +0000
+Subject: thunderbolt: Introduce tb_port_path_direction_downstream()
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Gil Fine <gil.fine@linux.intel.com>, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-10-alexandru.gagniuc@hp.com>
+
+From: Gil Fine <gil.fine@linux.intel.com>
+
+[ Upstream commit 2bfeca73e94567c1a117ca45d2e8a25d63e5bd2c ]
+
+Introduce tb_port_path_direction_downstream() to check whether the path
+from the source adapter to the destination adapter is directed downstream.
+Convert existing users to call this helper instead of open-coding it.
+
+No functional changes.
+
+Signed-off-by: Gil Fine <gil.fine@linux.intel.com>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/tb.c     |    6 +++---
+ drivers/thunderbolt/tb.h     |   15 +++++++++++++++
+ drivers/thunderbolt/tunnel.c |   14 +++++++-------
+ 3 files changed, 25 insertions(+), 10 deletions(-)
+
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -553,7 +553,7 @@ static struct tb_tunnel *tb_find_first_u
+       struct tb_switch *sw;
+       /* Pick the router that is deepest in the topology */
+-      if (dst_port->sw->config.depth > src_port->sw->config.depth)
++      if (tb_port_path_direction_downstream(src_port, dst_port))
+               sw = dst_port->sw;
+       else
+               sw = src_port->sw;
+@@ -1223,7 +1223,7 @@ tb_recalc_estimated_bandwidth_for_group(
+               tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
+                           estimated_up, estimated_down);
+-              if (in->sw->config.depth < out->sw->config.depth)
++              if (tb_port_path_direction_downstream(in, out))
+                       estimated_bw = estimated_down;
+               else
+                       estimated_bw = estimated_up;
+@@ -2002,7 +2002,7 @@ static void tb_handle_dp_bandwidth_reque
+       out = tunnel->dst_port;
+-      if (in->sw->config.depth < out->sw->config.depth) {
++      if (tb_port_path_direction_downstream(in, out)) {
+               requested_up = -1;
+               requested_down = requested_bw;
+       } else {
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -1044,6 +1044,21 @@ void tb_port_release_out_hopid(struct tb
+ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
+                                    struct tb_port *prev);
++/**
++ * tb_port_path_direction_downstream() - Checks if path directed downstream
++ * @src: Source adapter
++ * @dst: Destination adapter
++ *
++ * Returns %true only if the specified path from source adapter (@src)
++ * to destination adapter (@dst) is directed downstream.
++ */
++static inline bool
++tb_port_path_direction_downstream(const struct tb_port *src,
++                                const struct tb_port *dst)
++{
++      return src->sw->config.depth < dst->sw->config.depth;
++}
++
+ static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
+ {
+       return tb_port_is_null(port) && port->sw->credit_allocation;
+--- a/drivers/thunderbolt/tunnel.c
++++ b/drivers/thunderbolt/tunnel.c
+@@ -677,7 +677,7 @@ static int tb_dp_xchg_caps(struct tb_tun
+                     "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+                     out_rate, out_lanes, bw);
+-      if (in->sw->config.depth < out->sw->config.depth)
++      if (tb_port_path_direction_downstream(in, out))
+               max_bw = tunnel->max_down;
+       else
+               max_bw = tunnel->max_up;
+@@ -802,7 +802,7 @@ static int tb_dp_bandwidth_alloc_mode_en
+        * max_up/down fields. For discovery we just read what the
+        * estimation was set to.
+        */
+-      if (in->sw->config.depth < out->sw->config.depth)
++      if (tb_port_path_direction_downstream(in, out))
+               estimated_bw = tunnel->max_down;
+       else
+               estimated_bw = tunnel->max_up;
+@@ -972,7 +972,7 @@ static int tb_dp_bandwidth_mode_consumed
+       if (allocated_bw == max_bw)
+               allocated_bw = ret;
+-      if (in->sw->config.depth < out->sw->config.depth) {
++      if (tb_port_path_direction_downstream(in, out)) {
+               *consumed_up = 0;
+               *consumed_down = allocated_bw;
+       } else {
+@@ -1007,7 +1007,7 @@ static int tb_dp_allocated_bandwidth(str
+               if (allocated_bw == max_bw)
+                       allocated_bw = ret;
+-              if (in->sw->config.depth < out->sw->config.depth) {
++              if (tb_port_path_direction_downstream(in, out)) {
+                       *allocated_up = 0;
+                       *allocated_down = allocated_bw;
+               } else {
+@@ -1035,7 +1035,7 @@ static int tb_dp_alloc_bandwidth(struct
+       if (ret < 0)
+               return ret;
+-      if (in->sw->config.depth < out->sw->config.depth) {
++      if (tb_port_path_direction_downstream(in, out)) {
+               tmp = min(*alloc_down, max_bw);
+               ret = usb4_dp_port_allocate_bandwidth(in, tmp);
+               if (ret)
+@@ -1133,7 +1133,7 @@ static int tb_dp_maximum_bandwidth(struc
+       if (ret < 0)
+               return ret;
+-      if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
++      if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+               *max_up = 0;
+               *max_down = ret;
+       } else {
+@@ -1191,7 +1191,7 @@ static int tb_dp_consumed_bandwidth(stru
+               return 0;
+       }
+-      if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
++      if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+               *consumed_up = 0;
+               *consumed_down = tb_dp_bandwidth(rate, lanes);
+       } else {
diff --git a/queue-6.6/thunderbolt-introduce-tb_switch_depth.patch b/queue-6.6/thunderbolt-introduce-tb_switch_depth.patch
new file mode 100644 (file)
index 0000000..06d567e
--- /dev/null
@@ -0,0 +1,62 @@
+From stable+bounces-78575-greg=kroah.com@vger.kernel.org Tue Oct  1 19:34:54 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:31:06 +0000
+Subject: thunderbolt: Introduce tb_switch_depth()
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-12-alexandru.gagniuc@hp.com>
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+[ Upstream commit c4ff14436952c3d0dd05769d76cf48e73a253b48 ]
+
+This is a useful helper for finding out the depth of a connected router.
+Convert the existing users to call this helper instead of open-coding.
+
+No functional changes.
+
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/tb.c |    4 ++--
+ drivers/thunderbolt/tb.h |    9 +++++++++
+ 2 files changed, 11 insertions(+), 2 deletions(-)
+
+--- a/drivers/thunderbolt/tb.c
++++ b/drivers/thunderbolt/tb.c
+@@ -255,13 +255,13 @@ static int tb_enable_clx(struct tb_switc
+        * this in the future to cover the whole topology if it turns
+        * out to be beneficial.
+        */
+-      while (sw && sw->config.depth > 1)
++      while (sw && tb_switch_depth(sw) > 1)
+               sw = tb_switch_parent(sw);
+       if (!sw)
+               return 0;
+-      if (sw->config.depth != 1)
++      if (tb_switch_depth(sw) != 1)
+               return 0;
+       /*
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -868,6 +868,15 @@ static inline struct tb_port *tb_switch_
+       return tb_port_at(tb_route(sw), tb_switch_parent(sw));
+ }
++/**
++ * tb_switch_depth() - Returns depth of the connected router
++ * @sw: Router
++ */
++static inline int tb_switch_depth(const struct tb_switch *sw)
++{
++      return sw->config.depth;
++}
++
+ static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
+ {
+       return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
diff --git a/queue-6.6/thunderbolt-make-is_gen4_link-available-to-the-rest-of-the-driver.patch b/queue-6.6/thunderbolt-make-is_gen4_link-available-to-the-rest-of-the-driver.patch
new file mode 100644 (file)
index 0000000..7c6ae72
--- /dev/null
@@ -0,0 +1,102 @@
+From stable+bounces-78571-greg=kroah.com@vger.kernel.org Tue Oct  1 19:34:04 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:31:02 +0000
+Subject: thunderbolt: Make is_gen4_link() available to the rest of the driver
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Gil Fine <gil.fine@linux.intel.com>, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-8-alexandru.gagniuc@hp.com>
+
+From: Gil Fine <gil.fine@linux.intel.com>
+
+[ Upstream commit aa673d606078da36ebc379f041c794228ac08cb5 ]
+
+Rework the function to return the link generation, update the name to
+tb_port_get_link_generation(), and make it available to the rest of the
+driver. This is needed in the subsequent patches.
+
+Signed-off-by: Gil Fine <gil.fine@linux.intel.com>
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/switch.c |   36 +++++++++++++++++++++++++++++-------
+ drivers/thunderbolt/tb.h     |    1 +
+ 2 files changed, 30 insertions(+), 7 deletions(-)
+
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -922,6 +922,32 @@ int tb_port_get_link_speed(struct tb_por
+ }
+ /**
++ * tb_port_get_link_generation() - Returns link generation
++ * @port: Lane adapter
++ *
++ * Returns link generation as number or negative errno in case of
++ * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
++ * links so for those always returns 2.
++ */
++int tb_port_get_link_generation(struct tb_port *port)
++{
++      int ret;
++
++      ret = tb_port_get_link_speed(port);
++      if (ret < 0)
++              return ret;
++
++      switch (ret) {
++      case 40:
++              return 4;
++      case 20:
++              return 3;
++      default:
++              return 2;
++      }
++}
++
++/**
+  * tb_port_get_link_width() - Get current link width
+  * @port: Port to check (USB4 or CIO)
+  *
+@@ -966,11 +992,6 @@ static bool tb_port_is_width_supported(s
+       return widths & width_mask;
+ }
+-static bool is_gen4_link(struct tb_port *port)
+-{
+-      return tb_port_get_link_speed(port) > 20;
+-}
+-
+ /**
+  * tb_port_set_link_width() - Set target link width of the lane adapter
+  * @port: Lane adapter
+@@ -998,7 +1019,7 @@ int tb_port_set_link_width(struct tb_por
+       switch (width) {
+       case TB_LINK_WIDTH_SINGLE:
+               /* Gen 4 link cannot be single */
+-              if (is_gen4_link(port))
++              if (tb_port_get_link_generation(port) >= 4)
+                       return -EOPNOTSUPP;
+               val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
+                       LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+@@ -1147,7 +1168,8 @@ int tb_port_wait_for_link_width(struct t
+       int ret;
+       /* Gen 4 link does not support single lane */
+-      if ((width_mask & TB_LINK_WIDTH_SINGLE) && is_gen4_link(port))
++      if ((width_mask & TB_LINK_WIDTH_SINGLE) &&
++          tb_port_get_link_generation(port) >= 4)
+               return -EOPNOTSUPP;
+       do {
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -1062,6 +1062,7 @@ static inline bool tb_port_use_credit_al
+            (p) = tb_next_port_on_path((src), (dst), (p)))
+ int tb_port_get_link_speed(struct tb_port *port);
++int tb_port_get_link_generation(struct tb_port *port);
+ int tb_port_get_link_width(struct tb_port *port);
+ int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width);
+ int tb_port_lane_bonding_enable(struct tb_port *port);
diff --git a/queue-6.6/thunderbolt-use-constants-for-path-weight-and-priority.patch b/queue-6.6/thunderbolt-use-constants-for-path-weight-and-priority.patch
new file mode 100644 (file)
index 0000000..9042944
--- /dev/null
@@ -0,0 +1,132 @@
+From stable+bounces-78569-greg=kroah.com@vger.kernel.org Tue Oct  1 19:33:11 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:31:00 +0000
+Subject: thunderbolt: Use constants for path weight and priority
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-6-alexandru.gagniuc@hp.com>
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+[ Upstream commit f73edddfa2a64a185c65a33f100778169c92fc25 ]
+
+Makes it easier to follow and update. No functional changes.
+
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/tunnel.c |   39 +++++++++++++++++++++++++++------------
+ 1 file changed, 27 insertions(+), 12 deletions(-)
+
+--- a/drivers/thunderbolt/tunnel.c
++++ b/drivers/thunderbolt/tunnel.c
+@@ -21,12 +21,18 @@
+ #define TB_PCI_PATH_DOWN              0
+ #define TB_PCI_PATH_UP                        1
++#define TB_PCI_PRIORITY                       3
++#define TB_PCI_WEIGHT                 1
++
+ /* USB3 adapters use always HopID of 8 for both directions */
+ #define TB_USB3_HOPID                 8
+ #define TB_USB3_PATH_DOWN             0
+ #define TB_USB3_PATH_UP                       1
++#define TB_USB3_PRIORITY              3
++#define TB_USB3_WEIGHT                        3
++
+ /* DP adapters use HopID 8 for AUX and 9 for Video */
+ #define TB_DP_AUX_TX_HOPID            8
+ #define TB_DP_AUX_RX_HOPID            8
+@@ -36,6 +42,12 @@
+ #define TB_DP_AUX_PATH_OUT            1
+ #define TB_DP_AUX_PATH_IN             2
++#define TB_DP_VIDEO_PRIORITY          1
++#define TB_DP_VIDEO_WEIGHT            1
++
++#define TB_DP_AUX_PRIORITY            2
++#define TB_DP_AUX_WEIGHT              1
++
+ /* Minimum number of credits needed for PCIe path */
+ #define TB_MIN_PCIE_CREDITS           6U
+ /*
+@@ -46,6 +58,9 @@
+ /* Minimum number of credits for DMA path */
+ #define TB_MIN_DMA_CREDITS            1
++#define TB_DMA_PRIORITY                       5
++#define TB_DMA_WEIGHT                 1
++
+ static unsigned int dma_credits = TB_DMA_CREDITS;
+ module_param(dma_credits, uint, 0444);
+ MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
+@@ -213,8 +228,8 @@ static int tb_pci_init_path(struct tb_pa
+       path->egress_shared_buffer = TB_PATH_NONE;
+       path->ingress_fc_enable = TB_PATH_ALL;
+       path->ingress_shared_buffer = TB_PATH_NONE;
+-      path->priority = 3;
+-      path->weight = 1;
++      path->priority = TB_PCI_PRIORITY;
++      path->weight = TB_PCI_WEIGHT;
+       path->drop_packages = 0;
+       tb_path_for_each_hop(path, hop) {
+@@ -1152,8 +1167,8 @@ static void tb_dp_init_aux_path(struct t
+       path->egress_shared_buffer = TB_PATH_NONE;
+       path->ingress_fc_enable = TB_PATH_ALL;
+       path->ingress_shared_buffer = TB_PATH_NONE;
+-      path->priority = 2;
+-      path->weight = 1;
++      path->priority = TB_DP_AUX_PRIORITY;
++      path->weight = TB_DP_AUX_WEIGHT;
+       tb_path_for_each_hop(path, hop)
+               tb_dp_init_aux_credits(hop);
+@@ -1196,8 +1211,8 @@ static int tb_dp_init_video_path(struct
+       path->egress_shared_buffer = TB_PATH_NONE;
+       path->ingress_fc_enable = TB_PATH_NONE;
+       path->ingress_shared_buffer = TB_PATH_NONE;
+-      path->priority = 1;
+-      path->weight = 1;
++      path->priority = TB_DP_VIDEO_PRIORITY;
++      path->weight = TB_DP_VIDEO_WEIGHT;
+       tb_path_for_each_hop(path, hop) {
+               int ret;
+@@ -1471,8 +1486,8 @@ static int tb_dma_init_rx_path(struct tb
+       path->ingress_fc_enable = TB_PATH_ALL;
+       path->egress_shared_buffer = TB_PATH_NONE;
+       path->ingress_shared_buffer = TB_PATH_NONE;
+-      path->priority = 5;
+-      path->weight = 1;
++      path->priority = TB_DMA_PRIORITY;
++      path->weight = TB_DMA_WEIGHT;
+       path->clear_fc = true;
+       /*
+@@ -1505,8 +1520,8 @@ static int tb_dma_init_tx_path(struct tb
+       path->ingress_fc_enable = TB_PATH_ALL;
+       path->egress_shared_buffer = TB_PATH_NONE;
+       path->ingress_shared_buffer = TB_PATH_NONE;
+-      path->priority = 5;
+-      path->weight = 1;
++      path->priority = TB_DMA_PRIORITY;
++      path->weight = TB_DMA_WEIGHT;
+       path->clear_fc = true;
+       tb_path_for_each_hop(path, hop) {
+@@ -1845,8 +1860,8 @@ static void tb_usb3_init_path(struct tb_
+       path->egress_shared_buffer = TB_PATH_NONE;
+       path->ingress_fc_enable = TB_PATH_ALL;
+       path->ingress_shared_buffer = TB_PATH_NONE;
+-      path->priority = 3;
+-      path->weight = 3;
++      path->priority = TB_USB3_PRIORITY;
++      path->weight = TB_USB3_WEIGHT;
+       path->drop_packages = 0;
+       tb_path_for_each_hop(path, hop)
diff --git a/queue-6.6/thunderbolt-use-tb_tunnel_dbg-where-possible-to-make-logging-more-consistent.patch b/queue-6.6/thunderbolt-use-tb_tunnel_dbg-where-possible-to-make-logging-more-consistent.patch
new file mode 100644 (file)
index 0000000..d0808ae
--- /dev/null
@@ -0,0 +1,218 @@
+From stable+bounces-78566-greg=kroah.com@vger.kernel.org Tue Oct  1 19:32:37 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:30:57 +0000
+Subject: thunderbolt: Use tb_tunnel_dbg() where possible to make logging more consistent
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-3-alexandru.gagniuc@hp.com>
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+[ Upstream commit fe8a0293c922ee8bc1ff0cf9048075afb264004a ]
+
+This makes it easier to tell which tunnel a given message refers to. Also
+drop a couple of lines that generate duplicate information.
+
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/tunnel.c |   65 +++++++++++++++++++------------------------
+ 1 file changed, 30 insertions(+), 35 deletions(-)
+
+--- a/drivers/thunderbolt/tunnel.c
++++ b/drivers/thunderbolt/tunnel.c
+@@ -614,8 +614,9 @@ static int tb_dp_xchg_caps(struct tb_tun
+       in_rate = tb_dp_cap_get_rate(in_dp_cap);
+       in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
+-      tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+-                  in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
++      tb_tunnel_dbg(tunnel,
++                    "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
++                    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
+       /*
+        * If the tunnel bandwidth is limited (max_bw is set) then see
+@@ -624,8 +625,9 @@ static int tb_dp_xchg_caps(struct tb_tun
+       out_rate = tb_dp_cap_get_rate(out_dp_cap);
+       out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
+       bw = tb_dp_bandwidth(out_rate, out_lanes);
+-      tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+-                  out_rate, out_lanes, bw);
++      tb_tunnel_dbg(tunnel,
++                    "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
++                    out_rate, out_lanes, bw);
+       if (in->sw->config.depth < out->sw->config.depth)
+               max_bw = tunnel->max_down;
+@@ -639,13 +641,14 @@ static int tb_dp_xchg_caps(struct tb_tun
+                                            out_rate, out_lanes, &new_rate,
+                                            &new_lanes);
+               if (ret) {
+-                      tb_port_info(out, "not enough bandwidth for DP tunnel\n");
++                      tb_tunnel_info(tunnel, "not enough bandwidth\n");
+                       return ret;
+               }
+               new_bw = tb_dp_bandwidth(new_rate, new_lanes);
+-              tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
+-                          new_rate, new_lanes, new_bw);
++              tb_tunnel_dbg(tunnel,
++                            "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
++                            new_rate, new_lanes, new_bw);
+               /*
+                * Set new rate and number of lanes before writing it to
+@@ -662,7 +665,7 @@ static int tb_dp_xchg_caps(struct tb_tun
+        */
+       if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
+               out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
+-              tb_port_dbg(out, "disabling LTTPR\n");
++              tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
+       }
+       return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
+@@ -712,8 +715,8 @@ static int tb_dp_bandwidth_alloc_mode_en
+       lanes = min(in_lanes, out_lanes);
+       tmp = tb_dp_bandwidth(rate, lanes);
+-      tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate,
+-                  lanes, tmp);
++      tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
++                    rate, lanes, tmp);
+       ret = usb4_dp_port_set_nrd(in, rate, lanes);
+       if (ret)
+@@ -728,15 +731,15 @@ static int tb_dp_bandwidth_alloc_mode_en
+       rate = min(in_rate, out_rate);
+       tmp = tb_dp_bandwidth(rate, lanes);
+-      tb_port_dbg(in,
+-                  "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
+-                  rate, lanes, tmp);
++      tb_tunnel_dbg(tunnel,
++                    "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
++                    rate, lanes, tmp);
+       for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
+            granularity *= 2)
+               ;
+-      tb_port_dbg(in, "granularity %d Mb/s\n", granularity);
++      tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
+       /*
+        * Returns -EINVAL if granularity above is outside of the
+@@ -756,7 +759,7 @@ static int tb_dp_bandwidth_alloc_mode_en
+       else
+               estimated_bw = tunnel->max_up;
+-      tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw);
++      tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
+       ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
+       if (ret)
+@@ -767,7 +770,7 @@ static int tb_dp_bandwidth_alloc_mode_en
+       if (ret)
+               return ret;
+-      tb_port_dbg(in, "bandwidth allocation mode enabled\n");
++      tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
+       return 0;
+ }
+@@ -788,7 +791,7 @@ static int tb_dp_init(struct tb_tunnel *
+       if (!usb4_dp_port_bandwidth_mode_supported(in))
+               return 0;
+-      tb_port_dbg(in, "bandwidth allocation mode supported\n");
++      tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
+       ret = usb4_dp_port_set_cm_id(in, tb->index);
+       if (ret)
+@@ -805,7 +808,7 @@ static void tb_dp_deinit(struct tb_tunne
+               return;
+       if (usb4_dp_port_bandwidth_mode_enabled(in)) {
+               usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
+-              tb_port_dbg(in, "bandwidth allocation mode disabled\n");
++              tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
+       }
+ }
+@@ -921,9 +924,6 @@ static int tb_dp_bandwidth_mode_consumed
+       if (allocated_bw == max_bw)
+               allocated_bw = ret;
+-      tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n",
+-                  allocated_bw);
+-
+       if (in->sw->config.depth < out->sw->config.depth) {
+               *consumed_up = 0;
+               *consumed_down = allocated_bw;
+@@ -1006,9 +1006,6 @@ static int tb_dp_alloc_bandwidth(struct
+       /* Now we can use BW mode registers to figure out the bandwidth */
+       /* TODO: need to handle discovery too */
+       tunnel->bw_mode = true;
+-
+-      tb_port_dbg(in, "allocated bandwidth through allocation mode %d Mb/s\n",
+-                  tmp);
+       return 0;
+ }
+@@ -1035,8 +1032,7 @@ static int tb_dp_read_dprx(struct tb_tun
+                       *rate = tb_dp_cap_get_rate(val);
+                       *lanes = tb_dp_cap_get_lanes(val);
+-                      tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n",
+-                                  tb_dp_bandwidth(*rate, *lanes));
++                      tb_tunnel_dbg(tunnel, "DPRX read done\n");
+                       return 0;
+               }
+               usleep_range(100, 150);
+@@ -1073,9 +1069,6 @@ static int tb_dp_read_cap(struct tb_tunn
+       *rate = tb_dp_cap_get_rate(val);
+       *lanes = tb_dp_cap_get_lanes(val);
+-
+-      tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap,
+-                  tb_dp_bandwidth(*rate, *lanes));
+       return 0;
+ }
+@@ -1253,8 +1246,9 @@ static void tb_dp_dump(struct tb_tunnel
+       rate = tb_dp_cap_get_rate(dp_cap);
+       lanes = tb_dp_cap_get_lanes(dp_cap);
+-      tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+-                  rate, lanes, tb_dp_bandwidth(rate, lanes));
++      tb_tunnel_dbg(tunnel,
++                    "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
++                    rate, lanes, tb_dp_bandwidth(rate, lanes));
+       out = tunnel->dst_port;
+@@ -1265,8 +1259,9 @@ static void tb_dp_dump(struct tb_tunnel
+       rate = tb_dp_cap_get_rate(dp_cap);
+       lanes = tb_dp_cap_get_lanes(dp_cap);
+-      tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+-                  rate, lanes, tb_dp_bandwidth(rate, lanes));
++      tb_tunnel_dbg(tunnel,
++                    "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
++                    rate, lanes, tb_dp_bandwidth(rate, lanes));
+       if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
+                        in->cap_adap + DP_REMOTE_CAP, 1))
+@@ -1275,8 +1270,8 @@ static void tb_dp_dump(struct tb_tunnel
+       rate = tb_dp_cap_get_rate(dp_cap);
+       lanes = tb_dp_cap_get_lanes(dp_cap);
+-      tb_port_dbg(in, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
+-                  rate, lanes, tb_dp_bandwidth(rate, lanes));
++      tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
++                    rate, lanes, tb_dp_bandwidth(rate, lanes));
+ }
+ /**
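Condensed from the hunks above, the conversion follows one pattern: a message
that used to be attributed to a single adapter port is now attributed to the
tunnel, and the port role ("DP IN"/"DP OUT") moves into the format string
where it still matters. A short before/after sketch using the variables from
tb_dp_xchg_caps():

	/* Before: the message is tagged with one adapter port only */
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * After: the message is tagged with the whole tunnel, so the port
	 * role is spelled out in the format string instead.
	 */
	tb_tunnel_dbg(tunnel,
		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));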
diff --git a/queue-6.6/thunderbolt-use-weight-constants-in-tb_usb3_consumed_bandwidth.patch b/queue-6.6/thunderbolt-use-weight-constants-in-tb_usb3_consumed_bandwidth.patch
new file mode 100644 (file)
index 0000000..49e4032
--- /dev/null
@@ -0,0 +1,46 @@
+From stable+bounces-78570-greg=kroah.com@vger.kernel.org Tue Oct  1 19:33:31 2024
+From: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Date: Tue,  1 Oct 2024 17:31:01 +0000
+Subject: thunderbolt: Use weight constants in tb_usb3_consumed_bandwidth()
+To: gregkh@linuxfoundation.org, stable@vger.kernel.org
+Cc: qin.wan@hp.com, andreas.noever@gmail.com, michael.jamet@intel.com, mika.westerberg@linux.intel.com, YehezkelShB@gmail.com, linux-usb@vger.kernel.org, linux-kernel@vger.kernel.org, Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Message-ID: <20241001173109.1513-7-alexandru.gagniuc@hp.com>
+
+From: Mika Westerberg <mika.westerberg@linux.intel.com>
+
+[ Upstream commit 4d24db0c801461adeefd7e0bdc98c79c60ccefb0 ]
+
+Instead of magic numbers, use the constants we introduced in the previous
+commit to make the code more readable. No functional changes.
+
+Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Qin Wan <qin.wan@hp.com>
+Signed-off-by: Alexandru Gagniuc <alexandru.gagniuc@hp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/thunderbolt/tunnel.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/thunderbolt/tunnel.c
++++ b/drivers/thunderbolt/tunnel.c
+@@ -1747,14 +1747,17 @@ static int tb_usb3_activate(struct tb_tu
+ static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
+               int *consumed_up, int *consumed_down)
+ {
+-      int pcie_enabled = tb_acpi_may_tunnel_pcie();
++      int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
+       /*
+        * PCIe tunneling, if enabled, affects the USB3 bandwidth so
+        * take that it into account here.
+        */
+-      *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
+-      *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
++      *consumed_up = tunnel->allocated_up *
++              (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
++      *consumed_down = tunnel->allocated_down *
++              (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
++
+       return 0;
+ }