1 // SPDX-License-Identifier: GPL-2.0
3 * Thunderbolt driver - switch/port utility functions
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
9 #include <linux/delay.h>
10 #include <linux/idr.h>
11 #include <linux/module.h>
12 #include <linux/nvmem-provider.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/sched/signal.h>
15 #include <linux/sizes.h>
16 #include <linux/slab.h>
17 #include <linux/string_helpers.h>
21 /* Switch NVM support */
23 struct nvm_auth_status
{
24 struct list_head list
;
30 * Hold NVM authentication failure status per switch This information
31 * needs to stay around even when the switch gets power cycled so we
34 static LIST_HEAD(nvm_auth_status_cache
);
35 static DEFINE_MUTEX(nvm_auth_status_lock
);
37 static struct nvm_auth_status
*__nvm_get_auth_status(const struct tb_switch
*sw
)
39 struct nvm_auth_status
*st
;
41 list_for_each_entry(st
, &nvm_auth_status_cache
, list
) {
42 if (uuid_equal(&st
->uuid
, sw
->uuid
))
49 static void nvm_get_auth_status(const struct tb_switch
*sw
, u32
*status
)
51 struct nvm_auth_status
*st
;
53 mutex_lock(&nvm_auth_status_lock
);
54 st
= __nvm_get_auth_status(sw
);
55 mutex_unlock(&nvm_auth_status_lock
);
57 *status
= st
? st
->status
: 0;
60 static void nvm_set_auth_status(const struct tb_switch
*sw
, u32 status
)
62 struct nvm_auth_status
*st
;
64 if (WARN_ON(!sw
->uuid
))
67 mutex_lock(&nvm_auth_status_lock
);
68 st
= __nvm_get_auth_status(sw
);
71 st
= kzalloc(sizeof(*st
), GFP_KERNEL
);
75 memcpy(&st
->uuid
, sw
->uuid
, sizeof(st
->uuid
));
76 INIT_LIST_HEAD(&st
->list
);
77 list_add_tail(&st
->list
, &nvm_auth_status_cache
);
82 mutex_unlock(&nvm_auth_status_lock
);
85 static void nvm_clear_auth_status(const struct tb_switch
*sw
)
87 struct nvm_auth_status
*st
;
89 mutex_lock(&nvm_auth_status_lock
);
90 st
= __nvm_get_auth_status(sw
);
95 mutex_unlock(&nvm_auth_status_lock
);
98 static int nvm_validate_and_write(struct tb_switch
*sw
)
100 unsigned int image_size
;
104 ret
= tb_nvm_validate(sw
->nvm
);
108 ret
= tb_nvm_write_headers(sw
->nvm
);
112 buf
= sw
->nvm
->buf_data_start
;
113 image_size
= sw
->nvm
->buf_data_size
;
115 if (tb_switch_is_usb4(sw
))
116 ret
= usb4_switch_nvm_write(sw
, 0, buf
, image_size
);
118 ret
= dma_port_flash_write(sw
->dma_port
, 0, buf
, image_size
);
122 sw
->nvm
->flushed
= true;
126 static int nvm_authenticate_host_dma_port(struct tb_switch
*sw
)
131 * Root switch NVM upgrade requires that we disconnect the
132 * existing paths first (in case it is not in safe mode
135 if (!sw
->safe_mode
) {
138 ret
= tb_domain_disconnect_all_paths(sw
->tb
);
142 * The host controller goes away pretty soon after this if
143 * everything goes well so getting timeout is expected.
145 ret
= dma_port_flash_update_auth(sw
->dma_port
);
146 if (!ret
|| ret
== -ETIMEDOUT
)
150 * Any error from update auth operation requires power
151 * cycling of the host router.
153 tb_sw_warn(sw
, "failed to authenticate NVM, power cycling\n");
154 if (dma_port_flash_update_auth_status(sw
->dma_port
, &status
) > 0)
155 nvm_set_auth_status(sw
, status
);
159 * From safe mode we can get out by just power cycling the
162 dma_port_power_cycle(sw
->dma_port
);
166 static int nvm_authenticate_device_dma_port(struct tb_switch
*sw
)
168 int ret
, retries
= 10;
170 ret
= dma_port_flash_update_auth(sw
->dma_port
);
176 /* Power cycle is required */
183 * Poll here for the authentication status. It takes some time
184 * for the device to respond (we get timeout for a while). Once
185 * we get response the device needs to be power cycled in order
186 * to the new NVM to be taken into use.
191 ret
= dma_port_flash_update_auth_status(sw
->dma_port
, &status
);
192 if (ret
< 0 && ret
!= -ETIMEDOUT
)
196 tb_sw_warn(sw
, "failed to authenticate NVM\n");
197 nvm_set_auth_status(sw
, status
);
200 tb_sw_info(sw
, "power cycling the switch now\n");
201 dma_port_power_cycle(sw
->dma_port
);
211 static void nvm_authenticate_start_dma_port(struct tb_switch
*sw
)
213 struct pci_dev
*root_port
;
216 * During host router NVM upgrade we should not allow root port to
217 * go into D3cold because some root ports cannot trigger PME
218 * itself. To be on the safe side keep the root port in D0 during
219 * the whole upgrade process.
221 root_port
= pcie_find_root_port(sw
->tb
->nhi
->pdev
);
223 pm_runtime_get_noresume(&root_port
->dev
);
226 static void nvm_authenticate_complete_dma_port(struct tb_switch
*sw
)
228 struct pci_dev
*root_port
;
230 root_port
= pcie_find_root_port(sw
->tb
->nhi
->pdev
);
232 pm_runtime_put(&root_port
->dev
);
235 static inline bool nvm_readable(struct tb_switch
*sw
)
237 if (tb_switch_is_usb4(sw
)) {
239 * USB4 devices must support NVM operations but it is
240 * optional for hosts. Therefore we query the NVM sector
241 * size here and if it is supported assume NVM
242 * operations are implemented.
244 return usb4_switch_nvm_sector_size(sw
) > 0;
247 /* Thunderbolt 2 and 3 devices support NVM through DMA port */
248 return !!sw
->dma_port
;
251 static inline bool nvm_upgradeable(struct tb_switch
*sw
)
253 if (sw
->no_nvm_upgrade
)
255 return nvm_readable(sw
);
258 static int nvm_authenticate(struct tb_switch
*sw
, bool auth_only
)
262 if (tb_switch_is_usb4(sw
)) {
264 ret
= usb4_switch_nvm_set_offset(sw
, 0);
268 sw
->nvm
->authenticating
= true;
269 return usb4_switch_nvm_authenticate(sw
);
274 sw
->nvm
->authenticating
= true;
276 nvm_authenticate_start_dma_port(sw
);
277 ret
= nvm_authenticate_host_dma_port(sw
);
279 ret
= nvm_authenticate_device_dma_port(sw
);
286 * tb_switch_nvm_read() - Read router NVM
287 * @sw: Router whose NVM to read
288 * @address: Start address on the NVM
289 * @buf: Buffer where the read data is copied
290 * @size: Size of the buffer in bytes
292 * Reads from router NVM and returns the requested data in @buf. Locking
293 * is up to the caller. Returns %0 in success and negative errno in case
296 int tb_switch_nvm_read(struct tb_switch
*sw
, unsigned int address
, void *buf
,
299 if (tb_switch_is_usb4(sw
))
300 return usb4_switch_nvm_read(sw
, address
, buf
, size
);
301 return dma_port_flash_read(sw
->dma_port
, address
, buf
, size
);
304 static int nvm_read(void *priv
, unsigned int offset
, void *val
, size_t bytes
)
306 struct tb_nvm
*nvm
= priv
;
307 struct tb_switch
*sw
= tb_to_switch(nvm
->dev
);
310 pm_runtime_get_sync(&sw
->dev
);
312 if (!mutex_trylock(&sw
->tb
->lock
)) {
313 ret
= restart_syscall();
317 ret
= tb_switch_nvm_read(sw
, offset
, val
, bytes
);
318 mutex_unlock(&sw
->tb
->lock
);
321 pm_runtime_mark_last_busy(&sw
->dev
);
322 pm_runtime_put_autosuspend(&sw
->dev
);
327 static int nvm_write(void *priv
, unsigned int offset
, void *val
, size_t bytes
)
329 struct tb_nvm
*nvm
= priv
;
330 struct tb_switch
*sw
= tb_to_switch(nvm
->dev
);
333 if (!mutex_trylock(&sw
->tb
->lock
))
334 return restart_syscall();
337 * Since writing the NVM image might require some special steps,
338 * for example when CSS headers are written, we cache the image
339 * locally here and handle the special cases when the user asks
340 * us to authenticate the image.
342 ret
= tb_nvm_write_buf(nvm
, offset
, val
, bytes
);
343 mutex_unlock(&sw
->tb
->lock
);
348 static int tb_switch_nvm_add(struct tb_switch
*sw
)
353 if (!nvm_readable(sw
))
356 nvm
= tb_nvm_alloc(&sw
->dev
);
358 ret
= PTR_ERR(nvm
) == -EOPNOTSUPP
? 0 : PTR_ERR(nvm
);
362 ret
= tb_nvm_read_version(nvm
);
367 * If the switch is in safe-mode the only accessible portion of
368 * the NVM is the non-active one where userspace is expected to
369 * write new functional NVM.
371 if (!sw
->safe_mode
) {
372 ret
= tb_nvm_add_active(nvm
, nvm_read
);
375 tb_sw_dbg(sw
, "NVM version %x.%x\n", nvm
->major
, nvm
->minor
);
378 if (!sw
->no_nvm_upgrade
) {
379 ret
= tb_nvm_add_non_active(nvm
, nvm_write
);
388 tb_sw_dbg(sw
, "NVM upgrade disabled\n");
389 sw
->no_nvm_upgrade
= true;
396 static void tb_switch_nvm_remove(struct tb_switch
*sw
)
406 /* Remove authentication status in case the switch is unplugged */
407 if (!nvm
->authenticating
)
408 nvm_clear_auth_status(sw
);
413 /* port utility functions */
415 static const char *tb_port_type(const struct tb_regs_port_header
*port
)
417 switch (port
->type
>> 16) {
419 switch ((u8
) port
->type
) {
444 static void tb_dump_port(struct tb
*tb
, const struct tb_port
*port
)
446 const struct tb_regs_port_header
*regs
= &port
->config
;
449 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
450 regs
->port_number
, regs
->vendor_id
, regs
->device_id
,
451 regs
->revision
, regs
->thunderbolt_version
, tb_port_type(regs
),
453 tb_dbg(tb
, " Max hop id (in/out): %d/%d\n",
454 regs
->max_in_hop_id
, regs
->max_out_hop_id
);
455 tb_dbg(tb
, " Max counters: %d\n", regs
->max_counters
);
456 tb_dbg(tb
, " NFC Credits: %#x\n", regs
->nfc_credits
);
457 tb_dbg(tb
, " Credits (total/control): %u/%u\n", port
->total_credits
,
462 * tb_port_state() - get connectedness state of a port
463 * @port: the port to check
465 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
467 * Return: Returns an enum tb_port_state on success or an error code on failure.
469 int tb_port_state(struct tb_port
*port
)
471 struct tb_cap_phy phy
;
473 if (port
->cap_phy
== 0) {
474 tb_port_WARN(port
, "does not have a PHY\n");
477 res
= tb_port_read(port
, &phy
, TB_CFG_PORT
, port
->cap_phy
, 2);
484 * tb_wait_for_port() - wait for a port to become ready
485 * @port: Port to wait
486 * @wait_if_unplugged: Wait also when port is unplugged
488 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
489 * wait_if_unplugged is set then we also wait if the port is in state
490 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
491 * switch resume). Otherwise we only wait if a device is registered but the link
492 * has not yet been established.
494 * Return: Returns an error code on failure. Returns 0 if the port is not
495 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
496 * if the port is connected and in state TB_PORT_UP.
498 int tb_wait_for_port(struct tb_port
*port
, bool wait_if_unplugged
)
502 if (!port
->cap_phy
) {
503 tb_port_WARN(port
, "does not have PHY\n");
506 if (tb_is_upstream_port(port
)) {
507 tb_port_WARN(port
, "is the upstream port\n");
512 state
= tb_port_state(port
);
514 case TB_PORT_DISABLED
:
515 tb_port_dbg(port
, "is disabled (state: 0)\n");
518 case TB_PORT_UNPLUGGED
:
519 if (wait_if_unplugged
) {
520 /* used during resume */
522 "is unplugged (state: 7), retrying...\n");
526 tb_port_dbg(port
, "is unplugged (state: 7)\n");
530 case TB_PORT_TX_CL0S
:
531 case TB_PORT_RX_CL0S
:
534 tb_port_dbg(port
, "is connected, link is up (state: %d)\n", state
);
542 * After plug-in the state is TB_PORT_CONNECTING. Give it some
546 "is connected, link is not up (state: %d), retrying...\n",
553 "failed to reach state TB_PORT_UP. Ignoring port...\n");
558 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
559 * @port: Port to add/remove NFC credits
560 * @credits: Credits to add/remove
562 * Change the number of NFC credits allocated to @port by @credits. To remove
563 * NFC credits pass a negative amount of credits.
565 * Return: Returns 0 on success or an error code on failure.
567 int tb_port_add_nfc_credits(struct tb_port
*port
, int credits
)
571 if (credits
== 0 || port
->sw
->is_unplugged
)
575 * USB4 restricts programming NFC buffers to lane adapters only
576 * so skip other ports.
578 if (tb_switch_is_usb4(port
->sw
) && !tb_port_is_null(port
))
581 nfc_credits
= port
->config
.nfc_credits
& ADP_CS_4_NFC_BUFFERS_MASK
;
583 credits
= max_t(int, -nfc_credits
, credits
);
585 nfc_credits
+= credits
;
587 tb_port_dbg(port
, "adding %d NFC credits to %lu", credits
,
588 port
->config
.nfc_credits
& ADP_CS_4_NFC_BUFFERS_MASK
);
590 port
->config
.nfc_credits
&= ~ADP_CS_4_NFC_BUFFERS_MASK
;
591 port
->config
.nfc_credits
|= nfc_credits
;
593 return tb_port_write(port
, &port
->config
.nfc_credits
,
594 TB_CFG_PORT
, ADP_CS_4
, 1);
598 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
599 * @port: Port whose counters to clear
600 * @counter: Counter index to clear
602 * Return: Returns 0 on success or an error code on failure.
604 int tb_port_clear_counter(struct tb_port
*port
, int counter
)
606 u32 zero
[3] = { 0, 0, 0 };
607 tb_port_dbg(port
, "clearing counter %d\n", counter
);
608 return tb_port_write(port
, zero
, TB_CFG_COUNTERS
, 3 * counter
, 3);
612 * tb_port_unlock() - Unlock downstream port
613 * @port: Port to unlock
615 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
616 * downstream router accessible for CM.
618 int tb_port_unlock(struct tb_port
*port
)
620 if (tb_switch_is_icm(port
->sw
))
622 if (!tb_port_is_null(port
))
624 if (tb_switch_is_usb4(port
->sw
))
625 return usb4_port_unlock(port
);
629 static int __tb_port_enable(struct tb_port
*port
, bool enable
)
634 if (!tb_port_is_null(port
))
637 ret
= tb_port_read(port
, &phy
, TB_CFG_PORT
,
638 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
643 phy
&= ~LANE_ADP_CS_1_LD
;
645 phy
|= LANE_ADP_CS_1_LD
;
648 ret
= tb_port_write(port
, &phy
, TB_CFG_PORT
,
649 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
653 tb_port_dbg(port
, "lane %s\n", str_enabled_disabled(enable
));
658 * tb_port_enable() - Enable lane adapter
659 * @port: Port to enable (can be %NULL)
661 * This is used for lane 0 and 1 adapters to enable it.
663 int tb_port_enable(struct tb_port
*port
)
665 return __tb_port_enable(port
, true);
669 * tb_port_disable() - Disable lane adapter
670 * @port: Port to disable (can be %NULL)
672 * This is used for lane 0 and 1 adapters to disable it.
674 int tb_port_disable(struct tb_port
*port
)
676 return __tb_port_enable(port
, false);
680 * tb_init_port() - initialize a port
682 * This is a helper method for tb_switch_alloc. Does not check or initialize
683 * any downstream switches.
685 * Return: Returns 0 on success or an error code on failure.
687 static int tb_init_port(struct tb_port
*port
)
692 INIT_LIST_HEAD(&port
->list
);
694 /* Control adapter does not have configuration space */
698 res
= tb_port_read(port
, &port
->config
, TB_CFG_PORT
, 0, 8);
700 if (res
== -ENODEV
) {
701 tb_dbg(port
->sw
->tb
, " Port %d: not implemented\n",
703 port
->disabled
= true;
709 /* Port 0 is the switch itself and has no PHY. */
710 if (port
->config
.type
== TB_TYPE_PORT
) {
711 cap
= tb_port_find_cap(port
, TB_PORT_CAP_PHY
);
716 tb_port_WARN(port
, "non switch port without a PHY\n");
718 cap
= tb_port_find_cap(port
, TB_PORT_CAP_USB4
);
720 port
->cap_usb4
= cap
;
723 * USB4 ports the buffers allocated for the control path
724 * can be read from the path config space. Legacy
725 * devices we use hard-coded value.
727 if (port
->cap_usb4
) {
728 struct tb_regs_hop hop
;
730 if (!tb_port_read(port
, &hop
, TB_CFG_HOPS
, 0, 2))
731 port
->ctl_credits
= hop
.initial_credits
;
733 if (!port
->ctl_credits
)
734 port
->ctl_credits
= 2;
737 cap
= tb_port_find_cap(port
, TB_PORT_CAP_ADAP
);
739 port
->cap_adap
= cap
;
742 port
->total_credits
=
743 (port
->config
.nfc_credits
& ADP_CS_4_TOTAL_BUFFERS_MASK
) >>
744 ADP_CS_4_TOTAL_BUFFERS_SHIFT
;
746 tb_dump_port(port
->sw
->tb
, port
);
750 static int tb_port_alloc_hopid(struct tb_port
*port
, bool in
, int min_hopid
,
757 port_max_hopid
= port
->config
.max_in_hop_id
;
758 ida
= &port
->in_hopids
;
760 port_max_hopid
= port
->config
.max_out_hop_id
;
761 ida
= &port
->out_hopids
;
765 * NHI can use HopIDs 1-max for other adapters HopIDs 0-7 are
768 if (!tb_port_is_nhi(port
) && min_hopid
< TB_PATH_MIN_HOPID
)
769 min_hopid
= TB_PATH_MIN_HOPID
;
771 if (max_hopid
< 0 || max_hopid
> port_max_hopid
)
772 max_hopid
= port_max_hopid
;
774 return ida_simple_get(ida
, min_hopid
, max_hopid
+ 1, GFP_KERNEL
);
778 * tb_port_alloc_in_hopid() - Allocate input HopID from port
779 * @port: Port to allocate HopID for
780 * @min_hopid: Minimum acceptable input HopID
781 * @max_hopid: Maximum acceptable input HopID
783 * Return: HopID between @min_hopid and @max_hopid or negative errno in
786 int tb_port_alloc_in_hopid(struct tb_port
*port
, int min_hopid
, int max_hopid
)
788 return tb_port_alloc_hopid(port
, true, min_hopid
, max_hopid
);
792 * tb_port_alloc_out_hopid() - Allocate output HopID from port
793 * @port: Port to allocate HopID for
794 * @min_hopid: Minimum acceptable output HopID
795 * @max_hopid: Maximum acceptable output HopID
797 * Return: HopID between @min_hopid and @max_hopid or negative errno in
800 int tb_port_alloc_out_hopid(struct tb_port
*port
, int min_hopid
, int max_hopid
)
802 return tb_port_alloc_hopid(port
, false, min_hopid
, max_hopid
);
806 * tb_port_release_in_hopid() - Release allocated input HopID from port
807 * @port: Port whose HopID to release
808 * @hopid: HopID to release
810 void tb_port_release_in_hopid(struct tb_port
*port
, int hopid
)
812 ida_simple_remove(&port
->in_hopids
, hopid
);
816 * tb_port_release_out_hopid() - Release allocated output HopID from port
817 * @port: Port whose HopID to release
818 * @hopid: HopID to release
820 void tb_port_release_out_hopid(struct tb_port
*port
, int hopid
)
822 ida_simple_remove(&port
->out_hopids
, hopid
);
825 static inline bool tb_switch_is_reachable(const struct tb_switch
*parent
,
826 const struct tb_switch
*sw
)
828 u64 mask
= (1ULL << parent
->config
.depth
* 8) - 1;
829 return (tb_route(parent
) & mask
) == (tb_route(sw
) & mask
);
833 * tb_next_port_on_path() - Return next port for given port on a path
834 * @start: Start port of the walk
835 * @end: End port of the walk
836 * @prev: Previous port (%NULL if this is the first)
838 * This function can be used to walk from one port to another if they
839 * are connected through zero or more switches. If the @prev is dual
840 * link port, the function follows that link and returns another end on
843 * If the @end port has been reached, return %NULL.
845 * Domain tb->lock must be held when this function is called.
847 struct tb_port
*tb_next_port_on_path(struct tb_port
*start
, struct tb_port
*end
,
848 struct tb_port
*prev
)
850 struct tb_port
*next
;
855 if (prev
->sw
== end
->sw
) {
861 if (tb_switch_is_reachable(prev
->sw
, end
->sw
)) {
862 next
= tb_port_at(tb_route(end
->sw
), prev
->sw
);
863 /* Walk down the topology if next == prev */
865 (next
== prev
|| next
->dual_link_port
== prev
))
868 if (tb_is_upstream_port(prev
)) {
871 next
= tb_upstream_port(prev
->sw
);
873 * Keep the same link if prev and next are both
876 if (next
->dual_link_port
&&
877 next
->link_nr
!= prev
->link_nr
) {
878 next
= next
->dual_link_port
;
883 return next
!= prev
? next
: NULL
;
887 * tb_port_get_link_speed() - Get current link speed
888 * @port: Port to check (USB4 or CIO)
890 * Returns link speed in Gb/s or negative errno in case of failure.
892 int tb_port_get_link_speed(struct tb_port
*port
)
900 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
901 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
905 speed
= (val
& LANE_ADP_CS_1_CURRENT_SPEED_MASK
) >>
906 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT
;
909 case LANE_ADP_CS_1_CURRENT_SPEED_GEN4
:
911 case LANE_ADP_CS_1_CURRENT_SPEED_GEN3
:
/**
 * tb_port_get_link_generation() - Returns link generation
 * @port: Lane adapter
 *
 * Returns link generation as number or negative errno in case of
 * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
 * links so for those always returns 2.
 */
int tb_port_get_link_generation(struct tb_port *port)
{
	int ret;

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	switch (ret) {
	case 40:
		return 4;
	case 20:
		return 3;
	default:
		return 2;
	}
}
945 * tb_port_get_link_width() - Get current link width
946 * @port: Port to check (USB4 or CIO)
948 * Returns link width. Return the link width as encoded in &enum
949 * tb_link_width or negative errno in case of failure.
951 int tb_port_get_link_width(struct tb_port
*port
)
959 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
960 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
964 /* Matches the values in enum tb_link_width */
965 return (val
& LANE_ADP_CS_1_CURRENT_WIDTH_MASK
) >>
966 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT
;
970 * tb_port_width_supported() - Is the given link width supported
971 * @port: Port to check
972 * @width: Widths to check (bitmask)
974 * Can be called to any lane adapter. Checks if given @width is
975 * supported by the hardware and returns %true if it is.
977 bool tb_port_width_supported(struct tb_port
*port
, unsigned int width
)
985 if (width
& (TB_LINK_WIDTH_ASYM_TX
| TB_LINK_WIDTH_ASYM_RX
)) {
986 if (tb_port_get_link_generation(port
) < 4 ||
987 !usb4_port_asym_supported(port
))
991 ret
= tb_port_read(port
, &phy
, TB_CFG_PORT
,
992 port
->cap_phy
+ LANE_ADP_CS_0
, 1);
997 * The field encoding is the same as &enum tb_link_width (which is
1000 widths
= FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK
, phy
);
1001 return widths
& width
;
1005 * tb_port_set_link_width() - Set target link width of the lane adapter
1006 * @port: Lane adapter
1007 * @width: Target link width
1009 * Sets the target link width of the lane adapter to @width. Does not
1010 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
1012 * Return: %0 in case of success and negative errno in case of error
1014 int tb_port_set_link_width(struct tb_port
*port
, enum tb_link_width width
)
1022 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
1023 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
1027 val
&= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK
;
1029 case TB_LINK_WIDTH_SINGLE
:
1030 /* Gen 4 link cannot be single */
1031 if (tb_port_get_link_generation(port
) >= 4)
1033 val
|= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE
<<
1034 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT
;
1037 case TB_LINK_WIDTH_DUAL
:
1038 if (tb_port_get_link_generation(port
) >= 4)
1039 return usb4_port_asym_set_link_width(port
, width
);
1040 val
|= LANE_ADP_CS_1_TARGET_WIDTH_DUAL
<<
1041 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT
;
1044 case TB_LINK_WIDTH_ASYM_TX
:
1045 case TB_LINK_WIDTH_ASYM_RX
:
1046 return usb4_port_asym_set_link_width(port
, width
);
1052 return tb_port_write(port
, &val
, TB_CFG_PORT
,
1053 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
1057 * tb_port_set_lane_bonding() - Enable/disable lane bonding
1058 * @port: Lane adapter
1059 * @bonding: enable/disable bonding
1061 * Enables or disables lane bonding. This should be called after target
1062 * link width has been set (tb_port_set_link_width()). Note in most
1063 * cases one should use tb_port_lane_bonding_enable() instead to enable
1066 * Return: %0 in case of success and negative errno in case of error
1068 static int tb_port_set_lane_bonding(struct tb_port
*port
, bool bonding
)
1076 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
1077 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
1082 val
|= LANE_ADP_CS_1_LB
;
1084 val
&= ~LANE_ADP_CS_1_LB
;
1086 return tb_port_write(port
, &val
, TB_CFG_PORT
,
1087 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
1091 * tb_port_lane_bonding_enable() - Enable bonding on port
1092 * @port: port to enable
1094 * Enable bonding by setting the link width of the port and the other
1095 * port in case of dual link port. Does not wait for the link to
1096 * actually reach the bonded state so caller needs to call
1097 * tb_port_wait_for_link_width() before enabling any paths through the
1098 * link to make sure the link is in expected state.
1100 * Return: %0 in case of success and negative errno in case of error
1102 int tb_port_lane_bonding_enable(struct tb_port
*port
)
1104 enum tb_link_width width
;
1108 * Enable lane bonding for both links if not already enabled by
1109 * for example the boot firmware.
1111 width
= tb_port_get_link_width(port
);
1112 if (width
== TB_LINK_WIDTH_SINGLE
) {
1113 ret
= tb_port_set_link_width(port
, TB_LINK_WIDTH_DUAL
);
1118 width
= tb_port_get_link_width(port
->dual_link_port
);
1119 if (width
== TB_LINK_WIDTH_SINGLE
) {
1120 ret
= tb_port_set_link_width(port
->dual_link_port
,
1121 TB_LINK_WIDTH_DUAL
);
1127 * Only set bonding if the link was not already bonded. This
1128 * avoids the lane adapter to re-enter bonding state.
1130 if (width
== TB_LINK_WIDTH_SINGLE
&& !tb_is_upstream_port(port
)) {
1131 ret
= tb_port_set_lane_bonding(port
, true);
1137 * When lane 0 bonding is set it will affect lane 1 too so
1140 port
->bonded
= true;
1141 port
->dual_link_port
->bonded
= true;
1146 tb_port_set_link_width(port
->dual_link_port
, TB_LINK_WIDTH_SINGLE
);
1148 tb_port_set_link_width(port
, TB_LINK_WIDTH_SINGLE
);
1154 * tb_port_lane_bonding_disable() - Disable bonding on port
1155 * @port: port to disable
1157 * Disable bonding by setting the link width of the port and the
1158 * other port in case of dual link port.
1160 void tb_port_lane_bonding_disable(struct tb_port
*port
)
1162 tb_port_set_lane_bonding(port
, false);
1163 tb_port_set_link_width(port
->dual_link_port
, TB_LINK_WIDTH_SINGLE
);
1164 tb_port_set_link_width(port
, TB_LINK_WIDTH_SINGLE
);
1165 port
->dual_link_port
->bonded
= false;
1166 port
->bonded
= false;
1170 * tb_port_wait_for_link_width() - Wait until link reaches specific width
1171 * @port: Port to wait for
1172 * @width: Expected link width (bitmask)
1173 * @timeout_msec: Timeout in ms how long to wait
1175 * Should be used after both ends of the link have been bonded (or
1176 * bonding has been disabled) to wait until the link actually reaches
1177 * the expected state. Returns %-ETIMEDOUT if the width was not reached
1178 * within the given timeout, %0 if it did. Can be passed a mask of
1179 * expected widths and succeeds if any of the widths is reached.
1181 int tb_port_wait_for_link_width(struct tb_port
*port
, unsigned int width
,
1184 ktime_t timeout
= ktime_add_ms(ktime_get(), timeout_msec
);
1187 /* Gen 4 link does not support single lane */
1188 if ((width
& TB_LINK_WIDTH_SINGLE
) &&
1189 tb_port_get_link_generation(port
) >= 4)
1193 ret
= tb_port_get_link_width(port
);
1196 * Sometimes we get port locked error when
1197 * polling the lanes so we can ignore it and
1202 } else if (ret
& width
) {
1206 usleep_range(1000, 2000);
1207 } while (ktime_before(ktime_get(), timeout
));
1212 static int tb_port_do_update_credits(struct tb_port
*port
)
1217 ret
= tb_port_read(port
, &nfc_credits
, TB_CFG_PORT
, ADP_CS_4
, 1);
1221 if (nfc_credits
!= port
->config
.nfc_credits
) {
1224 total
= (nfc_credits
& ADP_CS_4_TOTAL_BUFFERS_MASK
) >>
1225 ADP_CS_4_TOTAL_BUFFERS_SHIFT
;
1227 tb_port_dbg(port
, "total credits changed %u -> %u\n",
1228 port
->total_credits
, total
);
1230 port
->config
.nfc_credits
= nfc_credits
;
1231 port
->total_credits
= total
;
1238 * tb_port_update_credits() - Re-read port total credits
1239 * @port: Port to update
1241 * After the link is bonded (or bonding was disabled) the port total
1242 * credits may change, so this function needs to be called to re-read
1243 * the credits. Updates also the second lane adapter.
1245 int tb_port_update_credits(struct tb_port
*port
)
1249 ret
= tb_port_do_update_credits(port
);
1253 if (!port
->dual_link_port
)
1255 return tb_port_do_update_credits(port
->dual_link_port
);
1258 static int tb_port_start_lane_initialization(struct tb_port
*port
)
1262 if (tb_switch_is_usb4(port
->sw
))
1265 ret
= tb_lc_start_lane_initialization(port
);
1266 return ret
== -EINVAL
? 0 : ret
;
1270 * Returns true if the port had something (router, XDomain) connected
1273 static bool tb_port_resume(struct tb_port
*port
)
1275 bool has_remote
= tb_port_has_remote(port
);
1278 usb4_port_device_resume(port
->usb4
);
1279 } else if (!has_remote
) {
1281 * For disconnected downstream lane adapters start lane
1282 * initialization now so we detect future connects.
1284 * For XDomain start the lane initialzation now so the
1285 * link gets re-established.
1287 * This is only needed for non-USB4 ports.
1289 if (!tb_is_upstream_port(port
) || port
->xdomain
)
1290 tb_port_start_lane_initialization(port
);
1293 return has_remote
|| port
->xdomain
;
1297 * tb_port_is_enabled() - Is the adapter port enabled
1298 * @port: Port to check
1300 bool tb_port_is_enabled(struct tb_port
*port
)
1302 switch (port
->config
.type
) {
1303 case TB_TYPE_PCIE_UP
:
1304 case TB_TYPE_PCIE_DOWN
:
1305 return tb_pci_port_is_enabled(port
);
1307 case TB_TYPE_DP_HDMI_IN
:
1308 case TB_TYPE_DP_HDMI_OUT
:
1309 return tb_dp_port_is_enabled(port
);
1311 case TB_TYPE_USB3_UP
:
1312 case TB_TYPE_USB3_DOWN
:
1313 return tb_usb3_port_is_enabled(port
);
1321 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1322 * @port: USB3 adapter port to check
1324 bool tb_usb3_port_is_enabled(struct tb_port
*port
)
1328 if (tb_port_read(port
, &data
, TB_CFG_PORT
,
1329 port
->cap_adap
+ ADP_USB3_CS_0
, 1))
1332 return !!(data
& ADP_USB3_CS_0_PE
);
1336 * tb_usb3_port_enable() - Enable USB3 adapter port
1337 * @port: USB3 adapter port to enable
1338 * @enable: Enable/disable the USB3 adapter
1340 int tb_usb3_port_enable(struct tb_port
*port
, bool enable
)
1342 u32 word
= enable
? (ADP_USB3_CS_0_PE
| ADP_USB3_CS_0_V
)
1345 if (!port
->cap_adap
)
1347 return tb_port_write(port
, &word
, TB_CFG_PORT
,
1348 port
->cap_adap
+ ADP_USB3_CS_0
, 1);
1352 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1353 * @port: PCIe port to check
1355 bool tb_pci_port_is_enabled(struct tb_port
*port
)
1359 if (tb_port_read(port
, &data
, TB_CFG_PORT
,
1360 port
->cap_adap
+ ADP_PCIE_CS_0
, 1))
1363 return !!(data
& ADP_PCIE_CS_0_PE
);
1367 * tb_pci_port_enable() - Enable PCIe adapter port
1368 * @port: PCIe port to enable
1369 * @enable: Enable/disable the PCIe adapter
1371 int tb_pci_port_enable(struct tb_port
*port
, bool enable
)
1373 u32 word
= enable
? ADP_PCIE_CS_0_PE
: 0x0;
1374 if (!port
->cap_adap
)
1376 return tb_port_write(port
, &word
, TB_CFG_PORT
,
1377 port
->cap_adap
+ ADP_PCIE_CS_0
, 1);
1381 * tb_dp_port_hpd_is_active() - Is HPD already active
1382 * @port: DP out port to check
1384 * Checks if the DP OUT adapter port has HPD bit already set.
1386 int tb_dp_port_hpd_is_active(struct tb_port
*port
)
1391 ret
= tb_port_read(port
, &data
, TB_CFG_PORT
,
1392 port
->cap_adap
+ ADP_DP_CS_2
, 1);
1396 return !!(data
& ADP_DP_CS_2_HPD
);
1400 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1401 * @port: Port to clear HPD
1403 * If the DP IN port has HPD set, this function can be used to clear it.
1405 int tb_dp_port_hpd_clear(struct tb_port
*port
)
1410 ret
= tb_port_read(port
, &data
, TB_CFG_PORT
,
1411 port
->cap_adap
+ ADP_DP_CS_3
, 1);
1415 data
|= ADP_DP_CS_3_HPDC
;
1416 return tb_port_write(port
, &data
, TB_CFG_PORT
,
1417 port
->cap_adap
+ ADP_DP_CS_3
, 1);
1421 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1422 * @port: DP IN/OUT port to set hops
1423 * @video: Video Hop ID
1424 * @aux_tx: AUX TX Hop ID
1425 * @aux_rx: AUX RX Hop ID
1427 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
1428 * router DP adapters too but does not program the values as the fields
1431 int tb_dp_port_set_hops(struct tb_port
*port
, unsigned int video
,
1432 unsigned int aux_tx
, unsigned int aux_rx
)
1437 if (tb_switch_is_usb4(port
->sw
))
1440 ret
= tb_port_read(port
, data
, TB_CFG_PORT
,
1441 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1445 data
[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK
;
1446 data
[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK
;
1447 data
[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK
;
1449 data
[0] |= (video
<< ADP_DP_CS_0_VIDEO_HOPID_SHIFT
) &
1450 ADP_DP_CS_0_VIDEO_HOPID_MASK
;
1451 data
[1] |= aux_tx
& ADP_DP_CS_1_AUX_TX_HOPID_MASK
;
1452 data
[1] |= (aux_rx
<< ADP_DP_CS_1_AUX_RX_HOPID_SHIFT
) &
1453 ADP_DP_CS_1_AUX_RX_HOPID_MASK
;
1455 return tb_port_write(port
, data
, TB_CFG_PORT
,
1456 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1460 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1461 * @port: DP adapter port to check
1463 bool tb_dp_port_is_enabled(struct tb_port
*port
)
1467 if (tb_port_read(port
, data
, TB_CFG_PORT
, port
->cap_adap
+ ADP_DP_CS_0
,
1471 return !!(data
[0] & (ADP_DP_CS_0_VE
| ADP_DP_CS_0_AE
));
1475 * tb_dp_port_enable() - Enables/disables DP paths of a port
1476 * @port: DP IN/OUT port
1477 * @enable: Enable/disable DP path
1479 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1480 * calling this function.
1482 int tb_dp_port_enable(struct tb_port
*port
, bool enable
)
1487 ret
= tb_port_read(port
, data
, TB_CFG_PORT
,
1488 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1493 data
[0] |= ADP_DP_CS_0_VE
| ADP_DP_CS_0_AE
;
1495 data
[0] &= ~(ADP_DP_CS_0_VE
| ADP_DP_CS_0_AE
);
1497 return tb_port_write(port
, data
, TB_CFG_PORT
,
1498 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1501 /* switch utility functions */
1503 static const char *tb_switch_generation_name(const struct tb_switch
*sw
)
1505 switch (sw
->generation
) {
1507 return "Thunderbolt 1";
1509 return "Thunderbolt 2";
1511 return "Thunderbolt 3";
1519 static void tb_dump_switch(const struct tb
*tb
, const struct tb_switch
*sw
)
1521 const struct tb_regs_switch_header
*regs
= &sw
->config
;
1523 tb_dbg(tb
, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1524 tb_switch_generation_name(sw
), regs
->vendor_id
, regs
->device_id
,
1525 regs
->revision
, regs
->thunderbolt_version
);
1526 tb_dbg(tb
, " Max Port Number: %d\n", regs
->max_port_number
);
1527 tb_dbg(tb
, " Config:\n");
1529 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1530 regs
->upstream_port_number
, regs
->depth
,
1531 (((u64
) regs
->route_hi
) << 32) | regs
->route_lo
,
1532 regs
->enabled
, regs
->plug_events_delay
);
1533 tb_dbg(tb
, " unknown1: %#x unknown4: %#x\n",
1534 regs
->__unknown1
, regs
->__unknown4
);
1538 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
1539 * @sw: Switch to reset
1541 * Return: Returns 0 on success or an error code on failure.
1543 int tb_switch_reset(struct tb_switch
*sw
)
1545 struct tb_cfg_result res
;
1547 if (sw
->generation
> 1)
1550 tb_sw_dbg(sw
, "resetting switch\n");
1552 res
.err
= tb_sw_write(sw
, ((u32
*) &sw
->config
) + 2,
1553 TB_CFG_SWITCH
, 2, 2);
1556 res
= tb_cfg_reset(sw
->tb
->ctl
, tb_route(sw
));
1563 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
1564 * @sw: Router to read the offset value from
1565 * @offset: Offset in the router config space to read from
1566 * @bit: Bit mask in the offset to wait for
1567 * @value: Value of the bits to wait for
1568 * @timeout_msec: Timeout in ms how long to wait
1570 * Wait till the specified bits in specified offset reach specified value.
1571 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
1572 * within the given timeout or a negative errno in case of failure.
1574 int tb_switch_wait_for_bit(struct tb_switch
*sw
, u32 offset
, u32 bit
,
1575 u32 value
, int timeout_msec
)
1577 ktime_t timeout
= ktime_add_ms(ktime_get(), timeout_msec
);
1583 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, offset
, 1);
1587 if ((val
& bit
) == value
)
1590 usleep_range(50, 100);
1591 } while (ktime_before(ktime_get(), timeout
));
1597 * tb_plug_events_active() - enable/disable plug events on a switch
1599 * Also configures a sane plug_events_delay of 255ms.
1601 * Return: Returns 0 on success or an error code on failure.
1603 static int tb_plug_events_active(struct tb_switch
*sw
, bool active
)
1608 if (tb_switch_is_icm(sw
) || tb_switch_is_usb4(sw
))
1611 sw
->config
.plug_events_delay
= 0xff;
1612 res
= tb_sw_write(sw
, ((u32
*) &sw
->config
) + 4, TB_CFG_SWITCH
, 4, 1);
1616 res
= tb_sw_read(sw
, &data
, TB_CFG_SWITCH
, sw
->cap_plug_events
+ 1, 1);
1621 data
= data
& 0xFFFFFF83;
1622 switch (sw
->config
.device_id
) {
1623 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE
:
1624 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE
:
1625 case PCI_DEVICE_ID_INTEL_PORT_RIDGE
:
1629 * Skip Alpine Ridge, it needs to have vendor
1630 * specific USB hotplug event enabled for the
1631 * internal xHCI to work.
1633 if (!tb_switch_is_alpine_ridge(sw
))
1634 data
|= TB_PLUG_EVENTS_USB_DISABLE
;
1639 return tb_sw_write(sw
, &data
, TB_CFG_SWITCH
,
1640 sw
->cap_plug_events
+ 1, 1);
1643 static ssize_t
authorized_show(struct device
*dev
,
1644 struct device_attribute
*attr
,
1647 struct tb_switch
*sw
= tb_to_switch(dev
);
1649 return sysfs_emit(buf
, "%u\n", sw
->authorized
);
1652 static int disapprove_switch(struct device
*dev
, void *not_used
)
1654 char *envp
[] = { "AUTHORIZED=0", NULL
};
1655 struct tb_switch
*sw
;
1657 sw
= tb_to_switch(dev
);
1658 if (sw
&& sw
->authorized
) {
1661 /* First children */
1662 ret
= device_for_each_child_reverse(&sw
->dev
, NULL
, disapprove_switch
);
1666 ret
= tb_domain_disapprove_switch(sw
->tb
, sw
);
1671 kobject_uevent_env(&sw
->dev
.kobj
, KOBJ_CHANGE
, envp
);
1677 static int tb_switch_set_authorized(struct tb_switch
*sw
, unsigned int val
)
1679 char envp_string
[13];
1681 char *envp
[] = { envp_string
, NULL
};
1683 if (!mutex_trylock(&sw
->tb
->lock
))
1684 return restart_syscall();
1686 if (!!sw
->authorized
== !!val
)
1690 /* Disapprove switch */
1693 ret
= disapprove_switch(&sw
->dev
, NULL
);
1698 /* Approve switch */
1701 ret
= tb_domain_approve_switch_key(sw
->tb
, sw
);
1703 ret
= tb_domain_approve_switch(sw
->tb
, sw
);
1706 /* Challenge switch */
1709 ret
= tb_domain_challenge_switch_key(sw
->tb
, sw
);
1717 sw
->authorized
= val
;
1719 * Notify status change to the userspace, informing the new
1720 * value of /sys/bus/thunderbolt/devices/.../authorized.
1722 sprintf(envp_string
, "AUTHORIZED=%u", sw
->authorized
);
1723 kobject_uevent_env(&sw
->dev
.kobj
, KOBJ_CHANGE
, envp
);
1727 mutex_unlock(&sw
->tb
->lock
);
1731 static ssize_t
authorized_store(struct device
*dev
,
1732 struct device_attribute
*attr
,
1733 const char *buf
, size_t count
)
1735 struct tb_switch
*sw
= tb_to_switch(dev
);
1739 ret
= kstrtouint(buf
, 0, &val
);
1745 pm_runtime_get_sync(&sw
->dev
);
1746 ret
= tb_switch_set_authorized(sw
, val
);
1747 pm_runtime_mark_last_busy(&sw
->dev
);
1748 pm_runtime_put_autosuspend(&sw
->dev
);
1750 return ret
? ret
: count
;
1752 static DEVICE_ATTR_RW(authorized
);
1754 static ssize_t
boot_show(struct device
*dev
, struct device_attribute
*attr
,
1757 struct tb_switch
*sw
= tb_to_switch(dev
);
1759 return sysfs_emit(buf
, "%u\n", sw
->boot
);
1761 static DEVICE_ATTR_RO(boot
);
1763 static ssize_t
device_show(struct device
*dev
, struct device_attribute
*attr
,
1766 struct tb_switch
*sw
= tb_to_switch(dev
);
1768 return sysfs_emit(buf
, "%#x\n", sw
->device
);
1770 static DEVICE_ATTR_RO(device
);
1773 device_name_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1775 struct tb_switch
*sw
= tb_to_switch(dev
);
1777 return sysfs_emit(buf
, "%s\n", sw
->device_name
?: "");
1779 static DEVICE_ATTR_RO(device_name
);
1782 generation_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1784 struct tb_switch
*sw
= tb_to_switch(dev
);
1786 return sysfs_emit(buf
, "%u\n", sw
->generation
);
1788 static DEVICE_ATTR_RO(generation
);
1790 static ssize_t
key_show(struct device
*dev
, struct device_attribute
*attr
,
1793 struct tb_switch
*sw
= tb_to_switch(dev
);
1796 if (!mutex_trylock(&sw
->tb
->lock
))
1797 return restart_syscall();
1800 ret
= sysfs_emit(buf
, "%*phN\n", TB_SWITCH_KEY_SIZE
, sw
->key
);
1802 ret
= sysfs_emit(buf
, "\n");
1804 mutex_unlock(&sw
->tb
->lock
);
1808 static ssize_t
key_store(struct device
*dev
, struct device_attribute
*attr
,
1809 const char *buf
, size_t count
)
1811 struct tb_switch
*sw
= tb_to_switch(dev
);
1812 u8 key
[TB_SWITCH_KEY_SIZE
];
1813 ssize_t ret
= count
;
1816 if (!strcmp(buf
, "\n"))
1818 else if (hex2bin(key
, buf
, sizeof(key
)))
1821 if (!mutex_trylock(&sw
->tb
->lock
))
1822 return restart_syscall();
1824 if (sw
->authorized
) {
1831 sw
->key
= kmemdup(key
, sizeof(key
), GFP_KERNEL
);
1837 mutex_unlock(&sw
->tb
->lock
);
1840 static DEVICE_ATTR(key
, 0600, key_show
, key_store
);
1842 static ssize_t
speed_show(struct device
*dev
, struct device_attribute
*attr
,
1845 struct tb_switch
*sw
= tb_to_switch(dev
);
1847 return sysfs_emit(buf
, "%u.0 Gb/s\n", sw
->link_speed
);
1851 * Currently all lanes must run at the same speed but we expose here
1852 * both directions to allow possible asymmetric links in the future.
1854 static DEVICE_ATTR(rx_speed
, 0444, speed_show
, NULL
);
1855 static DEVICE_ATTR(tx_speed
, 0444, speed_show
, NULL
);
1857 static ssize_t
rx_lanes_show(struct device
*dev
, struct device_attribute
*attr
,
1860 struct tb_switch
*sw
= tb_to_switch(dev
);
1863 switch (sw
->link_width
) {
1864 case TB_LINK_WIDTH_SINGLE
:
1865 case TB_LINK_WIDTH_ASYM_TX
:
1868 case TB_LINK_WIDTH_DUAL
:
1871 case TB_LINK_WIDTH_ASYM_RX
:
1879 return sysfs_emit(buf
, "%u\n", width
);
1881 static DEVICE_ATTR(rx_lanes
, 0444, rx_lanes_show
, NULL
);
1883 static ssize_t
tx_lanes_show(struct device
*dev
, struct device_attribute
*attr
,
1886 struct tb_switch
*sw
= tb_to_switch(dev
);
1889 switch (sw
->link_width
) {
1890 case TB_LINK_WIDTH_SINGLE
:
1891 case TB_LINK_WIDTH_ASYM_RX
:
1894 case TB_LINK_WIDTH_DUAL
:
1897 case TB_LINK_WIDTH_ASYM_TX
:
1905 return sysfs_emit(buf
, "%u\n", width
);
1907 static DEVICE_ATTR(tx_lanes
, 0444, tx_lanes_show
, NULL
);
1909 static ssize_t
nvm_authenticate_show(struct device
*dev
,
1910 struct device_attribute
*attr
, char *buf
)
1912 struct tb_switch
*sw
= tb_to_switch(dev
);
1915 nvm_get_auth_status(sw
, &status
);
1916 return sysfs_emit(buf
, "%#x\n", status
);
1919 static ssize_t
nvm_authenticate_sysfs(struct device
*dev
, const char *buf
,
1922 struct tb_switch
*sw
= tb_to_switch(dev
);
1925 pm_runtime_get_sync(&sw
->dev
);
1927 if (!mutex_trylock(&sw
->tb
->lock
)) {
1928 ret
= restart_syscall();
1932 if (sw
->no_nvm_upgrade
) {
1937 /* If NVMem devices are not yet added */
1943 ret
= kstrtoint(buf
, 10, &val
);
1947 /* Always clear the authentication status */
1948 nvm_clear_auth_status(sw
);
1951 if (val
== AUTHENTICATE_ONLY
) {
1955 ret
= nvm_authenticate(sw
, true);
1957 if (!sw
->nvm
->flushed
) {
1958 if (!sw
->nvm
->buf
) {
1963 ret
= nvm_validate_and_write(sw
);
1964 if (ret
|| val
== WRITE_ONLY
)
1967 if (val
== WRITE_AND_AUTHENTICATE
) {
1969 ret
= tb_lc_force_power(sw
);
1971 ret
= nvm_authenticate(sw
, false);
1977 mutex_unlock(&sw
->tb
->lock
);
1979 pm_runtime_mark_last_busy(&sw
->dev
);
1980 pm_runtime_put_autosuspend(&sw
->dev
);
1985 static ssize_t
nvm_authenticate_store(struct device
*dev
,
1986 struct device_attribute
*attr
, const char *buf
, size_t count
)
1988 int ret
= nvm_authenticate_sysfs(dev
, buf
, false);
1993 static DEVICE_ATTR_RW(nvm_authenticate
);
1995 static ssize_t
nvm_authenticate_on_disconnect_show(struct device
*dev
,
1996 struct device_attribute
*attr
, char *buf
)
1998 return nvm_authenticate_show(dev
, attr
, buf
);
2001 static ssize_t
nvm_authenticate_on_disconnect_store(struct device
*dev
,
2002 struct device_attribute
*attr
, const char *buf
, size_t count
)
2006 ret
= nvm_authenticate_sysfs(dev
, buf
, true);
2007 return ret
? ret
: count
;
2009 static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect
);
2011 static ssize_t
nvm_version_show(struct device
*dev
,
2012 struct device_attribute
*attr
, char *buf
)
2014 struct tb_switch
*sw
= tb_to_switch(dev
);
2017 if (!mutex_trylock(&sw
->tb
->lock
))
2018 return restart_syscall();
2025 ret
= sysfs_emit(buf
, "%x.%x\n", sw
->nvm
->major
, sw
->nvm
->minor
);
2027 mutex_unlock(&sw
->tb
->lock
);
2031 static DEVICE_ATTR_RO(nvm_version
);
2033 static ssize_t
vendor_show(struct device
*dev
, struct device_attribute
*attr
,
2036 struct tb_switch
*sw
= tb_to_switch(dev
);
2038 return sysfs_emit(buf
, "%#x\n", sw
->vendor
);
2040 static DEVICE_ATTR_RO(vendor
);
2043 vendor_name_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
2045 struct tb_switch
*sw
= tb_to_switch(dev
);
2047 return sysfs_emit(buf
, "%s\n", sw
->vendor_name
?: "");
2049 static DEVICE_ATTR_RO(vendor_name
);
2051 static ssize_t
unique_id_show(struct device
*dev
, struct device_attribute
*attr
,
2054 struct tb_switch
*sw
= tb_to_switch(dev
);
2056 return sysfs_emit(buf
, "%pUb\n", sw
->uuid
);
2058 static DEVICE_ATTR_RO(unique_id
);
2060 static struct attribute
*switch_attrs
[] = {
2061 &dev_attr_authorized
.attr
,
2062 &dev_attr_boot
.attr
,
2063 &dev_attr_device
.attr
,
2064 &dev_attr_device_name
.attr
,
2065 &dev_attr_generation
.attr
,
2067 &dev_attr_nvm_authenticate
.attr
,
2068 &dev_attr_nvm_authenticate_on_disconnect
.attr
,
2069 &dev_attr_nvm_version
.attr
,
2070 &dev_attr_rx_speed
.attr
,
2071 &dev_attr_rx_lanes
.attr
,
2072 &dev_attr_tx_speed
.attr
,
2073 &dev_attr_tx_lanes
.attr
,
2074 &dev_attr_vendor
.attr
,
2075 &dev_attr_vendor_name
.attr
,
2076 &dev_attr_unique_id
.attr
,
2080 static umode_t
switch_attr_is_visible(struct kobject
*kobj
,
2081 struct attribute
*attr
, int n
)
2083 struct device
*dev
= kobj_to_dev(kobj
);
2084 struct tb_switch
*sw
= tb_to_switch(dev
);
2086 if (attr
== &dev_attr_authorized
.attr
) {
2087 if (sw
->tb
->security_level
== TB_SECURITY_NOPCIE
||
2088 sw
->tb
->security_level
== TB_SECURITY_DPONLY
)
2090 } else if (attr
== &dev_attr_device
.attr
) {
2093 } else if (attr
== &dev_attr_device_name
.attr
) {
2094 if (!sw
->device_name
)
2096 } else if (attr
== &dev_attr_vendor
.attr
) {
2099 } else if (attr
== &dev_attr_vendor_name
.attr
) {
2100 if (!sw
->vendor_name
)
2102 } else if (attr
== &dev_attr_key
.attr
) {
2104 sw
->tb
->security_level
== TB_SECURITY_SECURE
&&
2105 sw
->security_level
== TB_SECURITY_SECURE
)
2108 } else if (attr
== &dev_attr_rx_speed
.attr
||
2109 attr
== &dev_attr_rx_lanes
.attr
||
2110 attr
== &dev_attr_tx_speed
.attr
||
2111 attr
== &dev_attr_tx_lanes
.attr
) {
2115 } else if (attr
== &dev_attr_nvm_authenticate
.attr
) {
2116 if (nvm_upgradeable(sw
))
2119 } else if (attr
== &dev_attr_nvm_version
.attr
) {
2120 if (nvm_readable(sw
))
2123 } else if (attr
== &dev_attr_boot
.attr
) {
2127 } else if (attr
== &dev_attr_nvm_authenticate_on_disconnect
.attr
) {
2128 if (sw
->quirks
& QUIRK_FORCE_POWER_LINK_CONTROLLER
)
2133 return sw
->safe_mode
? 0 : attr
->mode
;
2136 static const struct attribute_group switch_group
= {
2137 .is_visible
= switch_attr_is_visible
,
2138 .attrs
= switch_attrs
,
2141 static const struct attribute_group
*switch_groups
[] = {
2146 static void tb_switch_release(struct device
*dev
)
2148 struct tb_switch
*sw
= tb_to_switch(dev
);
2149 struct tb_port
*port
;
2151 dma_port_free(sw
->dma_port
);
2153 tb_switch_for_each_port(sw
, port
) {
2154 ida_destroy(&port
->in_hopids
);
2155 ida_destroy(&port
->out_hopids
);
2159 kfree(sw
->device_name
);
2160 kfree(sw
->vendor_name
);
2167 static int tb_switch_uevent(const struct device
*dev
, struct kobj_uevent_env
*env
)
2169 const struct tb_switch
*sw
= tb_to_switch(dev
);
2172 if (tb_switch_is_usb4(sw
)) {
2173 if (add_uevent_var(env
, "USB4_VERSION=%u.0",
2174 usb4_switch_version(sw
)))
2178 if (!tb_route(sw
)) {
2181 const struct tb_port
*port
;
2184 /* Device is hub if it has any downstream ports */
2185 tb_switch_for_each_port(sw
, port
) {
2186 if (!port
->disabled
&& !tb_is_upstream_port(port
) &&
2187 tb_port_is_null(port
)) {
2193 type
= hub
? "hub" : "device";
2196 if (add_uevent_var(env
, "USB4_TYPE=%s", type
))
2202 * Currently only need to provide the callbacks. Everything else is handled
2203 * in the connection manager.
2205 static int __maybe_unused
tb_switch_runtime_suspend(struct device
*dev
)
2207 struct tb_switch
*sw
= tb_to_switch(dev
);
2208 const struct tb_cm_ops
*cm_ops
= sw
->tb
->cm_ops
;
2210 if (cm_ops
->runtime_suspend_switch
)
2211 return cm_ops
->runtime_suspend_switch(sw
);
2216 static int __maybe_unused
tb_switch_runtime_resume(struct device
*dev
)
2218 struct tb_switch
*sw
= tb_to_switch(dev
);
2219 const struct tb_cm_ops
*cm_ops
= sw
->tb
->cm_ops
;
2221 if (cm_ops
->runtime_resume_switch
)
2222 return cm_ops
->runtime_resume_switch(sw
);
2226 static const struct dev_pm_ops tb_switch_pm_ops
= {
2227 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend
, tb_switch_runtime_resume
,
2231 struct device_type tb_switch_type
= {
2232 .name
= "thunderbolt_device",
2233 .release
= tb_switch_release
,
2234 .uevent
= tb_switch_uevent
,
2235 .pm
= &tb_switch_pm_ops
,
2238 static int tb_switch_get_generation(struct tb_switch
*sw
)
2240 if (tb_switch_is_usb4(sw
))
2243 if (sw
->config
.vendor_id
== PCI_VENDOR_ID_INTEL
) {
2244 switch (sw
->config
.device_id
) {
2245 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE
:
2246 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE
:
2247 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK
:
2248 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C
:
2249 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C
:
2250 case PCI_DEVICE_ID_INTEL_PORT_RIDGE
:
2251 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE
:
2252 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE
:
2255 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE
:
2256 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE
:
2257 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE
:
2260 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE
:
2261 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE
:
2262 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE
:
2263 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE
:
2264 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE
:
2265 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE
:
2266 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE
:
2267 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE
:
2268 case PCI_DEVICE_ID_INTEL_ICL_NHI0
:
2269 case PCI_DEVICE_ID_INTEL_ICL_NHI1
:
2275 * For unknown switches assume generation to be 1 to be on the
2278 tb_sw_warn(sw
, "unsupported switch device id %#x\n",
2279 sw
->config
.device_id
);
2283 static bool tb_switch_exceeds_max_depth(const struct tb_switch
*sw
, int depth
)
2287 if (tb_switch_is_usb4(sw
) ||
2288 (sw
->tb
->root_switch
&& tb_switch_is_usb4(sw
->tb
->root_switch
)))
2289 max_depth
= USB4_SWITCH_MAX_DEPTH
;
2291 max_depth
= TB_SWITCH_MAX_DEPTH
;
2293 return depth
> max_depth
;
2297 * tb_switch_alloc() - allocate a switch
2298 * @tb: Pointer to the owning domain
2299 * @parent: Parent device for this switch
2300 * @route: Route string for this switch
2302 * Allocates and initializes a switch. Will not upload configuration to
2303 * the switch. For that you need to call tb_switch_configure()
2304 * separately. The returned switch should be released by calling
2307 * Return: Pointer to the allocated switch or ERR_PTR() in case of
2310 struct tb_switch
*tb_switch_alloc(struct tb
*tb
, struct device
*parent
,
2313 struct tb_switch
*sw
;
2317 /* Unlock the downstream port so we can access the switch below */
2319 struct tb_switch
*parent_sw
= tb_to_switch(parent
);
2320 struct tb_port
*down
;
2322 down
= tb_port_at(route
, parent_sw
);
2323 tb_port_unlock(down
);
2326 depth
= tb_route_length(route
);
2328 upstream_port
= tb_cfg_get_upstream_port(tb
->ctl
, route
);
2329 if (upstream_port
< 0)
2330 return ERR_PTR(upstream_port
);
2332 sw
= kzalloc(sizeof(*sw
), GFP_KERNEL
);
2334 return ERR_PTR(-ENOMEM
);
2337 ret
= tb_cfg_read(tb
->ctl
, &sw
->config
, route
, 0, TB_CFG_SWITCH
, 0, 5);
2339 goto err_free_sw_ports
;
2341 sw
->generation
= tb_switch_get_generation(sw
);
2343 tb_dbg(tb
, "current switch config:\n");
2344 tb_dump_switch(tb
, sw
);
2346 /* configure switch */
2347 sw
->config
.upstream_port_number
= upstream_port
;
2348 sw
->config
.depth
= depth
;
2349 sw
->config
.route_hi
= upper_32_bits(route
);
2350 sw
->config
.route_lo
= lower_32_bits(route
);
2351 sw
->config
.enabled
= 0;
2353 /* Make sure we do not exceed maximum topology limit */
2354 if (tb_switch_exceeds_max_depth(sw
, depth
)) {
2355 ret
= -EADDRNOTAVAIL
;
2356 goto err_free_sw_ports
;
2359 /* initialize ports */
2360 sw
->ports
= kcalloc(sw
->config
.max_port_number
+ 1, sizeof(*sw
->ports
),
2364 goto err_free_sw_ports
;
2367 for (i
= 0; i
<= sw
->config
.max_port_number
; i
++) {
2368 /* minimum setup for tb_find_cap and tb_drom_read to work */
2369 sw
->ports
[i
].sw
= sw
;
2370 sw
->ports
[i
].port
= i
;
2372 /* Control port does not need HopID allocation */
2374 ida_init(&sw
->ports
[i
].in_hopids
);
2375 ida_init(&sw
->ports
[i
].out_hopids
);
2379 ret
= tb_switch_find_vse_cap(sw
, TB_VSE_CAP_PLUG_EVENTS
);
2381 sw
->cap_plug_events
= ret
;
2383 ret
= tb_switch_find_vse_cap(sw
, TB_VSE_CAP_TIME2
);
2385 sw
->cap_vsec_tmu
= ret
;
2387 ret
= tb_switch_find_vse_cap(sw
, TB_VSE_CAP_LINK_CONTROLLER
);
2391 ret
= tb_switch_find_vse_cap(sw
, TB_VSE_CAP_CP_LP
);
2395 /* Root switch is always authorized */
2397 sw
->authorized
= true;
2399 device_initialize(&sw
->dev
);
2400 sw
->dev
.parent
= parent
;
2401 sw
->dev
.bus
= &tb_bus_type
;
2402 sw
->dev
.type
= &tb_switch_type
;
2403 sw
->dev
.groups
= switch_groups
;
2404 dev_set_name(&sw
->dev
, "%u-%llx", tb
->index
, tb_route(sw
));
2412 return ERR_PTR(ret
);
2416 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2417 * @tb: Pointer to the owning domain
2418 * @parent: Parent device for this switch
2419 * @route: Route string for this switch
2421 * This creates a switch in safe mode. This means the switch pretty much
2422 * lacks all capabilities except DMA configuration port before it is
2423 * flashed with a valid NVM firmware.
2425 * The returned switch must be released by calling tb_switch_put().
2427 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
2430 tb_switch_alloc_safe_mode(struct tb
*tb
, struct device
*parent
, u64 route
)
2432 struct tb_switch
*sw
;
2434 sw
= kzalloc(sizeof(*sw
), GFP_KERNEL
);
2436 return ERR_PTR(-ENOMEM
);
2439 sw
->config
.depth
= tb_route_length(route
);
2440 sw
->config
.route_hi
= upper_32_bits(route
);
2441 sw
->config
.route_lo
= lower_32_bits(route
);
2442 sw
->safe_mode
= true;
2444 device_initialize(&sw
->dev
);
2445 sw
->dev
.parent
= parent
;
2446 sw
->dev
.bus
= &tb_bus_type
;
2447 sw
->dev
.type
= &tb_switch_type
;
2448 sw
->dev
.groups
= switch_groups
;
2449 dev_set_name(&sw
->dev
, "%u-%llx", tb
->index
, tb_route(sw
));
2455 * tb_switch_configure() - Uploads configuration to the switch
2456 * @sw: Switch to configure
2458 * Call this function before the switch is added to the system. It will
2459 * upload configuration to the switch and makes it available for the
2460 * connection manager to use. Can be called to the switch again after
2461 * resume from low power states to re-initialize it.
2463 * Return: %0 in case of success and negative errno in case of failure
2465 int tb_switch_configure(struct tb_switch
*sw
)
2467 struct tb
*tb
= sw
->tb
;
2471 route
= tb_route(sw
);
2473 tb_dbg(tb
, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2474 sw
->config
.enabled
? "restoring" : "initializing", route
,
2475 tb_route_length(route
), sw
->config
.upstream_port_number
);
2477 sw
->config
.enabled
= 1;
2479 if (tb_switch_is_usb4(sw
)) {
2481 * For USB4 devices, we need to program the CM version
2482 * accordingly so that it knows to expose all the
2483 * additional capabilities. Program it according to USB4
2484 * version to avoid changing existing (v1) routers behaviour.
2486 if (usb4_switch_version(sw
) < 2)
2487 sw
->config
.cmuv
= ROUTER_CS_4_CMUV_V1
;
2489 sw
->config
.cmuv
= ROUTER_CS_4_CMUV_V2
;
2490 sw
->config
.plug_events_delay
= 0xa;
2492 /* Enumerate the switch */
2493 ret
= tb_sw_write(sw
, (u32
*)&sw
->config
+ 1, TB_CFG_SWITCH
,
2498 ret
= usb4_switch_setup(sw
);
2500 if (sw
->config
.vendor_id
!= PCI_VENDOR_ID_INTEL
)
2501 tb_sw_warn(sw
, "unknown switch vendor id %#x\n",
2502 sw
->config
.vendor_id
);
2504 if (!sw
->cap_plug_events
) {
2505 tb_sw_warn(sw
, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2509 /* Enumerate the switch */
2510 ret
= tb_sw_write(sw
, (u32
*)&sw
->config
+ 1, TB_CFG_SWITCH
,
2516 return tb_plug_events_active(sw
, true);
/**
 * tb_switch_configuration_valid() - Set the tunneling configuration to be valid
 * @sw: Router to configure
 *
 * Needs to be called before any tunnels can be setup through the
 * router. Can be called to any router.
 *
 * Returns %0 in success and negative errno otherwise.
 */
int tb_switch_configuration_valid(struct tb_switch *sw)
{
	/* Only USB4 routers need an explicit "configuration valid" step */
	if (tb_switch_is_usb4(sw))
		return usb4_switch_configuration_valid(sw);

	return 0;
}
2535 static int tb_switch_set_uuid(struct tb_switch
*sw
)
2544 if (tb_switch_is_usb4(sw
)) {
2545 ret
= usb4_switch_read_uid(sw
, &sw
->uid
);
2551 * The newer controllers include fused UUID as part of
2552 * link controller specific registers
2554 ret
= tb_lc_read_uuid(sw
, uuid
);
2564 * ICM generates UUID based on UID and fills the upper
2565 * two words with ones. This is not strictly following
2566 * UUID format but we want to be compatible with it so
2567 * we do the same here.
2569 uuid
[0] = sw
->uid
& 0xffffffff;
2570 uuid
[1] = (sw
->uid
>> 32) & 0xffffffff;
2571 uuid
[2] = 0xffffffff;
2572 uuid
[3] = 0xffffffff;
2575 sw
->uuid
= kmemdup(uuid
, sizeof(uuid
), GFP_KERNEL
);
2581 static int tb_switch_add_dma_port(struct tb_switch
*sw
)
2586 switch (sw
->generation
) {
2588 /* Only root switch can be upgraded */
2595 ret
= tb_switch_set_uuid(sw
);
2602 * DMA port is the only thing available when the switch
2610 if (sw
->no_nvm_upgrade
)
2613 if (tb_switch_is_usb4(sw
)) {
2614 ret
= usb4_switch_nvm_authenticate_status(sw
, &status
);
2619 tb_sw_info(sw
, "switch flash authentication failed\n");
2620 nvm_set_auth_status(sw
, status
);
2626 /* Root switch DMA port requires running firmware */
2627 if (!tb_route(sw
) && !tb_switch_is_icm(sw
))
2630 sw
->dma_port
= dma_port_alloc(sw
);
2635 * If there is status already set then authentication failed
2636 * when the dma_port_flash_update_auth() returned. Power cycling
2637 * is not needed (it was done already) so only thing we do here
2638 * is to unblock runtime PM of the root port.
2640 nvm_get_auth_status(sw
, &status
);
2643 nvm_authenticate_complete_dma_port(sw
);
2648 * Check status of the previous flash authentication. If there
2649 * is one we need to power cycle the switch in any case to make
2650 * it functional again.
2652 ret
= dma_port_flash_update_auth_status(sw
->dma_port
, &status
);
2656 /* Now we can allow root port to suspend again */
2658 nvm_authenticate_complete_dma_port(sw
);
2661 tb_sw_info(sw
, "switch flash authentication failed\n");
2662 nvm_set_auth_status(sw
, status
);
2665 tb_sw_info(sw
, "power cycling the switch now\n");
2666 dma_port_power_cycle(sw
->dma_port
);
2669 * We return error here which causes the switch adding failure.
2670 * It should appear back after power cycle is complete.
2675 static void tb_switch_default_link_ports(struct tb_switch
*sw
)
2679 for (i
= 1; i
<= sw
->config
.max_port_number
; i
++) {
2680 struct tb_port
*port
= &sw
->ports
[i
];
2681 struct tb_port
*subordinate
;
2683 if (!tb_port_is_null(port
))
2686 /* Check for the subordinate port */
2687 if (i
== sw
->config
.max_port_number
||
2688 !tb_port_is_null(&sw
->ports
[i
+ 1]))
2691 /* Link them if not already done so (by DROM) */
2692 subordinate
= &sw
->ports
[i
+ 1];
2693 if (!port
->dual_link_port
&& !subordinate
->dual_link_port
) {
2695 port
->dual_link_port
= subordinate
;
2696 subordinate
->link_nr
= 1;
2697 subordinate
->dual_link_port
= port
;
2699 tb_sw_dbg(sw
, "linked ports %d <-> %d\n",
2700 port
->port
, subordinate
->port
);
2705 static bool tb_switch_lane_bonding_possible(struct tb_switch
*sw
)
2707 const struct tb_port
*up
= tb_upstream_port(sw
);
2709 if (!up
->dual_link_port
|| !up
->dual_link_port
->remote
)
2712 if (tb_switch_is_usb4(sw
))
2713 return usb4_switch_lane_bonding_possible(sw
);
2714 return tb_lc_lane_bonding_possible(sw
);
2717 static int tb_switch_update_link_attributes(struct tb_switch
*sw
)
2720 bool change
= false;
2723 if (!tb_route(sw
) || tb_switch_is_icm(sw
))
2726 up
= tb_upstream_port(sw
);
2728 ret
= tb_port_get_link_speed(up
);
2731 if (sw
->link_speed
!= ret
)
2733 sw
->link_speed
= ret
;
2735 ret
= tb_port_get_link_width(up
);
2738 if (sw
->link_width
!= ret
)
2740 sw
->link_width
= ret
;
2742 /* Notify userspace that there is possible link attribute change */
2743 if (device_is_registered(&sw
->dev
) && change
)
2744 kobject_uevent(&sw
->dev
.kobj
, KOBJ_CHANGE
);
2749 /* Must be called after tb_switch_update_link_attributes() */
2750 static void tb_switch_link_init(struct tb_switch
*sw
)
2752 struct tb_port
*up
, *down
;
2755 if (!tb_route(sw
) || tb_switch_is_icm(sw
))
2758 tb_sw_dbg(sw
, "current link speed %u.0 Gb/s\n", sw
->link_speed
);
2759 tb_sw_dbg(sw
, "current link width %s\n", tb_width_name(sw
->link_width
));
2761 bonded
= sw
->link_width
>= TB_LINK_WIDTH_DUAL
;
2764 * Gen 4 links come up as bonded so update the port structures
2767 up
= tb_upstream_port(sw
);
2768 down
= tb_switch_downstream_port(sw
);
2770 up
->bonded
= bonded
;
2771 if (up
->dual_link_port
)
2772 up
->dual_link_port
->bonded
= bonded
;
2773 tb_port_update_credits(up
);
2775 down
->bonded
= bonded
;
2776 if (down
->dual_link_port
)
2777 down
->dual_link_port
->bonded
= bonded
;
2778 tb_port_update_credits(down
);
2780 if (tb_port_get_link_generation(up
) < 4)
2784 * Set the Gen 4 preferred link width. This is what the router
2785 * prefers when the link is brought up. If the router does not
2786 * support asymmetric link configuration, this also will be set
2787 * to TB_LINK_WIDTH_DUAL.
2789 sw
->preferred_link_width
= sw
->link_width
;
2790 tb_sw_dbg(sw
, "preferred link width %s\n",
2791 tb_width_name(sw
->preferred_link_width
));
2795 * tb_switch_lane_bonding_enable() - Enable lane bonding
2796 * @sw: Switch to enable lane bonding
2798 * Connection manager can call this function to enable lane bonding of a
2799 * switch. If conditions are correct and both switches support the feature,
2800 * lanes are bonded. It is safe to call this to any switch.
2802 static int tb_switch_lane_bonding_enable(struct tb_switch
*sw
)
2804 struct tb_port
*up
, *down
;
2808 if (!tb_switch_lane_bonding_possible(sw
))
2811 up
= tb_upstream_port(sw
);
2812 down
= tb_switch_downstream_port(sw
);
2814 if (!tb_port_width_supported(up
, TB_LINK_WIDTH_DUAL
) ||
2815 !tb_port_width_supported(down
, TB_LINK_WIDTH_DUAL
))
2819 * Both lanes need to be in CL0. Here we assume lane 0 already be in
2820 * CL0 and check just for lane 1.
2822 if (tb_wait_for_port(down
->dual_link_port
, false) <= 0)
2825 ret
= tb_port_lane_bonding_enable(up
);
2827 tb_port_warn(up
, "failed to enable lane bonding\n");
2831 ret
= tb_port_lane_bonding_enable(down
);
2833 tb_port_warn(down
, "failed to enable lane bonding\n");
2834 tb_port_lane_bonding_disable(up
);
2838 /* Any of the widths are all bonded */
2839 width
= TB_LINK_WIDTH_DUAL
| TB_LINK_WIDTH_ASYM_TX
|
2840 TB_LINK_WIDTH_ASYM_RX
;
2842 return tb_port_wait_for_link_width(down
, width
, 100);
2846 * tb_switch_lane_bonding_disable() - Disable lane bonding
2847 * @sw: Switch whose lane bonding to disable
2849 * Disables lane bonding between @sw and parent. This can be called even
2850 * if lanes were not bonded originally.
2852 static int tb_switch_lane_bonding_disable(struct tb_switch
*sw
)
2854 struct tb_port
*up
, *down
;
2857 up
= tb_upstream_port(sw
);
2862 * If the link is Gen 4 there is no way to switch the link to
2863 * two single lane links so avoid that here. Also don't bother
2864 * if the link is not up anymore (sw is unplugged).
2866 ret
= tb_port_get_link_generation(up
);
2872 down
= tb_switch_downstream_port(sw
);
2873 tb_port_lane_bonding_disable(up
);
2874 tb_port_lane_bonding_disable(down
);
2877 * It is fine if we get other errors as the router might have
2880 return tb_port_wait_for_link_width(down
, TB_LINK_WIDTH_SINGLE
, 100);
/*
 * NOTE(review): garbled extract -- several statements (error returns,
 * the assignment of 'port') are missing; verify against upstream.
 * Visible flow: pick the opposite asymmetric width for the downstream
 * side, program both ends via tb_port_set_link_width(), start the
 * asymmetric transition with usb4_port_asym_start() only when the width
 * actually changes, then wait for the upstream port to reach @width.
 */
2883 /* Note updating sw->link_width done in tb_switch_update_link_attributes() */
2884 static int tb_switch_asym_enable(struct tb_switch
*sw
, enum tb_link_width width
)
2886 struct tb_port
*up
, *down
, *port
;
2887 enum tb_link_width down_width
;
2890 up
= tb_upstream_port(sw
);
2891 down
= tb_switch_downstream_port(sw
);
/* TX from the router's perspective is RX from the link partner's. */
2893 if (width
== TB_LINK_WIDTH_ASYM_TX
) {
2894 down_width
= TB_LINK_WIDTH_ASYM_RX
;
2897 down_width
= TB_LINK_WIDTH_ASYM_TX
;
2901 ret
= tb_port_set_link_width(up
, width
);
2905 ret
= tb_port_set_link_width(down
, down_width
);
2910 * Initiate the change in the router that one of its TX lanes is
2911 * changing to RX but do so only if there is an actual change.
2913 if (sw
->link_width
!= width
) {
/* NOTE(review): 'port' assignment not visible in this extract. */
2914 ret
= usb4_port_asym_start(port
);
2918 ret
= tb_port_wait_for_link_width(up
, width
, 100);
/*
 * NOTE(review): garbled extract (missing error checks/returns) -- verify
 * against upstream.  Visible flow: set both link ends back to dual
 * (symmetric) width, trigger usb4_port_asym_start() on whichever side
 * currently has three TX lanes, then wait for the upstream port to
 * report dual width.
 */
2926 /* Note updating sw->link_width done in tb_switch_update_link_attributes() */
2927 static int tb_switch_asym_disable(struct tb_switch
*sw
)
2929 struct tb_port
*up
, *down
;
2932 up
= tb_upstream_port(sw
);
2933 down
= tb_switch_downstream_port(sw
);
2935 ret
= tb_port_set_link_width(up
, TB_LINK_WIDTH_DUAL
);
2939 ret
= tb_port_set_link_width(down
, TB_LINK_WIDTH_DUAL
);
2944 * Initiate the change in the router that has three TX lanes and
2945 * is changing one of its TX lanes to RX but only if there is a
2946 * change in the link width.
2948 if (sw
->link_width
> TB_LINK_WIDTH_DUAL
) {
2949 if (sw
->link_width
== TB_LINK_WIDTH_ASYM_TX
)
2950 ret
= usb4_port_asym_start(up
);
2952 ret
= usb4_port_asym_start(down
);
2956 ret
= tb_port_wait_for_link_width(up
, TB_LINK_WIDTH_DUAL
, 100);
/*
 * NOTE(review): garbled extract -- the switch statement header, break
 * statements and error-path labels are missing; verify against upstream.
 * Visible flow: dispatch on @width (single -> lane bonding disable,
 * dual -> disable asymmetry if set then enable bonding, asym TX/RX ->
 * tb_switch_asym_enable), log timeout/other failures, then refresh port
 * credits and cached link attributes.
 */
2965 * tb_switch_set_link_width() - Configure router link width
2966 * @sw: Router to configure
2967 * @width: The new link width
2969 * Set device router link width to @width from router upstream port
2970 * perspective. Supports also asymmetric links if the routers boths side
2971 * of the link supports it.
2973 * Does nothing for host router.
2975 * Returns %0 in case of success, negative errno otherwise.
2977 int tb_switch_set_link_width(struct tb_switch
*sw
, enum tb_link_width width
)
2979 struct tb_port
*up
, *down
;
2985 up
= tb_upstream_port(sw
);
2986 down
= tb_switch_downstream_port(sw
);
2989 case TB_LINK_WIDTH_SINGLE
:
2990 ret
= tb_switch_lane_bonding_disable(sw
);
2993 case TB_LINK_WIDTH_DUAL
:
2994 if (sw
->link_width
== TB_LINK_WIDTH_ASYM_TX
||
2995 sw
->link_width
== TB_LINK_WIDTH_ASYM_RX
) {
2996 ret
= tb_switch_asym_disable(sw
);
3000 ret
= tb_switch_lane_bonding_enable(sw
);
3003 case TB_LINK_WIDTH_ASYM_TX
:
3004 case TB_LINK_WIDTH_ASYM_RX
:
3005 ret
= tb_switch_asym_enable(sw
, width
);
3014 tb_sw_warn(sw
, "timeout changing link width\n");
3023 tb_sw_dbg(sw
, "failed to change link width: %d\n", ret
);
/* Buffer allocation may change with link width; re-read credits. */
3027 tb_port_update_credits(down
);
3028 tb_port_update_credits(up
);
3030 tb_switch_update_link_attributes(sw
);
3032 tb_sw_dbg(sw
, "link width set to %s\n", tb_width_name(width
));
/*
 * NOTE(review): garbled extract (early return for host/ICM routers and
 * the 'down' assignment are not fully visible) -- verify upstream.
 * Visible flow: skip host/ICM routers, then mark the upstream link
 * configured on both ends, using the USB4 or link-controller (LC) call
 * depending on the router generation of each side.
 */
3037 * tb_switch_configure_link() - Set link configured
3038 * @sw: Switch whose link is configured
3040 * Sets the link upstream from @sw configured (from both ends) so that
3041 * it will not be disconnected when the domain exits sleep. Can be
3042 * called for any switch.
3044 * It is recommended that this is called after lane bonding is enabled.
3046 * Returns %0 on success and negative errno in case of error.
3048 int tb_switch_configure_link(struct tb_switch
*sw
)
3050 struct tb_port
*up
, *down
;
/* Host router (no route) and ICM-managed domains need no configuration. */
3053 if (!tb_route(sw
) || tb_switch_is_icm(sw
))
3056 up
= tb_upstream_port(sw
);
3057 if (tb_switch_is_usb4(up
->sw
))
3058 ret
= usb4_port_configure(up
);
3060 ret
= tb_lc_configure_port(up
);
3065 if (tb_switch_is_usb4(down
->sw
))
3066 return usb4_port_configure(down
);
3067 return tb_lc_configure_port(down
);
/*
 * NOTE(review): garbled extract ('down' assignment and early returns not
 * fully visible) -- verify upstream.  Mirror of tb_switch_configure_link():
 * clears the configured bit on both ends of the upstream link unless the
 * router is already unplugged, is the host router, or is ICM-managed.
 */
3071 * tb_switch_unconfigure_link() - Unconfigure link
3072 * @sw: Switch whose link is unconfigured
3074 * Sets the link unconfigured so the @sw will be disconnected if the
3075 * domain exists sleep.
3077 void tb_switch_unconfigure_link(struct tb_switch
*sw
)
3079 struct tb_port
*up
, *down
;
3081 if (sw
->is_unplugged
)
3083 if (!tb_route(sw
) || tb_switch_is_icm(sw
))
3086 up
= tb_upstream_port(sw
);
3087 if (tb_switch_is_usb4(up
->sw
))
3088 usb4_port_unconfigure(up
);
3090 tb_lc_unconfigure_port(up
);
3093 if (tb_switch_is_usb4(down
->sw
))
3094 usb4_port_unconfigure(down
);
3096 tb_lc_unconfigure_port(down
);
/*
 * Initialize preferred buffer (credit) allocation for a USB4 router.
 * No-op for ICM-managed or non-USB4 routers; a failure is only logged
 * because the router falls back to its default allocation.
 * NOTE(review): extract is garbled (early returns elided) -- verify
 * against upstream.
 */
3099 static void tb_switch_credits_init(struct tb_switch
*sw
)
3101 if (tb_switch_is_icm(sw
))
3103 if (!tb_switch_is_usb4(sw
))
3105 if (usb4_switch_credits_init(sw
))
3106 tb_sw_info(sw
, "failed to determine preferred buffer allocation, using defaults\n");
/*
 * Enable hot-plug events on every USB4-capable port of @sw (skipped
 * entirely for ICM-managed routers; ports without cap_usb4 are skipped).
 * NOTE(review): extract is garbled (loop body error handling elided) --
 * verify against upstream.
 */
3109 static int tb_switch_port_hotplug_enable(struct tb_switch
*sw
)
3111 struct tb_port
*port
;
3113 if (tb_switch_is_icm(sw
))
3116 tb_switch_for_each_port(sw
, port
) {
3119 if (!port
->cap_usb4
)
3122 res
= usb4_port_hotplug_enable(port
);
/*
 * NOTE(review): garbled extract -- error-handling branches, gotos and the
 * error-unwind labels are largely missing; verify against upstream.
 * Visible sequence: add DMA port, then (outside safe mode) init credits,
 * read DROM, set UUID, init each non-disabled port, apply quirks, set up
 * default link ports / link attributes / CLx / TMU, enable port hotplug,
 * register the device, add USB4 ports and NVM devices, enable wakeup and
 * runtime PM, and init debugfs.  The tail lines are the error-unwind
 * (remove USB4 ports, device_del).
 */
3130 * tb_switch_add() - Add a switch to the domain
3131 * @sw: Switch to add
3133 * This is the last step in adding switch to the domain. It will read
3134 * identification information from DROM and initializes ports so that
3135 * they can be used to connect other switches. The switch will be
3136 * exposed to the userspace when this function successfully returns. To
3137 * remove and release the switch, call tb_switch_remove().
3139 * Return: %0 in case of success and negative errno in case of failure
3141 int tb_switch_add(struct tb_switch
*sw
)
3146 * Initialize DMA control port now before we read DROM. Recent
3147 * host controllers have more complete DROM on NVM that includes
3148 * vendor and model identification strings which we then expose
3149 * to the userspace. NVM can be accessed through DMA
3150 * configuration based mailbox.
3152 ret
= tb_switch_add_dma_port(sw
);
3154 dev_err(&sw
->dev
, "failed to add DMA port\n");
3158 if (!sw
->safe_mode
) {
3159 tb_switch_credits_init(sw
);
/* DROM failure is non-fatal: identification info is just missing. */
3162 ret
= tb_drom_read(sw
);
3164 dev_warn(&sw
->dev
, "reading DROM failed: %d\n", ret
);
3165 tb_sw_dbg(sw
, "uid: %#llx\n", sw
->uid
);
3167 ret
= tb_switch_set_uuid(sw
);
3169 dev_err(&sw
->dev
, "failed to set UUID\n");
3173 for (i
= 0; i
<= sw
->config
.max_port_number
; i
++) {
3174 if (sw
->ports
[i
].disabled
) {
3175 tb_port_dbg(&sw
->ports
[i
], "disabled by eeprom\n");
3178 ret
= tb_init_port(&sw
->ports
[i
]);
3180 dev_err(&sw
->dev
, "failed to initialize port %d\n", i
);
3185 tb_check_quirks(sw
);
3187 tb_switch_default_link_ports(sw
);
3189 ret
= tb_switch_update_link_attributes(sw
);
3193 tb_switch_link_init(sw
);
3195 ret
= tb_switch_clx_init(sw
);
3199 ret
= tb_switch_tmu_init(sw
);
3204 ret
= tb_switch_port_hotplug_enable(sw
);
3208 ret
= device_add(&sw
->dev
);
3210 dev_err(&sw
->dev
, "failed to add device: %d\n", ret
);
3215 dev_info(&sw
->dev
, "new device found, vendor=%#x device=%#x\n",
3216 sw
->vendor
, sw
->device
);
3217 if (sw
->vendor_name
&& sw
->device_name
)
3218 dev_info(&sw
->dev
, "%s %s\n", sw
->vendor_name
,
3222 ret
= usb4_switch_add_ports(sw
);
3224 dev_err(&sw
->dev
, "failed to add USB4 ports\n");
3228 ret
= tb_switch_nvm_add(sw
);
3230 dev_err(&sw
->dev
, "failed to add NVM devices\n");
3235 * Thunderbolt routers do not generate wakeups themselves but
3236 * they forward wakeups from tunneled protocols, so enable it
3239 device_init_wakeup(&sw
->dev
, true);
3241 pm_runtime_set_active(&sw
->dev
);
3243 pm_runtime_set_autosuspend_delay(&sw
->dev
, TB_AUTOSUSPEND_DELAY
);
3244 pm_runtime_use_autosuspend(&sw
->dev
);
3245 pm_runtime_mark_last_busy(&sw
->dev
);
3246 pm_runtime_enable(&sw
->dev
);
3247 pm_request_autosuspend(&sw
->dev
);
3250 tb_switch_debugfs_init(sw
);
/* Error-unwind path: undo usb4_switch_add_ports()/device_add(). */
3254 usb4_switch_remove_ports(sw
);
3256 device_del(&sw
->dev
);
/*
 * NOTE(review): garbled extract (some braces/returns elided) -- verify
 * against upstream.  Visible flow: tear down debugfs, pin the device
 * active and disable runtime PM, recursively remove downstream switches
 * and xdomains port by port (clearing the pointers to defend against
 * reuse), remove retimers, deactivate plug events if still plugged,
 * remove NVM and USB4 port devices, then unregister the device (final
 * put happens via the device refcount).
 */
3262 * tb_switch_remove() - Remove and release a switch
3263 * @sw: Switch to remove
3265 * This will remove the switch from the domain and release it after last
3266 * reference count drops to zero. If there are switches connected below
3267 * this switch, they will be removed as well.
3269 void tb_switch_remove(struct tb_switch
*sw
)
3271 struct tb_port
*port
;
3273 tb_switch_debugfs_remove(sw
);
3276 pm_runtime_get_sync(&sw
->dev
);
3277 pm_runtime_disable(&sw
->dev
);
3280 /* port 0 is the switch itself and never has a remote */
3281 tb_switch_for_each_port(sw
, port
) {
3282 if (tb_port_has_remote(port
)) {
3283 tb_switch_remove(port
->remote
->sw
);
3284 port
->remote
= NULL
;
3285 } else if (port
->xdomain
) {
3286 tb_xdomain_remove(port
->xdomain
);
3287 port
->xdomain
= NULL
;
3290 /* Remove any downstream retimers */
3291 tb_retimer_remove_all(port
);
3294 if (!sw
->is_unplugged
)
3295 tb_plug_events_active(sw
, false);
3297 tb_switch_nvm_remove(sw
);
3298 usb4_switch_remove_ports(sw
);
3301 dev_info(&sw
->dev
, "device disconnected\n");
3302 device_unregister(&sw
->dev
);
/*
 * NOTE(review): garbled extract (returns after the WARNs elided) --
 * verify against upstream.  Recursively marks @sw and everything below
 * it unplugged; warns (and presumably bails) for the root switch or a
 * switch already marked unplugged.
 */
3306 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
3307 * @sw: Router to mark unplugged
3309 void tb_sw_set_unplugged(struct tb_switch
*sw
)
3311 struct tb_port
*port
;
3313 if (sw
== sw
->tb
->root_switch
) {
3314 tb_sw_WARN(sw
, "cannot unplug root switch\n");
3317 if (sw
->is_unplugged
) {
3318 tb_sw_WARN(sw
, "is_unplugged already set\n");
3321 sw
->is_unplugged
= true;
/* Propagate: recurse into remotes, flag xdomains directly. */
3322 tb_switch_for_each_port(sw
, port
) {
3323 if (tb_port_has_remote(port
))
3324 tb_sw_set_unplugged(port
->remote
->sw
);
3325 else if (port
->xdomain
)
3326 port
->xdomain
->is_unplugged
= true;
/*
 * Program the wake sources (@flags, 0 = disable) of @sw, dispatching to
 * the USB4 or link-controller implementation.  The two tb_sw_dbg()
 * calls are the enable/disable branches of an if/else elided by the
 * extract -- NOTE(review): verify against upstream.
 */
3330 static int tb_switch_set_wake(struct tb_switch
*sw
, unsigned int flags
)
3333 tb_sw_dbg(sw
, "enabling wakeup: %#x\n", flags
);
3335 tb_sw_dbg(sw
, "disabling wakeup\n");
3337 if (tb_switch_is_usb4(sw
))
3338 return usb4_switch_set_wake(sw
, flags
);
3339 return tb_lc_set_wake(sw
, flags
);
/*
 * NOTE(review): heavily garbled extract -- the root-switch check, uid
 * declaration, several if/return statements and loop braces are missing;
 * verify against upstream.  Visible flow: for non-root switches confirm
 * the router still answers on its route and still has the same UID,
 * reconfigure it, clear wakes, re-init TMU, then walk the ports: resume
 * each null port, mark lost links unplugged, always unlock the port, and
 * recurse into surviving remotes (marking them unplugged on failure).
 */
3342 int tb_switch_resume(struct tb_switch
*sw
)
3344 struct tb_port
*port
;
3347 tb_sw_dbg(sw
, "resuming switch\n");
3350 * Check for UID of the connected switches except for root
3351 * switch which we assume cannot be removed.
3357 * Check first that we can still read the switch config
3358 * space. It may be that there is now another domain
3361 err
= tb_cfg_get_upstream_port(sw
->tb
->ctl
, tb_route(sw
));
3363 tb_sw_info(sw
, "switch not present anymore\n");
3367 /* We don't have any way to confirm this was the same device */
3371 if (tb_switch_is_usb4(sw
))
3372 err
= usb4_switch_read_uid(sw
, &uid
);
3374 err
= tb_drom_read_uid_only(sw
, &uid
);
3376 tb_sw_warn(sw
, "uid read failed\n");
3379 if (sw
->uid
!= uid
) {
3381 "changed while suspended (uid %#llx -> %#llx)\n",
3387 err
= tb_switch_configure(sw
);
/* Disable any wakes that were armed for the suspend. */
3392 tb_switch_set_wake(sw
, 0);
3394 err
= tb_switch_tmu_init(sw
);
3398 /* check for surviving downstream switches */
3399 tb_switch_for_each_port(sw
, port
) {
3400 if (!tb_port_is_null(port
))
3403 if (!tb_port_resume(port
))
3406 if (tb_wait_for_port(port
, true) <= 0) {
3408 "lost during suspend, disconnecting\n");
3409 if (tb_port_has_remote(port
))
3410 tb_sw_set_unplugged(port
->remote
->sw
);
3411 else if (port
->xdomain
)
3412 port
->xdomain
->is_unplugged
= true;
3415 * Always unlock the port so the downstream
3416 * switch/domain is accessible.
3418 if (tb_port_unlock(port
))
3419 tb_port_warn(port
, "failed to unlock port\n");
3420 if (port
->remote
&& tb_switch_resume(port
->remote
->sw
)) {
3422 "lost during suspend, disconnecting\n");
3423 tb_sw_set_unplugged(port
->remote
->sw
);
/*
 * NOTE(review): garbled extract -- the runtime/system-sleep branch
 * structure around the wake-flag selection is partly missing; verify
 * against upstream.  Visible flow: disable CLx, deactivate plug events,
 * suspend children depth-first, pick wake flags (runtime suspend wants
 * connect/disconnect/protocol wakes; system sleep honours
 * device_may_wakeup()), program the wakes, then set the sleep bit via
 * the USB4 or LC path.
 */
3431 * tb_switch_suspend() - Put a switch to sleep
3432 * @sw: Switch to suspend
3433 * @runtime: Is this runtime suspend or system sleep
3435 * Suspends router and all its children. Enables wakes according to
3436 * value of @runtime and then sets sleep bit for the router. If @sw is
3437 * host router the domain is ready to go to sleep once this function
3440 void tb_switch_suspend(struct tb_switch
*sw
, bool runtime
)
3442 unsigned int flags
= 0;
3443 struct tb_port
*port
;
3446 tb_sw_dbg(sw
, "suspending switch\n");
3449 * Actually only needed for Titan Ridge but for simplicity can be
3450 * done for USB4 device too as CLx is re-enabled at resume.
3452 tb_switch_clx_disable(sw
);
3454 err
= tb_plug_events_active(sw
, false);
3458 tb_switch_for_each_port(sw
, port
) {
3459 if (tb_port_has_remote(port
))
3460 tb_switch_suspend(port
->remote
->sw
, runtime
);
3464 /* Trigger wake when something is plugged in/out */
3465 flags
|= TB_WAKE_ON_CONNECT
| TB_WAKE_ON_DISCONNECT
;
3466 flags
|= TB_WAKE_ON_USB4
;
3467 flags
|= TB_WAKE_ON_USB3
| TB_WAKE_ON_PCIE
| TB_WAKE_ON_DP
;
3468 } else if (device_may_wakeup(&sw
->dev
)) {
3469 flags
|= TB_WAKE_ON_USB4
| TB_WAKE_ON_USB3
| TB_WAKE_ON_PCIE
;
3472 tb_switch_set_wake(sw
, flags
);
3474 if (tb_switch_is_usb4(sw
))
3475 usb4_switch_set_sleep(sw
);
3477 tb_lc_set_sleep(sw
);
/* Simple USB4-vs-LC dispatch; body appears complete despite garbling. */
3481 * tb_switch_query_dp_resource() - Query availability of DP resource
3482 * @sw: Switch whose DP resource is queried
3485 * Queries availability of DP resource for DP tunneling using switch
3486 * specific means. Returns %true if resource is available.
3488 bool tb_switch_query_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
3490 if (tb_switch_is_usb4(sw
))
3491 return usb4_switch_query_dp_resource(sw
, in
);
3492 return tb_lc_dp_sink_query(sw
, in
);
/*
 * NOTE(review): garbled extract (success/failure branch structure
 * elided) -- verify against upstream.  Allocates via the USB4 or LC
 * path, warns on failure and logs on success.
 */
3496 * tb_switch_alloc_dp_resource() - Allocate available DP resource
3497 * @sw: Switch whose DP resource is allocated
3500 * Allocates DP resource for DP tunneling. The resource must be
3501 * available for this to succeed (see tb_switch_query_dp_resource()).
3502 * Returns %0 in success and negative errno otherwise.
3504 int tb_switch_alloc_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
3508 if (tb_switch_is_usb4(sw
))
3509 ret
= usb4_switch_alloc_dp_resource(sw
, in
);
3511 ret
= tb_lc_dp_sink_alloc(sw
, in
);
3514 tb_sw_warn(sw
, "failed to allocate DP resource for port %d\n",
3517 tb_sw_dbg(sw
, "allocated DP resource for port %d\n", in
->port
);
/*
 * NOTE(review): garbled extract (branch structure elided) -- verify
 * against upstream.  Mirror of tb_switch_alloc_dp_resource(): releases
 * via the USB4 or LC path, warns on failure, logs on success.
 */
3523 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
3524 * @sw: Switch whose DP resource is de-allocated
3527 * De-allocates DP resource that was previously allocated for DP
3530 void tb_switch_dealloc_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
3534 if (tb_switch_is_usb4(sw
))
3535 ret
= usb4_switch_dealloc_dp_resource(sw
, in
);
3537 ret
= tb_lc_dp_sink_dealloc(sw
, in
);
3540 tb_sw_warn(sw
, "failed to de-allocate DP resource for port %d\n",
3543 tb_sw_dbg(sw
, "released DP resource for port %d\n", in
->port
);
/*
 * NOTE(review): struct tb_sw_lookup's fields are missing from this
 * extract (expected: tb, uuid, route, link, depth based on the uses
 * below) -- verify against upstream.
 */
3546 struct tb_sw_lookup
{
/*
 * bus_find_device() match callback: matches a tb_switch against a
 * tb_sw_lookup by (in priority order) UUID, route string, or
 * link+depth, after requiring the same domain.  Early returns elided
 * by the garbled extract.
 */
3554 static int tb_switch_match(struct device
*dev
, const void *data
)
3556 struct tb_switch
*sw
= tb_to_switch(dev
);
3557 const struct tb_sw_lookup
*lookup
= data
;
3561 if (sw
->tb
!= lookup
->tb
)
3565 return !memcmp(sw
->uuid
, lookup
->uuid
, sizeof(*lookup
->uuid
));
3567 if (lookup
->route
) {
3568 return sw
->config
.route_lo
== lower_32_bits(lookup
->route
) &&
3569 sw
->config
.route_hi
== upper_32_bits(lookup
->route
);
3572 /* Root switch is matched only by depth */
3576 return sw
->link
== lookup
->link
&& sw
->depth
== lookup
->depth
;
/*
 * NOTE(review): garbled extract ('dev' declaration and lookup.tb/.link
 * assignments elided) -- verify against upstream.  Builds a zeroed
 * tb_sw_lookup keyed by link+depth and walks the tb bus with
 * tb_switch_match(); bus_find_device() takes the reference the kdoc
 * promises.
 */
3580 * tb_switch_find_by_link_depth() - Find switch by link and depth
3581 * @tb: Domain the switch belongs
3582 * @link: Link number the switch is connected
3583 * @depth: Depth of the switch in link
3585 * Returned switch has reference count increased so the caller needs to
3586 * call tb_switch_put() when done with the switch.
3588 struct tb_switch
*tb_switch_find_by_link_depth(struct tb
*tb
, u8 link
, u8 depth
)
3590 struct tb_sw_lookup lookup
;
3593 memset(&lookup
, 0, sizeof(lookup
));
3596 lookup
.depth
= depth
;
3598 dev
= bus_find_device(&tb_bus_type
, NULL
, &lookup
, tb_switch_match
);
3600 return tb_to_switch(dev
);
/*
 * NOTE(review): garbled extract (lookup.tb/.uuid assignments elided) --
 * verify against upstream.  Same pattern as find_by_link_depth but keyed
 * by UUID.
 */
3606 * tb_switch_find_by_uuid() - Find switch by UUID
3607 * @tb: Domain the switch belongs
3608 * @uuid: UUID to look for
3610 * Returned switch has reference count increased so the caller needs to
3611 * call tb_switch_put() when done with the switch.
3613 struct tb_switch
*tb_switch_find_by_uuid(struct tb
*tb
, const uuid_t
*uuid
)
3615 struct tb_sw_lookup lookup
;
3618 memset(&lookup
, 0, sizeof(lookup
));
3622 dev
= bus_find_device(&tb_bus_type
, NULL
, &lookup
, tb_switch_match
);
3624 return tb_to_switch(dev
);
/*
 * NOTE(review): garbled extract (the "if (!route)" guard before the
 * root-switch shortcut and lookup.tb assignment are elided) -- verify
 * against upstream.  Route 0 resolves to the root switch directly;
 * otherwise keyed bus search as in the other find helpers.
 */
3630 * tb_switch_find_by_route() - Find switch by route string
3631 * @tb: Domain the switch belongs
3632 * @route: Route string to look for
3634 * Returned switch has reference count increased so the caller needs to
3635 * call tb_switch_put() when done with the switch.
3637 struct tb_switch
*tb_switch_find_by_route(struct tb
*tb
, u64 route
)
3639 struct tb_sw_lookup lookup
;
3643 return tb_switch_get(tb
->root_switch
);
3645 memset(&lookup
, 0, sizeof(lookup
));
3647 lookup
.route
= route
;
3649 dev
= bus_find_device(&tb_bus_type
, NULL
, &lookup
, tb_switch_match
);
3651 return tb_to_switch(dev
);
/*
 * NOTE(review): garbled extract -- the "return port;" inside the loop
 * and the trailing "return NULL;" are elided; verify against upstream.
 * Linear scan of @sw's ports for the first one whose config type
 * matches @type.
 */
3657 * tb_switch_find_port() - return the first port of @type on @sw or NULL
3658 * @sw: Switch to find the port from
3659 * @type: Port type to look for
3661 struct tb_port
*tb_switch_find_port(struct tb_switch
*sw
,
3662 enum tb_port_type type
)
3664 struct tb_port
*port
;
3666 tb_switch_for_each_port(sw
, port
) {
3667 if (port
->config
.type
== type
)
/*
 * NOTE(review): garbled extract (error returns after each tb_sw_write/
 * read and the final return elided) -- verify against upstream.
 * Visible flow: only for generation-3 routers; stage @value in the
 * PCIE_WR_DATA register, compose the command word (dword offset, bridge
 * select bit, write flag, command value, req/ack), write it to the
 * PCIE_CMD register, poll for the req/ack bit to clear (100 ms), then
 * read back and check the timeout bit.
 */
3675 * Can be used for read/write a specified PCIe bridge for any Thunderbolt 3
3676 * device. For now used only for Titan Ridge.
3678 static int tb_switch_pcie_bridge_write(struct tb_switch
*sw
, unsigned int bridge
,
3679 unsigned int pcie_offset
, u32 value
)
3681 u32 offset
, command
, val
;
3684 if (sw
->generation
!= 3)
3687 offset
= sw
->cap_plug_events
+ TB_PLUG_EVENTS_PCIE_WR_DATA
;
3688 ret
= tb_sw_write(sw
, &value
, TB_CFG_SWITCH
, offset
, 1);
3692 command
= pcie_offset
& TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK
;
3693 command
|= BIT(bridge
+ TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT
);
3694 command
|= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK
;
3695 command
|= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
3696 << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT
;
3697 command
|= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK
;
3699 offset
= sw
->cap_plug_events
+ TB_PLUG_EVENTS_PCIE_CMD
;
3701 ret
= tb_sw_write(sw
, &command
, TB_CFG_SWITCH
, offset
, 1);
3705 ret
= tb_switch_wait_for_bit(sw
, offset
,
3706 TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK
, 0, 100);
3710 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, offset
, 1);
3714 if (val
& TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK
)
/*
 * NOTE(review): garbled extract (early returns and the ret check after
 * the first bridge write elided) -- verify against upstream.  Applies
 * only to Titan Ridge routers at depth 1; writes magic values to PCIe
 * config offset 0x143 of downstream bridge 5 (Dn4) and upstream bridge
 * 0 (Up0) to permit PCIe L1 entry.
 */
3721 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
3722 * @sw: Router to enable PCIe L1
3724 * For Titan Ridge switch to enter CLx state, its PCIe bridges shall enable
3725 * entry to PCIe L1 state. Shall be called after the upstream PCIe tunnel
3726 * was configured. Due to Intel platforms limitation, shall be called only
3727 * for first hop switch.
3729 int tb_switch_pcie_l1_enable(struct tb_switch
*sw
)
3731 struct tb_switch
*parent
= tb_switch_parent(sw
);
3737 if (!tb_switch_is_titan_ridge(sw
))
3740 /* Enable PCIe L1 enable only for first hop router (depth = 1) */
3741 if (tb_route(parent
))
3744 /* Write to downstream PCIe bridge #5 aka Dn4 */
3745 ret
= tb_switch_pcie_bridge_write(sw
, 5, 0x143, 0x0c7806b1);
3749 /* Write to Upstream PCIe bridge #0 aka Up0 */
3750 return tb_switch_pcie_bridge_write(sw
, 0, 0x143, 0x0c5806b1);
/*
 * NOTE(review): garbled extract (early return for non-gen-3 routers,
 * ret checks and the trailing "return 0;" elided) -- verify against
 * upstream.  Visible flow: only generation-3 routers have the internal
 * xHCI; for Alpine Ridge connect each lane-adapter port (1 and 3) whose
 * type-C USB is plugged but whose xHCI is not yet connected; for Titan
 * Ridge connect both ports unconditionally.
 */
3754 * tb_switch_xhci_connect() - Connect internal xHCI
3755 * @sw: Router whose xHCI to connect
3757 * Can be called to any router. For Alpine Ridge and Titan Ridge
3758 * performs special flows that bring the xHCI functional for any device
3759 * connected to the type-C port. Call only after PCIe tunnel has been
3760 * established. The function only does the connect if not done already
3761 * so can be called several times for the same router.
3763 int tb_switch_xhci_connect(struct tb_switch
*sw
)
3765 struct tb_port
*port1
, *port3
;
3768 if (sw
->generation
!= 3)
3771 port1
= &sw
->ports
[1];
3772 port3
= &sw
->ports
[3];
3774 if (tb_switch_is_alpine_ridge(sw
)) {
3775 bool usb_port1
, usb_port3
, xhci_port1
, xhci_port3
;
3777 usb_port1
= tb_lc_is_usb_plugged(port1
);
3778 usb_port3
= tb_lc_is_usb_plugged(port3
);
3779 xhci_port1
= tb_lc_is_xhci_connected(port1
);
3780 xhci_port3
= tb_lc_is_xhci_connected(port3
);
3782 /* Figure out correct USB port to connect */
3783 if (usb_port1
&& !xhci_port1
) {
3784 ret
= tb_lc_xhci_connect(port1
);
3788 if (usb_port3
&& !xhci_port3
)
3789 return tb_lc_xhci_connect(port3
);
3790 } else if (tb_switch_is_titan_ridge(sw
)) {
3791 ret
= tb_lc_xhci_connect(port1
);
3794 return tb_lc_xhci_connect(port3
);
3801 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
3802 * @sw: Router whose xHCI to disconnect
3804 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
3807 void tb_switch_xhci_disconnect(struct tb_switch
*sw
)
3809 if (sw
->generation
== 3) {
3810 struct tb_port
*port1
= &sw
->ports
[1];
3811 struct tb_port
*port3
= &sw
->ports
[3];
3813 tb_lc_xhci_disconnect(port1
);
3814 tb_port_dbg(port1
, "disconnected xHCI\n");
3815 tb_lc_xhci_disconnect(port3
);
3816 tb_port_dbg(port3
, "disconnected xHCI\n");