1 // SPDX-License-Identifier: GPL-2.0
3 * Thunderbolt driver - switch/port utility functions
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
9 #include <linux/delay.h>
10 #include <linux/idr.h>
11 #include <linux/module.h>
12 #include <linux/nvmem-provider.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/sched/signal.h>
15 #include <linux/sizes.h>
16 #include <linux/slab.h>
17 #include <linux/string_helpers.h>
21 /* Switch NVM support */
23 struct nvm_auth_status
{
24 struct list_head list
;
30 * Hold NVM authentication failure status per switch This information
31 * needs to stay around even when the switch gets power cycled so we
34 static LIST_HEAD(nvm_auth_status_cache
);
35 static DEFINE_MUTEX(nvm_auth_status_lock
);
37 static struct nvm_auth_status
*__nvm_get_auth_status(const struct tb_switch
*sw
)
39 struct nvm_auth_status
*st
;
41 list_for_each_entry(st
, &nvm_auth_status_cache
, list
) {
42 if (uuid_equal(&st
->uuid
, sw
->uuid
))
49 static void nvm_get_auth_status(const struct tb_switch
*sw
, u32
*status
)
51 struct nvm_auth_status
*st
;
53 mutex_lock(&nvm_auth_status_lock
);
54 st
= __nvm_get_auth_status(sw
);
55 mutex_unlock(&nvm_auth_status_lock
);
57 *status
= st
? st
->status
: 0;
60 static void nvm_set_auth_status(const struct tb_switch
*sw
, u32 status
)
62 struct nvm_auth_status
*st
;
64 if (WARN_ON(!sw
->uuid
))
67 mutex_lock(&nvm_auth_status_lock
);
68 st
= __nvm_get_auth_status(sw
);
71 st
= kzalloc(sizeof(*st
), GFP_KERNEL
);
75 memcpy(&st
->uuid
, sw
->uuid
, sizeof(st
->uuid
));
76 INIT_LIST_HEAD(&st
->list
);
77 list_add_tail(&st
->list
, &nvm_auth_status_cache
);
82 mutex_unlock(&nvm_auth_status_lock
);
85 static void nvm_clear_auth_status(const struct tb_switch
*sw
)
87 struct nvm_auth_status
*st
;
89 mutex_lock(&nvm_auth_status_lock
);
90 st
= __nvm_get_auth_status(sw
);
95 mutex_unlock(&nvm_auth_status_lock
);
98 static int nvm_validate_and_write(struct tb_switch
*sw
)
100 unsigned int image_size
;
104 ret
= tb_nvm_validate(sw
->nvm
);
108 ret
= tb_nvm_write_headers(sw
->nvm
);
112 buf
= sw
->nvm
->buf_data_start
;
113 image_size
= sw
->nvm
->buf_data_size
;
115 if (tb_switch_is_usb4(sw
))
116 ret
= usb4_switch_nvm_write(sw
, 0, buf
, image_size
);
118 ret
= dma_port_flash_write(sw
->dma_port
, 0, buf
, image_size
);
122 sw
->nvm
->flushed
= true;
126 static int nvm_authenticate_host_dma_port(struct tb_switch
*sw
)
131 * Root switch NVM upgrade requires that we disconnect the
132 * existing paths first (in case it is not in safe mode
135 if (!sw
->safe_mode
) {
138 ret
= tb_domain_disconnect_all_paths(sw
->tb
);
142 * The host controller goes away pretty soon after this if
143 * everything goes well so getting timeout is expected.
145 ret
= dma_port_flash_update_auth(sw
->dma_port
);
146 if (!ret
|| ret
== -ETIMEDOUT
)
150 * Any error from update auth operation requires power
151 * cycling of the host router.
153 tb_sw_warn(sw
, "failed to authenticate NVM, power cycling\n");
154 if (dma_port_flash_update_auth_status(sw
->dma_port
, &status
) > 0)
155 nvm_set_auth_status(sw
, status
);
159 * From safe mode we can get out by just power cycling the
162 dma_port_power_cycle(sw
->dma_port
);
166 static int nvm_authenticate_device_dma_port(struct tb_switch
*sw
)
168 int ret
, retries
= 10;
170 ret
= dma_port_flash_update_auth(sw
->dma_port
);
176 /* Power cycle is required */
183 * Poll here for the authentication status. It takes some time
184 * for the device to respond (we get timeout for a while). Once
185 * we get response the device needs to be power cycled in order
186 * to the new NVM to be taken into use.
191 ret
= dma_port_flash_update_auth_status(sw
->dma_port
, &status
);
192 if (ret
< 0 && ret
!= -ETIMEDOUT
)
196 tb_sw_warn(sw
, "failed to authenticate NVM\n");
197 nvm_set_auth_status(sw
, status
);
200 tb_sw_info(sw
, "power cycling the switch now\n");
201 dma_port_power_cycle(sw
->dma_port
);
211 static void nvm_authenticate_start_dma_port(struct tb_switch
*sw
)
213 struct pci_dev
*root_port
;
216 * During host router NVM upgrade we should not allow root port to
217 * go into D3cold because some root ports cannot trigger PME
218 * itself. To be on the safe side keep the root port in D0 during
219 * the whole upgrade process.
221 root_port
= pcie_find_root_port(sw
->tb
->nhi
->pdev
);
223 pm_runtime_get_noresume(&root_port
->dev
);
226 static void nvm_authenticate_complete_dma_port(struct tb_switch
*sw
)
228 struct pci_dev
*root_port
;
230 root_port
= pcie_find_root_port(sw
->tb
->nhi
->pdev
);
232 pm_runtime_put(&root_port
->dev
);
235 static inline bool nvm_readable(struct tb_switch
*sw
)
237 if (tb_switch_is_usb4(sw
)) {
239 * USB4 devices must support NVM operations but it is
240 * optional for hosts. Therefore we query the NVM sector
241 * size here and if it is supported assume NVM
242 * operations are implemented.
244 return usb4_switch_nvm_sector_size(sw
) > 0;
247 /* Thunderbolt 2 and 3 devices support NVM through DMA port */
248 return !!sw
->dma_port
;
251 static inline bool nvm_upgradeable(struct tb_switch
*sw
)
253 if (sw
->no_nvm_upgrade
)
255 return nvm_readable(sw
);
258 static int nvm_authenticate(struct tb_switch
*sw
, bool auth_only
)
262 if (tb_switch_is_usb4(sw
)) {
264 ret
= usb4_switch_nvm_set_offset(sw
, 0);
268 sw
->nvm
->authenticating
= true;
269 return usb4_switch_nvm_authenticate(sw
);
274 sw
->nvm
->authenticating
= true;
276 nvm_authenticate_start_dma_port(sw
);
277 ret
= nvm_authenticate_host_dma_port(sw
);
279 ret
= nvm_authenticate_device_dma_port(sw
);
286 * tb_switch_nvm_read() - Read router NVM
287 * @sw: Router whose NVM to read
288 * @address: Start address on the NVM
289 * @buf: Buffer where the read data is copied
290 * @size: Size of the buffer in bytes
292 * Reads from router NVM and returns the requested data in @buf. Locking
293 * is up to the caller. Returns %0 in success and negative errno in case
296 int tb_switch_nvm_read(struct tb_switch
*sw
, unsigned int address
, void *buf
,
299 if (tb_switch_is_usb4(sw
))
300 return usb4_switch_nvm_read(sw
, address
, buf
, size
);
301 return dma_port_flash_read(sw
->dma_port
, address
, buf
, size
);
304 static int nvm_read(void *priv
, unsigned int offset
, void *val
, size_t bytes
)
306 struct tb_nvm
*nvm
= priv
;
307 struct tb_switch
*sw
= tb_to_switch(nvm
->dev
);
310 pm_runtime_get_sync(&sw
->dev
);
312 if (!mutex_trylock(&sw
->tb
->lock
)) {
313 ret
= restart_syscall();
317 ret
= tb_switch_nvm_read(sw
, offset
, val
, bytes
);
318 mutex_unlock(&sw
->tb
->lock
);
321 pm_runtime_mark_last_busy(&sw
->dev
);
322 pm_runtime_put_autosuspend(&sw
->dev
);
327 static int nvm_write(void *priv
, unsigned int offset
, void *val
, size_t bytes
)
329 struct tb_nvm
*nvm
= priv
;
330 struct tb_switch
*sw
= tb_to_switch(nvm
->dev
);
333 if (!mutex_trylock(&sw
->tb
->lock
))
334 return restart_syscall();
337 * Since writing the NVM image might require some special steps,
338 * for example when CSS headers are written, we cache the image
339 * locally here and handle the special cases when the user asks
340 * us to authenticate the image.
342 ret
= tb_nvm_write_buf(nvm
, offset
, val
, bytes
);
343 mutex_unlock(&sw
->tb
->lock
);
348 static int tb_switch_nvm_add(struct tb_switch
*sw
)
353 if (!nvm_readable(sw
))
356 nvm
= tb_nvm_alloc(&sw
->dev
);
358 ret
= PTR_ERR(nvm
) == -EOPNOTSUPP
? 0 : PTR_ERR(nvm
);
362 ret
= tb_nvm_read_version(nvm
);
367 * If the switch is in safe-mode the only accessible portion of
368 * the NVM is the non-active one where userspace is expected to
369 * write new functional NVM.
371 if (!sw
->safe_mode
) {
372 ret
= tb_nvm_add_active(nvm
, nvm_read
);
375 tb_sw_dbg(sw
, "NVM version %x.%x\n", nvm
->major
, nvm
->minor
);
378 if (!sw
->no_nvm_upgrade
) {
379 ret
= tb_nvm_add_non_active(nvm
, nvm_write
);
388 tb_sw_dbg(sw
, "NVM upgrade disabled\n");
389 sw
->no_nvm_upgrade
= true;
396 static void tb_switch_nvm_remove(struct tb_switch
*sw
)
406 /* Remove authentication status in case the switch is unplugged */
407 if (!nvm
->authenticating
)
408 nvm_clear_auth_status(sw
);
413 /* port utility functions */
415 static const char *tb_port_type(const struct tb_regs_port_header
*port
)
417 switch (port
->type
>> 16) {
419 switch ((u8
) port
->type
) {
444 static void tb_dump_port(struct tb
*tb
, const struct tb_port
*port
)
446 const struct tb_regs_port_header
*regs
= &port
->config
;
449 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
450 regs
->port_number
, regs
->vendor_id
, regs
->device_id
,
451 regs
->revision
, regs
->thunderbolt_version
, tb_port_type(regs
),
453 tb_dbg(tb
, " Max hop id (in/out): %d/%d\n",
454 regs
->max_in_hop_id
, regs
->max_out_hop_id
);
455 tb_dbg(tb
, " Max counters: %d\n", regs
->max_counters
);
456 tb_dbg(tb
, " NFC Credits: %#x\n", regs
->nfc_credits
);
457 tb_dbg(tb
, " Credits (total/control): %u/%u\n", port
->total_credits
,
462 * tb_port_state() - get connectedness state of a port
463 * @port: the port to check
465 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
467 * Return: Returns an enum tb_port_state on success or an error code on failure.
469 int tb_port_state(struct tb_port
*port
)
471 struct tb_cap_phy phy
;
473 if (port
->cap_phy
== 0) {
474 tb_port_WARN(port
, "does not have a PHY\n");
477 res
= tb_port_read(port
, &phy
, TB_CFG_PORT
, port
->cap_phy
, 2);
484 * tb_wait_for_port() - wait for a port to become ready
485 * @port: Port to wait
486 * @wait_if_unplugged: Wait also when port is unplugged
488 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
489 * wait_if_unplugged is set then we also wait if the port is in state
490 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
491 * switch resume). Otherwise we only wait if a device is registered but the link
492 * has not yet been established.
494 * Return: Returns an error code on failure. Returns 0 if the port is not
495 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
496 * if the port is connected and in state TB_PORT_UP.
498 int tb_wait_for_port(struct tb_port
*port
, bool wait_if_unplugged
)
502 if (!port
->cap_phy
) {
503 tb_port_WARN(port
, "does not have PHY\n");
506 if (tb_is_upstream_port(port
)) {
507 tb_port_WARN(port
, "is the upstream port\n");
512 state
= tb_port_state(port
);
514 case TB_PORT_DISABLED
:
515 tb_port_dbg(port
, "is disabled (state: 0)\n");
518 case TB_PORT_UNPLUGGED
:
519 if (wait_if_unplugged
) {
520 /* used during resume */
522 "is unplugged (state: 7), retrying...\n");
526 tb_port_dbg(port
, "is unplugged (state: 7)\n");
530 case TB_PORT_TX_CL0S
:
531 case TB_PORT_RX_CL0S
:
534 tb_port_dbg(port
, "is connected, link is up (state: %d)\n", state
);
542 * After plug-in the state is TB_PORT_CONNECTING. Give it some
546 "is connected, link is not up (state: %d), retrying...\n",
553 "failed to reach state TB_PORT_UP. Ignoring port...\n");
558 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
559 * @port: Port to add/remove NFC credits
560 * @credits: Credits to add/remove
562 * Change the number of NFC credits allocated to @port by @credits. To remove
563 * NFC credits pass a negative amount of credits.
565 * Return: Returns 0 on success or an error code on failure.
567 int tb_port_add_nfc_credits(struct tb_port
*port
, int credits
)
571 if (credits
== 0 || port
->sw
->is_unplugged
)
575 * USB4 restricts programming NFC buffers to lane adapters only
576 * so skip other ports.
578 if (tb_switch_is_usb4(port
->sw
) && !tb_port_is_null(port
))
581 nfc_credits
= port
->config
.nfc_credits
& ADP_CS_4_NFC_BUFFERS_MASK
;
583 credits
= max_t(int, -nfc_credits
, credits
);
585 nfc_credits
+= credits
;
587 tb_port_dbg(port
, "adding %d NFC credits to %lu", credits
,
588 port
->config
.nfc_credits
& ADP_CS_4_NFC_BUFFERS_MASK
);
590 port
->config
.nfc_credits
&= ~ADP_CS_4_NFC_BUFFERS_MASK
;
591 port
->config
.nfc_credits
|= nfc_credits
;
593 return tb_port_write(port
, &port
->config
.nfc_credits
,
594 TB_CFG_PORT
, ADP_CS_4
, 1);
598 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
599 * @port: Port whose counters to clear
600 * @counter: Counter index to clear
602 * Return: Returns 0 on success or an error code on failure.
604 int tb_port_clear_counter(struct tb_port
*port
, int counter
)
606 u32 zero
[3] = { 0, 0, 0 };
607 tb_port_dbg(port
, "clearing counter %d\n", counter
);
608 return tb_port_write(port
, zero
, TB_CFG_COUNTERS
, 3 * counter
, 3);
612 * tb_port_unlock() - Unlock downstream port
613 * @port: Port to unlock
615 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
616 * downstream router accessible for CM.
618 int tb_port_unlock(struct tb_port
*port
)
620 if (tb_switch_is_icm(port
->sw
))
622 if (!tb_port_is_null(port
))
624 if (tb_switch_is_usb4(port
->sw
))
625 return usb4_port_unlock(port
);
629 static int __tb_port_enable(struct tb_port
*port
, bool enable
)
634 if (!tb_port_is_null(port
))
637 ret
= tb_port_read(port
, &phy
, TB_CFG_PORT
,
638 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
643 phy
&= ~LANE_ADP_CS_1_LD
;
645 phy
|= LANE_ADP_CS_1_LD
;
648 ret
= tb_port_write(port
, &phy
, TB_CFG_PORT
,
649 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
653 tb_port_dbg(port
, "lane %s\n", str_enabled_disabled(enable
));
658 * tb_port_enable() - Enable lane adapter
659 * @port: Port to enable (can be %NULL)
661 * This is used for lane 0 and 1 adapters to enable it.
663 int tb_port_enable(struct tb_port
*port
)
665 return __tb_port_enable(port
, true);
669 * tb_port_disable() - Disable lane adapter
670 * @port: Port to disable (can be %NULL)
672 * This is used for lane 0 and 1 adapters to disable it.
674 int tb_port_disable(struct tb_port
*port
)
676 return __tb_port_enable(port
, false);
680 * tb_init_port() - initialize a port
682 * This is a helper method for tb_switch_alloc. Does not check or initialize
683 * any downstream switches.
685 * Return: Returns 0 on success or an error code on failure.
687 static int tb_init_port(struct tb_port
*port
)
692 INIT_LIST_HEAD(&port
->list
);
694 /* Control adapter does not have configuration space */
698 res
= tb_port_read(port
, &port
->config
, TB_CFG_PORT
, 0, 8);
700 if (res
== -ENODEV
) {
701 tb_dbg(port
->sw
->tb
, " Port %d: not implemented\n",
703 port
->disabled
= true;
709 /* Port 0 is the switch itself and has no PHY. */
710 if (port
->config
.type
== TB_TYPE_PORT
) {
711 cap
= tb_port_find_cap(port
, TB_PORT_CAP_PHY
);
716 tb_port_WARN(port
, "non switch port without a PHY\n");
718 cap
= tb_port_find_cap(port
, TB_PORT_CAP_USB4
);
720 port
->cap_usb4
= cap
;
723 * USB4 ports the buffers allocated for the control path
724 * can be read from the path config space. Legacy
725 * devices we use hard-coded value.
727 if (port
->cap_usb4
) {
728 struct tb_regs_hop hop
;
730 if (!tb_port_read(port
, &hop
, TB_CFG_HOPS
, 0, 2))
731 port
->ctl_credits
= hop
.initial_credits
;
733 if (!port
->ctl_credits
)
734 port
->ctl_credits
= 2;
737 cap
= tb_port_find_cap(port
, TB_PORT_CAP_ADAP
);
739 port
->cap_adap
= cap
;
742 port
->total_credits
=
743 (port
->config
.nfc_credits
& ADP_CS_4_TOTAL_BUFFERS_MASK
) >>
744 ADP_CS_4_TOTAL_BUFFERS_SHIFT
;
746 tb_dump_port(port
->sw
->tb
, port
);
750 static int tb_port_alloc_hopid(struct tb_port
*port
, bool in
, int min_hopid
,
757 port_max_hopid
= port
->config
.max_in_hop_id
;
758 ida
= &port
->in_hopids
;
760 port_max_hopid
= port
->config
.max_out_hop_id
;
761 ida
= &port
->out_hopids
;
765 * NHI can use HopIDs 1-max for other adapters HopIDs 0-7 are
768 if (!tb_port_is_nhi(port
) && min_hopid
< TB_PATH_MIN_HOPID
)
769 min_hopid
= TB_PATH_MIN_HOPID
;
771 if (max_hopid
< 0 || max_hopid
> port_max_hopid
)
772 max_hopid
= port_max_hopid
;
774 return ida_simple_get(ida
, min_hopid
, max_hopid
+ 1, GFP_KERNEL
);
778 * tb_port_alloc_in_hopid() - Allocate input HopID from port
779 * @port: Port to allocate HopID for
780 * @min_hopid: Minimum acceptable input HopID
781 * @max_hopid: Maximum acceptable input HopID
783 * Return: HopID between @min_hopid and @max_hopid or negative errno in
786 int tb_port_alloc_in_hopid(struct tb_port
*port
, int min_hopid
, int max_hopid
)
788 return tb_port_alloc_hopid(port
, true, min_hopid
, max_hopid
);
792 * tb_port_alloc_out_hopid() - Allocate output HopID from port
793 * @port: Port to allocate HopID for
794 * @min_hopid: Minimum acceptable output HopID
795 * @max_hopid: Maximum acceptable output HopID
797 * Return: HopID between @min_hopid and @max_hopid or negative errno in
800 int tb_port_alloc_out_hopid(struct tb_port
*port
, int min_hopid
, int max_hopid
)
802 return tb_port_alloc_hopid(port
, false, min_hopid
, max_hopid
);
806 * tb_port_release_in_hopid() - Release allocated input HopID from port
807 * @port: Port whose HopID to release
808 * @hopid: HopID to release
810 void tb_port_release_in_hopid(struct tb_port
*port
, int hopid
)
812 ida_simple_remove(&port
->in_hopids
, hopid
);
816 * tb_port_release_out_hopid() - Release allocated output HopID from port
817 * @port: Port whose HopID to release
818 * @hopid: HopID to release
820 void tb_port_release_out_hopid(struct tb_port
*port
, int hopid
)
822 ida_simple_remove(&port
->out_hopids
, hopid
);
825 static inline bool tb_switch_is_reachable(const struct tb_switch
*parent
,
826 const struct tb_switch
*sw
)
828 u64 mask
= (1ULL << parent
->config
.depth
* 8) - 1;
829 return (tb_route(parent
) & mask
) == (tb_route(sw
) & mask
);
833 * tb_next_port_on_path() - Return next port for given port on a path
834 * @start: Start port of the walk
835 * @end: End port of the walk
836 * @prev: Previous port (%NULL if this is the first)
838 * This function can be used to walk from one port to another if they
839 * are connected through zero or more switches. If the @prev is dual
840 * link port, the function follows that link and returns another end on
843 * If the @end port has been reached, return %NULL.
845 * Domain tb->lock must be held when this function is called.
847 struct tb_port
*tb_next_port_on_path(struct tb_port
*start
, struct tb_port
*end
,
848 struct tb_port
*prev
)
850 struct tb_port
*next
;
855 if (prev
->sw
== end
->sw
) {
861 if (tb_switch_is_reachable(prev
->sw
, end
->sw
)) {
862 next
= tb_port_at(tb_route(end
->sw
), prev
->sw
);
863 /* Walk down the topology if next == prev */
865 (next
== prev
|| next
->dual_link_port
== prev
))
868 if (tb_is_upstream_port(prev
)) {
871 next
= tb_upstream_port(prev
->sw
);
873 * Keep the same link if prev and next are both
876 if (next
->dual_link_port
&&
877 next
->link_nr
!= prev
->link_nr
) {
878 next
= next
->dual_link_port
;
883 return next
!= prev
? next
: NULL
;
887 * tb_port_get_link_speed() - Get current link speed
888 * @port: Port to check (USB4 or CIO)
890 * Returns link speed in Gb/s or negative errno in case of failure.
892 int tb_port_get_link_speed(struct tb_port
*port
)
900 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
901 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
905 speed
= (val
& LANE_ADP_CS_1_CURRENT_SPEED_MASK
) >>
906 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT
;
909 case LANE_ADP_CS_1_CURRENT_SPEED_GEN4
:
911 case LANE_ADP_CS_1_CURRENT_SPEED_GEN3
:
919 * tb_port_get_link_generation() - Returns link generation
920 * @port: Lane adapter
922 * Returns link generation as number or negative errno in case of
923 * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
924 * links so for those always returns 2.
int tb_port_get_link_generation(struct tb_port *port)
{
	int ret;

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	/* Map Gb/s back to link generation; TB1/TB2 both report 2 */
	switch (ret) {
	case 40:
		return 4;
	case 20:
		return 3;
	default:
		return 2;
	}
}
944 static const char *width_name(enum tb_link_width width
)
947 case TB_LINK_WIDTH_SINGLE
:
948 return "symmetric, single lane";
949 case TB_LINK_WIDTH_DUAL
:
950 return "symmetric, dual lanes";
951 case TB_LINK_WIDTH_ASYM_TX
:
952 return "asymmetric, 3 transmitters, 1 receiver";
953 case TB_LINK_WIDTH_ASYM_RX
:
954 return "asymmetric, 3 receivers, 1 transmitter";
961 * tb_port_get_link_width() - Get current link width
962 * @port: Port to check (USB4 or CIO)
964 * Returns link width. Return the link width as encoded in &enum
965 * tb_link_width or negative errno in case of failure.
967 int tb_port_get_link_width(struct tb_port
*port
)
975 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
976 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
980 /* Matches the values in enum tb_link_width */
981 return (val
& LANE_ADP_CS_1_CURRENT_WIDTH_MASK
) >>
982 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT
;
986 * tb_port_width_supported() - Is the given link width supported
987 * @port: Port to check
988 * @width: Widths to check (bitmask)
990 * Can be called to any lane adapter. Checks if given @width is
991 * supported by the hardware and returns %true if it is.
993 bool tb_port_width_supported(struct tb_port
*port
, unsigned int width
)
1001 if (width
& (TB_LINK_WIDTH_ASYM_TX
| TB_LINK_WIDTH_ASYM_RX
)) {
1002 if (tb_port_get_link_generation(port
) < 4 ||
1003 !usb4_port_asym_supported(port
))
1007 ret
= tb_port_read(port
, &phy
, TB_CFG_PORT
,
1008 port
->cap_phy
+ LANE_ADP_CS_0
, 1);
1013 * The field encoding is the same as &enum tb_link_width (which is
1014 * passed to @width).
1016 widths
= FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK
, phy
);
1017 return widths
& width
;
1021 * tb_port_set_link_width() - Set target link width of the lane adapter
1022 * @port: Lane adapter
1023 * @width: Target link width
1025 * Sets the target link width of the lane adapter to @width. Does not
1026 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
1028 * Return: %0 in case of success and negative errno in case of error
1030 int tb_port_set_link_width(struct tb_port
*port
, enum tb_link_width width
)
1038 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
1039 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
1043 val
&= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK
;
1045 case TB_LINK_WIDTH_SINGLE
:
1046 /* Gen 4 link cannot be single */
1047 if (tb_port_get_link_generation(port
) >= 4)
1049 val
|= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE
<<
1050 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT
;
1053 case TB_LINK_WIDTH_DUAL
:
1054 if (tb_port_get_link_generation(port
) >= 4)
1055 return usb4_port_asym_set_link_width(port
, width
);
1056 val
|= LANE_ADP_CS_1_TARGET_WIDTH_DUAL
<<
1057 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT
;
1060 case TB_LINK_WIDTH_ASYM_TX
:
1061 case TB_LINK_WIDTH_ASYM_RX
:
1062 return usb4_port_asym_set_link_width(port
, width
);
1068 return tb_port_write(port
, &val
, TB_CFG_PORT
,
1069 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
1073 * tb_port_set_lane_bonding() - Enable/disable lane bonding
1074 * @port: Lane adapter
1075 * @bonding: enable/disable bonding
1077 * Enables or disables lane bonding. This should be called after target
1078 * link width has been set (tb_port_set_link_width()). Note in most
1079 * cases one should use tb_port_lane_bonding_enable() instead to enable
1082 * Return: %0 in case of success and negative errno in case of error
1084 static int tb_port_set_lane_bonding(struct tb_port
*port
, bool bonding
)
1092 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
1093 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
1098 val
|= LANE_ADP_CS_1_LB
;
1100 val
&= ~LANE_ADP_CS_1_LB
;
1102 return tb_port_write(port
, &val
, TB_CFG_PORT
,
1103 port
->cap_phy
+ LANE_ADP_CS_1
, 1);
1107 * tb_port_lane_bonding_enable() - Enable bonding on port
1108 * @port: port to enable
1110 * Enable bonding by setting the link width of the port and the other
1111 * port in case of dual link port. Does not wait for the link to
1112 * actually reach the bonded state so caller needs to call
1113 * tb_port_wait_for_link_width() before enabling any paths through the
1114 * link to make sure the link is in expected state.
1116 * Return: %0 in case of success and negative errno in case of error
1118 int tb_port_lane_bonding_enable(struct tb_port
*port
)
1120 enum tb_link_width width
;
1124 * Enable lane bonding for both links if not already enabled by
1125 * for example the boot firmware.
1127 width
= tb_port_get_link_width(port
);
1128 if (width
== TB_LINK_WIDTH_SINGLE
) {
1129 ret
= tb_port_set_link_width(port
, TB_LINK_WIDTH_DUAL
);
1134 width
= tb_port_get_link_width(port
->dual_link_port
);
1135 if (width
== TB_LINK_WIDTH_SINGLE
) {
1136 ret
= tb_port_set_link_width(port
->dual_link_port
,
1137 TB_LINK_WIDTH_DUAL
);
1143 * Only set bonding if the link was not already bonded. This
1144 * avoids the lane adapter to re-enter bonding state.
1146 if (width
== TB_LINK_WIDTH_SINGLE
&& !tb_is_upstream_port(port
)) {
1147 ret
= tb_port_set_lane_bonding(port
, true);
1153 * When lane 0 bonding is set it will affect lane 1 too so
1156 port
->bonded
= true;
1157 port
->dual_link_port
->bonded
= true;
1162 tb_port_set_link_width(port
->dual_link_port
, TB_LINK_WIDTH_SINGLE
);
1164 tb_port_set_link_width(port
, TB_LINK_WIDTH_SINGLE
);
1170 * tb_port_lane_bonding_disable() - Disable bonding on port
1171 * @port: port to disable
1173 * Disable bonding by setting the link width of the port and the
1174 * other port in case of dual link port.
1176 void tb_port_lane_bonding_disable(struct tb_port
*port
)
1178 tb_port_set_lane_bonding(port
, false);
1179 tb_port_set_link_width(port
->dual_link_port
, TB_LINK_WIDTH_SINGLE
);
1180 tb_port_set_link_width(port
, TB_LINK_WIDTH_SINGLE
);
1181 port
->dual_link_port
->bonded
= false;
1182 port
->bonded
= false;
1186 * tb_port_wait_for_link_width() - Wait until link reaches specific width
1187 * @port: Port to wait for
1188 * @width: Expected link width (bitmask)
1189 * @timeout_msec: Timeout in ms how long to wait
1191 * Should be used after both ends of the link have been bonded (or
1192 * bonding has been disabled) to wait until the link actually reaches
1193 * the expected state. Returns %-ETIMEDOUT if the width was not reached
1194 * within the given timeout, %0 if it did. Can be passed a mask of
1195 * expected widths and succeeds if any of the widths is reached.
1197 int tb_port_wait_for_link_width(struct tb_port
*port
, unsigned int width
,
1200 ktime_t timeout
= ktime_add_ms(ktime_get(), timeout_msec
);
1203 /* Gen 4 link does not support single lane */
1204 if ((width
& TB_LINK_WIDTH_SINGLE
) &&
1205 tb_port_get_link_generation(port
) >= 4)
1209 ret
= tb_port_get_link_width(port
);
1212 * Sometimes we get port locked error when
1213 * polling the lanes so we can ignore it and
1218 } else if (ret
& width
) {
1222 usleep_range(1000, 2000);
1223 } while (ktime_before(ktime_get(), timeout
));
1228 static int tb_port_do_update_credits(struct tb_port
*port
)
1233 ret
= tb_port_read(port
, &nfc_credits
, TB_CFG_PORT
, ADP_CS_4
, 1);
1237 if (nfc_credits
!= port
->config
.nfc_credits
) {
1240 total
= (nfc_credits
& ADP_CS_4_TOTAL_BUFFERS_MASK
) >>
1241 ADP_CS_4_TOTAL_BUFFERS_SHIFT
;
1243 tb_port_dbg(port
, "total credits changed %u -> %u\n",
1244 port
->total_credits
, total
);
1246 port
->config
.nfc_credits
= nfc_credits
;
1247 port
->total_credits
= total
;
1254 * tb_port_update_credits() - Re-read port total credits
1255 * @port: Port to update
1257 * After the link is bonded (or bonding was disabled) the port total
1258 * credits may change, so this function needs to be called to re-read
1259 * the credits. Updates also the second lane adapter.
1261 int tb_port_update_credits(struct tb_port
*port
)
1265 ret
= tb_port_do_update_credits(port
);
1268 return tb_port_do_update_credits(port
->dual_link_port
);
1271 static int tb_port_start_lane_initialization(struct tb_port
*port
)
1275 if (tb_switch_is_usb4(port
->sw
))
1278 ret
= tb_lc_start_lane_initialization(port
);
1279 return ret
== -EINVAL
? 0 : ret
;
1283 * Returns true if the port had something (router, XDomain) connected
1286 static bool tb_port_resume(struct tb_port
*port
)
1288 bool has_remote
= tb_port_has_remote(port
);
1291 usb4_port_device_resume(port
->usb4
);
1292 } else if (!has_remote
) {
1294 * For disconnected downstream lane adapters start lane
1295 * initialization now so we detect future connects.
1297 * For XDomain start the lane initialzation now so the
1298 * link gets re-established.
1300 * This is only needed for non-USB4 ports.
1302 if (!tb_is_upstream_port(port
) || port
->xdomain
)
1303 tb_port_start_lane_initialization(port
);
1306 return has_remote
|| port
->xdomain
;
1310 * tb_port_is_enabled() - Is the adapter port enabled
1311 * @port: Port to check
1313 bool tb_port_is_enabled(struct tb_port
*port
)
1315 switch (port
->config
.type
) {
1316 case TB_TYPE_PCIE_UP
:
1317 case TB_TYPE_PCIE_DOWN
:
1318 return tb_pci_port_is_enabled(port
);
1320 case TB_TYPE_DP_HDMI_IN
:
1321 case TB_TYPE_DP_HDMI_OUT
:
1322 return tb_dp_port_is_enabled(port
);
1324 case TB_TYPE_USB3_UP
:
1325 case TB_TYPE_USB3_DOWN
:
1326 return tb_usb3_port_is_enabled(port
);
1334 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1335 * @port: USB3 adapter port to check
1337 bool tb_usb3_port_is_enabled(struct tb_port
*port
)
1341 if (tb_port_read(port
, &data
, TB_CFG_PORT
,
1342 port
->cap_adap
+ ADP_USB3_CS_0
, 1))
1345 return !!(data
& ADP_USB3_CS_0_PE
);
1349 * tb_usb3_port_enable() - Enable USB3 adapter port
1350 * @port: USB3 adapter port to enable
1351 * @enable: Enable/disable the USB3 adapter
1353 int tb_usb3_port_enable(struct tb_port
*port
, bool enable
)
1355 u32 word
= enable
? (ADP_USB3_CS_0_PE
| ADP_USB3_CS_0_V
)
1358 if (!port
->cap_adap
)
1360 return tb_port_write(port
, &word
, TB_CFG_PORT
,
1361 port
->cap_adap
+ ADP_USB3_CS_0
, 1);
1365 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1366 * @port: PCIe port to check
1368 bool tb_pci_port_is_enabled(struct tb_port
*port
)
1372 if (tb_port_read(port
, &data
, TB_CFG_PORT
,
1373 port
->cap_adap
+ ADP_PCIE_CS_0
, 1))
1376 return !!(data
& ADP_PCIE_CS_0_PE
);
1380 * tb_pci_port_enable() - Enable PCIe adapter port
1381 * @port: PCIe port to enable
1382 * @enable: Enable/disable the PCIe adapter
1384 int tb_pci_port_enable(struct tb_port
*port
, bool enable
)
1386 u32 word
= enable
? ADP_PCIE_CS_0_PE
: 0x0;
1387 if (!port
->cap_adap
)
1389 return tb_port_write(port
, &word
, TB_CFG_PORT
,
1390 port
->cap_adap
+ ADP_PCIE_CS_0
, 1);
1394 * tb_dp_port_hpd_is_active() - Is HPD already active
1395 * @port: DP out port to check
1397 * Checks if the DP OUT adapter port has HPD bit already set.
1399 int tb_dp_port_hpd_is_active(struct tb_port
*port
)
1404 ret
= tb_port_read(port
, &data
, TB_CFG_PORT
,
1405 port
->cap_adap
+ ADP_DP_CS_2
, 1);
1409 return !!(data
& ADP_DP_CS_2_HPD
);
1413 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1414 * @port: Port to clear HPD
1416 * If the DP IN port has HPD set, this function can be used to clear it.
1418 int tb_dp_port_hpd_clear(struct tb_port
*port
)
1423 ret
= tb_port_read(port
, &data
, TB_CFG_PORT
,
1424 port
->cap_adap
+ ADP_DP_CS_3
, 1);
1428 data
|= ADP_DP_CS_3_HPDC
;
1429 return tb_port_write(port
, &data
, TB_CFG_PORT
,
1430 port
->cap_adap
+ ADP_DP_CS_3
, 1);
1434 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1435 * @port: DP IN/OUT port to set hops
1436 * @video: Video Hop ID
1437 * @aux_tx: AUX TX Hop ID
1438 * @aux_rx: AUX RX Hop ID
1440 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
1441 * router DP adapters too but does not program the values as the fields
1444 int tb_dp_port_set_hops(struct tb_port
*port
, unsigned int video
,
1445 unsigned int aux_tx
, unsigned int aux_rx
)
1450 if (tb_switch_is_usb4(port
->sw
))
1453 ret
= tb_port_read(port
, data
, TB_CFG_PORT
,
1454 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1458 data
[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK
;
1459 data
[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK
;
1460 data
[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK
;
1462 data
[0] |= (video
<< ADP_DP_CS_0_VIDEO_HOPID_SHIFT
) &
1463 ADP_DP_CS_0_VIDEO_HOPID_MASK
;
1464 data
[1] |= aux_tx
& ADP_DP_CS_1_AUX_TX_HOPID_MASK
;
1465 data
[1] |= (aux_rx
<< ADP_DP_CS_1_AUX_RX_HOPID_SHIFT
) &
1466 ADP_DP_CS_1_AUX_RX_HOPID_MASK
;
1468 return tb_port_write(port
, data
, TB_CFG_PORT
,
1469 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1473 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1474 * @port: DP adapter port to check
1476 bool tb_dp_port_is_enabled(struct tb_port
*port
)
1480 if (tb_port_read(port
, data
, TB_CFG_PORT
, port
->cap_adap
+ ADP_DP_CS_0
,
1484 return !!(data
[0] & (ADP_DP_CS_0_VE
| ADP_DP_CS_0_AE
));
1488 * tb_dp_port_enable() - Enables/disables DP paths of a port
1489 * @port: DP IN/OUT port
1490 * @enable: Enable/disable DP path
1492 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1493 * calling this function.
1495 int tb_dp_port_enable(struct tb_port
*port
, bool enable
)
1500 ret
= tb_port_read(port
, data
, TB_CFG_PORT
,
1501 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1506 data
[0] |= ADP_DP_CS_0_VE
| ADP_DP_CS_0_AE
;
1508 data
[0] &= ~(ADP_DP_CS_0_VE
| ADP_DP_CS_0_AE
);
1510 return tb_port_write(port
, data
, TB_CFG_PORT
,
1511 port
->cap_adap
+ ADP_DP_CS_0
, ARRAY_SIZE(data
));
1514 /* switch utility functions */
1516 static const char *tb_switch_generation_name(const struct tb_switch
*sw
)
1518 switch (sw
->generation
) {
1520 return "Thunderbolt 1";
1522 return "Thunderbolt 2";
1524 return "Thunderbolt 3";
1532 static void tb_dump_switch(const struct tb
*tb
, const struct tb_switch
*sw
)
1534 const struct tb_regs_switch_header
*regs
= &sw
->config
;
1536 tb_dbg(tb
, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1537 tb_switch_generation_name(sw
), regs
->vendor_id
, regs
->device_id
,
1538 regs
->revision
, regs
->thunderbolt_version
);
1539 tb_dbg(tb
, " Max Port Number: %d\n", regs
->max_port_number
);
1540 tb_dbg(tb
, " Config:\n");
1542 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1543 regs
->upstream_port_number
, regs
->depth
,
1544 (((u64
) regs
->route_hi
) << 32) | regs
->route_lo
,
1545 regs
->enabled
, regs
->plug_events_delay
);
1546 tb_dbg(tb
, " unknown1: %#x unknown4: %#x\n",
1547 regs
->__unknown1
, regs
->__unknown4
);
1551 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
1552 * @sw: Switch to reset
1554 * Return: Returns 0 on success or an error code on failure.
1556 int tb_switch_reset(struct tb_switch
*sw
)
1558 struct tb_cfg_result res
;
1560 if (sw
->generation
> 1)
1563 tb_sw_dbg(sw
, "resetting switch\n");
1565 res
.err
= tb_sw_write(sw
, ((u32
*) &sw
->config
) + 2,
1566 TB_CFG_SWITCH
, 2, 2);
1569 res
= tb_cfg_reset(sw
->tb
->ctl
, tb_route(sw
));
1576 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
1577 * @sw: Router to read the offset value from
1578 * @offset: Offset in the router config space to read from
1579 * @bit: Bit mask in the offset to wait for
1580 * @value: Value of the bits to wait for
1581 * @timeout_msec: Timeout in ms how long to wait
1583 * Wait till the specified bits in specified offset reach specified value.
1584 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
1585 * within the given timeout or a negative errno in case of failure.
1587 int tb_switch_wait_for_bit(struct tb_switch
*sw
, u32 offset
, u32 bit
,
1588 u32 value
, int timeout_msec
)
1590 ktime_t timeout
= ktime_add_ms(ktime_get(), timeout_msec
);
1596 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, offset
, 1);
1600 if ((val
& bit
) == value
)
1603 usleep_range(50, 100);
1604 } while (ktime_before(ktime_get(), timeout
));
1610 * tb_plug_events_active() - enable/disable plug events on a switch
1612 * Also configures a sane plug_events_delay of 255ms.
1614 * Return: Returns 0 on success or an error code on failure.
1616 static int tb_plug_events_active(struct tb_switch
*sw
, bool active
)
1621 if (tb_switch_is_icm(sw
) || tb_switch_is_usb4(sw
))
1624 sw
->config
.plug_events_delay
= 0xff;
1625 res
= tb_sw_write(sw
, ((u32
*) &sw
->config
) + 4, TB_CFG_SWITCH
, 4, 1);
1629 res
= tb_sw_read(sw
, &data
, TB_CFG_SWITCH
, sw
->cap_plug_events
+ 1, 1);
1634 data
= data
& 0xFFFFFF83;
1635 switch (sw
->config
.device_id
) {
1636 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE
:
1637 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE
:
1638 case PCI_DEVICE_ID_INTEL_PORT_RIDGE
:
1642 * Skip Alpine Ridge, it needs to have vendor
1643 * specific USB hotplug event enabled for the
1644 * internal xHCI to work.
1646 if (!tb_switch_is_alpine_ridge(sw
))
1647 data
|= TB_PLUG_EVENTS_USB_DISABLE
;
1652 return tb_sw_write(sw
, &data
, TB_CFG_SWITCH
,
1653 sw
->cap_plug_events
+ 1, 1);
1656 static ssize_t
authorized_show(struct device
*dev
,
1657 struct device_attribute
*attr
,
1660 struct tb_switch
*sw
= tb_to_switch(dev
);
1662 return sysfs_emit(buf
, "%u\n", sw
->authorized
);
1665 static int disapprove_switch(struct device
*dev
, void *not_used
)
1667 char *envp
[] = { "AUTHORIZED=0", NULL
};
1668 struct tb_switch
*sw
;
1670 sw
= tb_to_switch(dev
);
1671 if (sw
&& sw
->authorized
) {
1674 /* First children */
1675 ret
= device_for_each_child_reverse(&sw
->dev
, NULL
, disapprove_switch
);
1679 ret
= tb_domain_disapprove_switch(sw
->tb
, sw
);
1684 kobject_uevent_env(&sw
->dev
.kobj
, KOBJ_CHANGE
, envp
);
1690 static int tb_switch_set_authorized(struct tb_switch
*sw
, unsigned int val
)
1692 char envp_string
[13];
1694 char *envp
[] = { envp_string
, NULL
};
1696 if (!mutex_trylock(&sw
->tb
->lock
))
1697 return restart_syscall();
1699 if (!!sw
->authorized
== !!val
)
1703 /* Disapprove switch */
1706 ret
= disapprove_switch(&sw
->dev
, NULL
);
1711 /* Approve switch */
1714 ret
= tb_domain_approve_switch_key(sw
->tb
, sw
);
1716 ret
= tb_domain_approve_switch(sw
->tb
, sw
);
1719 /* Challenge switch */
1722 ret
= tb_domain_challenge_switch_key(sw
->tb
, sw
);
1730 sw
->authorized
= val
;
1732 * Notify status change to the userspace, informing the new
1733 * value of /sys/bus/thunderbolt/devices/.../authorized.
1735 sprintf(envp_string
, "AUTHORIZED=%u", sw
->authorized
);
1736 kobject_uevent_env(&sw
->dev
.kobj
, KOBJ_CHANGE
, envp
);
1740 mutex_unlock(&sw
->tb
->lock
);
1744 static ssize_t
authorized_store(struct device
*dev
,
1745 struct device_attribute
*attr
,
1746 const char *buf
, size_t count
)
1748 struct tb_switch
*sw
= tb_to_switch(dev
);
1752 ret
= kstrtouint(buf
, 0, &val
);
1758 pm_runtime_get_sync(&sw
->dev
);
1759 ret
= tb_switch_set_authorized(sw
, val
);
1760 pm_runtime_mark_last_busy(&sw
->dev
);
1761 pm_runtime_put_autosuspend(&sw
->dev
);
1763 return ret
? ret
: count
;
1765 static DEVICE_ATTR_RW(authorized
);
1767 static ssize_t
boot_show(struct device
*dev
, struct device_attribute
*attr
,
1770 struct tb_switch
*sw
= tb_to_switch(dev
);
1772 return sysfs_emit(buf
, "%u\n", sw
->boot
);
1774 static DEVICE_ATTR_RO(boot
);
1776 static ssize_t
device_show(struct device
*dev
, struct device_attribute
*attr
,
1779 struct tb_switch
*sw
= tb_to_switch(dev
);
1781 return sysfs_emit(buf
, "%#x\n", sw
->device
);
1783 static DEVICE_ATTR_RO(device
);
1786 device_name_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1788 struct tb_switch
*sw
= tb_to_switch(dev
);
1790 return sysfs_emit(buf
, "%s\n", sw
->device_name
?: "");
1792 static DEVICE_ATTR_RO(device_name
);
1795 generation_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
1797 struct tb_switch
*sw
= tb_to_switch(dev
);
1799 return sysfs_emit(buf
, "%u\n", sw
->generation
);
1801 static DEVICE_ATTR_RO(generation
);
1803 static ssize_t
key_show(struct device
*dev
, struct device_attribute
*attr
,
1806 struct tb_switch
*sw
= tb_to_switch(dev
);
1809 if (!mutex_trylock(&sw
->tb
->lock
))
1810 return restart_syscall();
1813 ret
= sysfs_emit(buf
, "%*phN\n", TB_SWITCH_KEY_SIZE
, sw
->key
);
1815 ret
= sysfs_emit(buf
, "\n");
1817 mutex_unlock(&sw
->tb
->lock
);
1821 static ssize_t
key_store(struct device
*dev
, struct device_attribute
*attr
,
1822 const char *buf
, size_t count
)
1824 struct tb_switch
*sw
= tb_to_switch(dev
);
1825 u8 key
[TB_SWITCH_KEY_SIZE
];
1826 ssize_t ret
= count
;
1829 if (!strcmp(buf
, "\n"))
1831 else if (hex2bin(key
, buf
, sizeof(key
)))
1834 if (!mutex_trylock(&sw
->tb
->lock
))
1835 return restart_syscall();
1837 if (sw
->authorized
) {
1844 sw
->key
= kmemdup(key
, sizeof(key
), GFP_KERNEL
);
1850 mutex_unlock(&sw
->tb
->lock
);
1853 static DEVICE_ATTR(key
, 0600, key_show
, key_store
);
1855 static ssize_t
speed_show(struct device
*dev
, struct device_attribute
*attr
,
1858 struct tb_switch
*sw
= tb_to_switch(dev
);
1860 return sysfs_emit(buf
, "%u.0 Gb/s\n", sw
->link_speed
);
1864 * Currently all lanes must run at the same speed but we expose here
1865 * both directions to allow possible asymmetric links in the future.
1867 static DEVICE_ATTR(rx_speed
, 0444, speed_show
, NULL
);
1868 static DEVICE_ATTR(tx_speed
, 0444, speed_show
, NULL
);
1870 static ssize_t
rx_lanes_show(struct device
*dev
, struct device_attribute
*attr
,
1873 struct tb_switch
*sw
= tb_to_switch(dev
);
1876 switch (sw
->link_width
) {
1877 case TB_LINK_WIDTH_SINGLE
:
1878 case TB_LINK_WIDTH_ASYM_TX
:
1881 case TB_LINK_WIDTH_DUAL
:
1884 case TB_LINK_WIDTH_ASYM_RX
:
1892 return sysfs_emit(buf
, "%u\n", width
);
1894 static DEVICE_ATTR(rx_lanes
, 0444, rx_lanes_show
, NULL
);
1896 static ssize_t
tx_lanes_show(struct device
*dev
, struct device_attribute
*attr
,
1899 struct tb_switch
*sw
= tb_to_switch(dev
);
1902 switch (sw
->link_width
) {
1903 case TB_LINK_WIDTH_SINGLE
:
1904 case TB_LINK_WIDTH_ASYM_RX
:
1907 case TB_LINK_WIDTH_DUAL
:
1910 case TB_LINK_WIDTH_ASYM_TX
:
1918 return sysfs_emit(buf
, "%u\n", width
);
1920 static DEVICE_ATTR(tx_lanes
, 0444, tx_lanes_show
, NULL
);
1922 static ssize_t
nvm_authenticate_show(struct device
*dev
,
1923 struct device_attribute
*attr
, char *buf
)
1925 struct tb_switch
*sw
= tb_to_switch(dev
);
1928 nvm_get_auth_status(sw
, &status
);
1929 return sysfs_emit(buf
, "%#x\n", status
);
1932 static ssize_t
nvm_authenticate_sysfs(struct device
*dev
, const char *buf
,
1935 struct tb_switch
*sw
= tb_to_switch(dev
);
1938 pm_runtime_get_sync(&sw
->dev
);
1940 if (!mutex_trylock(&sw
->tb
->lock
)) {
1941 ret
= restart_syscall();
1945 if (sw
->no_nvm_upgrade
) {
1950 /* If NVMem devices are not yet added */
1956 ret
= kstrtoint(buf
, 10, &val
);
1960 /* Always clear the authentication status */
1961 nvm_clear_auth_status(sw
);
1964 if (val
== AUTHENTICATE_ONLY
) {
1968 ret
= nvm_authenticate(sw
, true);
1970 if (!sw
->nvm
->flushed
) {
1971 if (!sw
->nvm
->buf
) {
1976 ret
= nvm_validate_and_write(sw
);
1977 if (ret
|| val
== WRITE_ONLY
)
1980 if (val
== WRITE_AND_AUTHENTICATE
) {
1982 ret
= tb_lc_force_power(sw
);
1984 ret
= nvm_authenticate(sw
, false);
1990 mutex_unlock(&sw
->tb
->lock
);
1992 pm_runtime_mark_last_busy(&sw
->dev
);
1993 pm_runtime_put_autosuspend(&sw
->dev
);
1998 static ssize_t
nvm_authenticate_store(struct device
*dev
,
1999 struct device_attribute
*attr
, const char *buf
, size_t count
)
2001 int ret
= nvm_authenticate_sysfs(dev
, buf
, false);
2006 static DEVICE_ATTR_RW(nvm_authenticate
);
2008 static ssize_t
nvm_authenticate_on_disconnect_show(struct device
*dev
,
2009 struct device_attribute
*attr
, char *buf
)
2011 return nvm_authenticate_show(dev
, attr
, buf
);
2014 static ssize_t
nvm_authenticate_on_disconnect_store(struct device
*dev
,
2015 struct device_attribute
*attr
, const char *buf
, size_t count
)
2019 ret
= nvm_authenticate_sysfs(dev
, buf
, true);
2020 return ret
? ret
: count
;
2022 static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect
);
2024 static ssize_t
nvm_version_show(struct device
*dev
,
2025 struct device_attribute
*attr
, char *buf
)
2027 struct tb_switch
*sw
= tb_to_switch(dev
);
2030 if (!mutex_trylock(&sw
->tb
->lock
))
2031 return restart_syscall();
2038 ret
= sysfs_emit(buf
, "%x.%x\n", sw
->nvm
->major
, sw
->nvm
->minor
);
2040 mutex_unlock(&sw
->tb
->lock
);
2044 static DEVICE_ATTR_RO(nvm_version
);
2046 static ssize_t
vendor_show(struct device
*dev
, struct device_attribute
*attr
,
2049 struct tb_switch
*sw
= tb_to_switch(dev
);
2051 return sysfs_emit(buf
, "%#x\n", sw
->vendor
);
2053 static DEVICE_ATTR_RO(vendor
);
2056 vendor_name_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
2058 struct tb_switch
*sw
= tb_to_switch(dev
);
2060 return sysfs_emit(buf
, "%s\n", sw
->vendor_name
?: "");
2062 static DEVICE_ATTR_RO(vendor_name
);
2064 static ssize_t
unique_id_show(struct device
*dev
, struct device_attribute
*attr
,
2067 struct tb_switch
*sw
= tb_to_switch(dev
);
2069 return sysfs_emit(buf
, "%pUb\n", sw
->uuid
);
2071 static DEVICE_ATTR_RO(unique_id
);
2073 static struct attribute
*switch_attrs
[] = {
2074 &dev_attr_authorized
.attr
,
2075 &dev_attr_boot
.attr
,
2076 &dev_attr_device
.attr
,
2077 &dev_attr_device_name
.attr
,
2078 &dev_attr_generation
.attr
,
2080 &dev_attr_nvm_authenticate
.attr
,
2081 &dev_attr_nvm_authenticate_on_disconnect
.attr
,
2082 &dev_attr_nvm_version
.attr
,
2083 &dev_attr_rx_speed
.attr
,
2084 &dev_attr_rx_lanes
.attr
,
2085 &dev_attr_tx_speed
.attr
,
2086 &dev_attr_tx_lanes
.attr
,
2087 &dev_attr_vendor
.attr
,
2088 &dev_attr_vendor_name
.attr
,
2089 &dev_attr_unique_id
.attr
,
2093 static umode_t
switch_attr_is_visible(struct kobject
*kobj
,
2094 struct attribute
*attr
, int n
)
2096 struct device
*dev
= kobj_to_dev(kobj
);
2097 struct tb_switch
*sw
= tb_to_switch(dev
);
2099 if (attr
== &dev_attr_authorized
.attr
) {
2100 if (sw
->tb
->security_level
== TB_SECURITY_NOPCIE
||
2101 sw
->tb
->security_level
== TB_SECURITY_DPONLY
)
2103 } else if (attr
== &dev_attr_device
.attr
) {
2106 } else if (attr
== &dev_attr_device_name
.attr
) {
2107 if (!sw
->device_name
)
2109 } else if (attr
== &dev_attr_vendor
.attr
) {
2112 } else if (attr
== &dev_attr_vendor_name
.attr
) {
2113 if (!sw
->vendor_name
)
2115 } else if (attr
== &dev_attr_key
.attr
) {
2117 sw
->tb
->security_level
== TB_SECURITY_SECURE
&&
2118 sw
->security_level
== TB_SECURITY_SECURE
)
2121 } else if (attr
== &dev_attr_rx_speed
.attr
||
2122 attr
== &dev_attr_rx_lanes
.attr
||
2123 attr
== &dev_attr_tx_speed
.attr
||
2124 attr
== &dev_attr_tx_lanes
.attr
) {
2128 } else if (attr
== &dev_attr_nvm_authenticate
.attr
) {
2129 if (nvm_upgradeable(sw
))
2132 } else if (attr
== &dev_attr_nvm_version
.attr
) {
2133 if (nvm_readable(sw
))
2136 } else if (attr
== &dev_attr_boot
.attr
) {
2140 } else if (attr
== &dev_attr_nvm_authenticate_on_disconnect
.attr
) {
2141 if (sw
->quirks
& QUIRK_FORCE_POWER_LINK_CONTROLLER
)
2146 return sw
->safe_mode
? 0 : attr
->mode
;
2149 static const struct attribute_group switch_group
= {
2150 .is_visible
= switch_attr_is_visible
,
2151 .attrs
= switch_attrs
,
2154 static const struct attribute_group
*switch_groups
[] = {
2159 static void tb_switch_release(struct device
*dev
)
2161 struct tb_switch
*sw
= tb_to_switch(dev
);
2162 struct tb_port
*port
;
2164 dma_port_free(sw
->dma_port
);
2166 tb_switch_for_each_port(sw
, port
) {
2167 ida_destroy(&port
->in_hopids
);
2168 ida_destroy(&port
->out_hopids
);
2172 kfree(sw
->device_name
);
2173 kfree(sw
->vendor_name
);
2180 static int tb_switch_uevent(const struct device
*dev
, struct kobj_uevent_env
*env
)
2182 const struct tb_switch
*sw
= tb_to_switch(dev
);
2185 if (tb_switch_is_usb4(sw
)) {
2186 if (add_uevent_var(env
, "USB4_VERSION=%u.0",
2187 usb4_switch_version(sw
)))
2191 if (!tb_route(sw
)) {
2194 const struct tb_port
*port
;
2197 /* Device is hub if it has any downstream ports */
2198 tb_switch_for_each_port(sw
, port
) {
2199 if (!port
->disabled
&& !tb_is_upstream_port(port
) &&
2200 tb_port_is_null(port
)) {
2206 type
= hub
? "hub" : "device";
2209 if (add_uevent_var(env
, "USB4_TYPE=%s", type
))
2215 * Currently only need to provide the callbacks. Everything else is handled
2216 * in the connection manager.
2218 static int __maybe_unused
tb_switch_runtime_suspend(struct device
*dev
)
2220 struct tb_switch
*sw
= tb_to_switch(dev
);
2221 const struct tb_cm_ops
*cm_ops
= sw
->tb
->cm_ops
;
2223 if (cm_ops
->runtime_suspend_switch
)
2224 return cm_ops
->runtime_suspend_switch(sw
);
2229 static int __maybe_unused
tb_switch_runtime_resume(struct device
*dev
)
2231 struct tb_switch
*sw
= tb_to_switch(dev
);
2232 const struct tb_cm_ops
*cm_ops
= sw
->tb
->cm_ops
;
2234 if (cm_ops
->runtime_resume_switch
)
2235 return cm_ops
->runtime_resume_switch(sw
);
2239 static const struct dev_pm_ops tb_switch_pm_ops
= {
2240 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend
, tb_switch_runtime_resume
,
2244 struct device_type tb_switch_type
= {
2245 .name
= "thunderbolt_device",
2246 .release
= tb_switch_release
,
2247 .uevent
= tb_switch_uevent
,
2248 .pm
= &tb_switch_pm_ops
,
2251 static int tb_switch_get_generation(struct tb_switch
*sw
)
2253 if (tb_switch_is_usb4(sw
))
2256 if (sw
->config
.vendor_id
== PCI_VENDOR_ID_INTEL
) {
2257 switch (sw
->config
.device_id
) {
2258 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE
:
2259 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE
:
2260 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK
:
2261 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C
:
2262 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C
:
2263 case PCI_DEVICE_ID_INTEL_PORT_RIDGE
:
2264 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE
:
2265 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE
:
2268 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE
:
2269 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE
:
2270 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE
:
2273 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE
:
2274 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE
:
2275 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE
:
2276 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE
:
2277 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE
:
2278 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE
:
2279 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE
:
2280 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE
:
2281 case PCI_DEVICE_ID_INTEL_ICL_NHI0
:
2282 case PCI_DEVICE_ID_INTEL_ICL_NHI1
:
2288 * For unknown switches assume generation to be 1 to be on the
2291 tb_sw_warn(sw
, "unsupported switch device id %#x\n",
2292 sw
->config
.device_id
);
2296 static bool tb_switch_exceeds_max_depth(const struct tb_switch
*sw
, int depth
)
2300 if (tb_switch_is_usb4(sw
) ||
2301 (sw
->tb
->root_switch
&& tb_switch_is_usb4(sw
->tb
->root_switch
)))
2302 max_depth
= USB4_SWITCH_MAX_DEPTH
;
2304 max_depth
= TB_SWITCH_MAX_DEPTH
;
2306 return depth
> max_depth
;
2310 * tb_switch_alloc() - allocate a switch
2311 * @tb: Pointer to the owning domain
2312 * @parent: Parent device for this switch
2313 * @route: Route string for this switch
2315 * Allocates and initializes a switch. Will not upload configuration to
2316 * the switch. For that you need to call tb_switch_configure()
2317 * separately. The returned switch should be released by calling
2320 * Return: Pointer to the allocated switch or ERR_PTR() in case of
2323 struct tb_switch
*tb_switch_alloc(struct tb
*tb
, struct device
*parent
,
2326 struct tb_switch
*sw
;
2330 /* Unlock the downstream port so we can access the switch below */
2332 struct tb_switch
*parent_sw
= tb_to_switch(parent
);
2333 struct tb_port
*down
;
2335 down
= tb_port_at(route
, parent_sw
);
2336 tb_port_unlock(down
);
2339 depth
= tb_route_length(route
);
2341 upstream_port
= tb_cfg_get_upstream_port(tb
->ctl
, route
);
2342 if (upstream_port
< 0)
2343 return ERR_PTR(upstream_port
);
2345 sw
= kzalloc(sizeof(*sw
), GFP_KERNEL
);
2347 return ERR_PTR(-ENOMEM
);
2350 ret
= tb_cfg_read(tb
->ctl
, &sw
->config
, route
, 0, TB_CFG_SWITCH
, 0, 5);
2352 goto err_free_sw_ports
;
2354 sw
->generation
= tb_switch_get_generation(sw
);
2356 tb_dbg(tb
, "current switch config:\n");
2357 tb_dump_switch(tb
, sw
);
2359 /* configure switch */
2360 sw
->config
.upstream_port_number
= upstream_port
;
2361 sw
->config
.depth
= depth
;
2362 sw
->config
.route_hi
= upper_32_bits(route
);
2363 sw
->config
.route_lo
= lower_32_bits(route
);
2364 sw
->config
.enabled
= 0;
2366 /* Make sure we do not exceed maximum topology limit */
2367 if (tb_switch_exceeds_max_depth(sw
, depth
)) {
2368 ret
= -EADDRNOTAVAIL
;
2369 goto err_free_sw_ports
;
2372 /* initialize ports */
2373 sw
->ports
= kcalloc(sw
->config
.max_port_number
+ 1, sizeof(*sw
->ports
),
2377 goto err_free_sw_ports
;
2380 for (i
= 0; i
<= sw
->config
.max_port_number
; i
++) {
2381 /* minimum setup for tb_find_cap and tb_drom_read to work */
2382 sw
->ports
[i
].sw
= sw
;
2383 sw
->ports
[i
].port
= i
;
2385 /* Control port does not need HopID allocation */
2387 ida_init(&sw
->ports
[i
].in_hopids
);
2388 ida_init(&sw
->ports
[i
].out_hopids
);
2392 ret
= tb_switch_find_vse_cap(sw
, TB_VSE_CAP_PLUG_EVENTS
);
2394 sw
->cap_plug_events
= ret
;
2396 ret
= tb_switch_find_vse_cap(sw
, TB_VSE_CAP_TIME2
);
2398 sw
->cap_vsec_tmu
= ret
;
2400 ret
= tb_switch_find_vse_cap(sw
, TB_VSE_CAP_LINK_CONTROLLER
);
2404 ret
= tb_switch_find_vse_cap(sw
, TB_VSE_CAP_CP_LP
);
2408 /* Root switch is always authorized */
2410 sw
->authorized
= true;
2412 device_initialize(&sw
->dev
);
2413 sw
->dev
.parent
= parent
;
2414 sw
->dev
.bus
= &tb_bus_type
;
2415 sw
->dev
.type
= &tb_switch_type
;
2416 sw
->dev
.groups
= switch_groups
;
2417 dev_set_name(&sw
->dev
, "%u-%llx", tb
->index
, tb_route(sw
));
2425 return ERR_PTR(ret
);
2429 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2430 * @tb: Pointer to the owning domain
2431 * @parent: Parent device for this switch
2432 * @route: Route string for this switch
2434 * This creates a switch in safe mode. This means the switch pretty much
2435 * lacks all capabilities except DMA configuration port before it is
2436 * flashed with a valid NVM firmware.
2438 * The returned switch must be released by calling tb_switch_put().
2440 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
2443 tb_switch_alloc_safe_mode(struct tb
*tb
, struct device
*parent
, u64 route
)
2445 struct tb_switch
*sw
;
2447 sw
= kzalloc(sizeof(*sw
), GFP_KERNEL
);
2449 return ERR_PTR(-ENOMEM
);
2452 sw
->config
.depth
= tb_route_length(route
);
2453 sw
->config
.route_hi
= upper_32_bits(route
);
2454 sw
->config
.route_lo
= lower_32_bits(route
);
2455 sw
->safe_mode
= true;
2457 device_initialize(&sw
->dev
);
2458 sw
->dev
.parent
= parent
;
2459 sw
->dev
.bus
= &tb_bus_type
;
2460 sw
->dev
.type
= &tb_switch_type
;
2461 sw
->dev
.groups
= switch_groups
;
2462 dev_set_name(&sw
->dev
, "%u-%llx", tb
->index
, tb_route(sw
));
2468 * tb_switch_configure() - Uploads configuration to the switch
2469 * @sw: Switch to configure
2471 * Call this function before the switch is added to the system. It will
2472 * upload configuration to the switch and makes it available for the
2473 * connection manager to use. Can be called to the switch again after
2474 * resume from low power states to re-initialize it.
2476 * Return: %0 in case of success and negative errno in case of failure
2478 int tb_switch_configure(struct tb_switch
*sw
)
2480 struct tb
*tb
= sw
->tb
;
2484 route
= tb_route(sw
);
2486 tb_dbg(tb
, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2487 sw
->config
.enabled
? "restoring" : "initializing", route
,
2488 tb_route_length(route
), sw
->config
.upstream_port_number
);
2490 sw
->config
.enabled
= 1;
2492 if (tb_switch_is_usb4(sw
)) {
2494 * For USB4 devices, we need to program the CM version
2495 * accordingly so that it knows to expose all the
2496 * additional capabilities. Program it according to USB4
2497 * version to avoid changing existing (v1) routers behaviour.
2499 if (usb4_switch_version(sw
) < 2)
2500 sw
->config
.cmuv
= ROUTER_CS_4_CMUV_V1
;
2502 sw
->config
.cmuv
= ROUTER_CS_4_CMUV_V2
;
2503 sw
->config
.plug_events_delay
= 0xa;
2505 /* Enumerate the switch */
2506 ret
= tb_sw_write(sw
, (u32
*)&sw
->config
+ 1, TB_CFG_SWITCH
,
2511 ret
= usb4_switch_setup(sw
);
2513 if (sw
->config
.vendor_id
!= PCI_VENDOR_ID_INTEL
)
2514 tb_sw_warn(sw
, "unknown switch vendor id %#x\n",
2515 sw
->config
.vendor_id
);
2517 if (!sw
->cap_plug_events
) {
2518 tb_sw_warn(sw
, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2522 /* Enumerate the switch */
2523 ret
= tb_sw_write(sw
, (u32
*)&sw
->config
+ 1, TB_CFG_SWITCH
,
2529 return tb_plug_events_active(sw
, true);
/**
 * tb_switch_configuration_valid() - Set the tunneling configuration to be valid
 * @sw: Router to configure
 *
 * Needs to be called before any tunnels can be setup through the
 * router. Can be called to any router.
 *
 * Returns %0 in success and negative errno otherwise.
 */
int tb_switch_configuration_valid(struct tb_switch *sw)
{
	/* Only USB4 routers need (and support) this step */
	if (tb_switch_is_usb4(sw))
		return usb4_switch_configuration_valid(sw);
	return 0;
}
2548 static int tb_switch_set_uuid(struct tb_switch
*sw
)
2557 if (tb_switch_is_usb4(sw
)) {
2558 ret
= usb4_switch_read_uid(sw
, &sw
->uid
);
2564 * The newer controllers include fused UUID as part of
2565 * link controller specific registers
2567 ret
= tb_lc_read_uuid(sw
, uuid
);
2577 * ICM generates UUID based on UID and fills the upper
2578 * two words with ones. This is not strictly following
2579 * UUID format but we want to be compatible with it so
2580 * we do the same here.
2582 uuid
[0] = sw
->uid
& 0xffffffff;
2583 uuid
[1] = (sw
->uid
>> 32) & 0xffffffff;
2584 uuid
[2] = 0xffffffff;
2585 uuid
[3] = 0xffffffff;
2588 sw
->uuid
= kmemdup(uuid
, sizeof(uuid
), GFP_KERNEL
);
2594 static int tb_switch_add_dma_port(struct tb_switch
*sw
)
2599 switch (sw
->generation
) {
2601 /* Only root switch can be upgraded */
2608 ret
= tb_switch_set_uuid(sw
);
2615 * DMA port is the only thing available when the switch
2623 if (sw
->no_nvm_upgrade
)
2626 if (tb_switch_is_usb4(sw
)) {
2627 ret
= usb4_switch_nvm_authenticate_status(sw
, &status
);
2632 tb_sw_info(sw
, "switch flash authentication failed\n");
2633 nvm_set_auth_status(sw
, status
);
2639 /* Root switch DMA port requires running firmware */
2640 if (!tb_route(sw
) && !tb_switch_is_icm(sw
))
2643 sw
->dma_port
= dma_port_alloc(sw
);
2648 * If there is status already set then authentication failed
2649 * when the dma_port_flash_update_auth() returned. Power cycling
2650 * is not needed (it was done already) so only thing we do here
2651 * is to unblock runtime PM of the root port.
2653 nvm_get_auth_status(sw
, &status
);
2656 nvm_authenticate_complete_dma_port(sw
);
2661 * Check status of the previous flash authentication. If there
2662 * is one we need to power cycle the switch in any case to make
2663 * it functional again.
2665 ret
= dma_port_flash_update_auth_status(sw
->dma_port
, &status
);
2669 /* Now we can allow root port to suspend again */
2671 nvm_authenticate_complete_dma_port(sw
);
2674 tb_sw_info(sw
, "switch flash authentication failed\n");
2675 nvm_set_auth_status(sw
, status
);
2678 tb_sw_info(sw
, "power cycling the switch now\n");
2679 dma_port_power_cycle(sw
->dma_port
);
2682 * We return error here which causes the switch adding failure.
2683 * It should appear back after power cycle is complete.
2688 static void tb_switch_default_link_ports(struct tb_switch
*sw
)
2692 for (i
= 1; i
<= sw
->config
.max_port_number
; i
++) {
2693 struct tb_port
*port
= &sw
->ports
[i
];
2694 struct tb_port
*subordinate
;
2696 if (!tb_port_is_null(port
))
2699 /* Check for the subordinate port */
2700 if (i
== sw
->config
.max_port_number
||
2701 !tb_port_is_null(&sw
->ports
[i
+ 1]))
2704 /* Link them if not already done so (by DROM) */
2705 subordinate
= &sw
->ports
[i
+ 1];
2706 if (!port
->dual_link_port
&& !subordinate
->dual_link_port
) {
2708 port
->dual_link_port
= subordinate
;
2709 subordinate
->link_nr
= 1;
2710 subordinate
->dual_link_port
= port
;
2712 tb_sw_dbg(sw
, "linked ports %d <-> %d\n",
2713 port
->port
, subordinate
->port
);
2718 static bool tb_switch_lane_bonding_possible(struct tb_switch
*sw
)
2720 const struct tb_port
*up
= tb_upstream_port(sw
);
2722 if (!up
->dual_link_port
|| !up
->dual_link_port
->remote
)
2725 if (tb_switch_is_usb4(sw
))
2726 return usb4_switch_lane_bonding_possible(sw
);
2727 return tb_lc_lane_bonding_possible(sw
);
2730 static int tb_switch_update_link_attributes(struct tb_switch
*sw
)
2733 bool change
= false;
2736 if (!tb_route(sw
) || tb_switch_is_icm(sw
))
2739 up
= tb_upstream_port(sw
);
2741 ret
= tb_port_get_link_speed(up
);
2744 if (sw
->link_speed
!= ret
)
2746 sw
->link_speed
= ret
;
2748 ret
= tb_port_get_link_width(up
);
2751 if (sw
->link_width
!= ret
)
2753 sw
->link_width
= ret
;
2755 /* Notify userspace that there is possible link attribute change */
2756 if (device_is_registered(&sw
->dev
) && change
)
2757 kobject_uevent(&sw
->dev
.kobj
, KOBJ_CHANGE
);
2762 /* Must be called after tb_switch_update_link_attributes() */
2763 static void tb_switch_link_init(struct tb_switch
*sw
)
2765 struct tb_port
*up
, *down
;
2768 if (!tb_route(sw
) || tb_switch_is_icm(sw
))
2771 tb_sw_dbg(sw
, "current link speed %u.0 Gb/s\n", sw
->link_speed
);
2772 tb_sw_dbg(sw
, "current link width %s\n", width_name(sw
->link_width
));
2774 bonded
= sw
->link_width
>= TB_LINK_WIDTH_DUAL
;
2777 * Gen 4 links come up as bonded so update the port structures
2780 up
= tb_upstream_port(sw
);
2781 down
= tb_switch_downstream_port(sw
);
2783 up
->bonded
= bonded
;
2784 if (up
->dual_link_port
)
2785 up
->dual_link_port
->bonded
= bonded
;
2786 tb_port_update_credits(up
);
2788 down
->bonded
= bonded
;
2789 if (down
->dual_link_port
)
2790 down
->dual_link_port
->bonded
= bonded
;
2791 tb_port_update_credits(down
);
2795 * tb_switch_lane_bonding_enable() - Enable lane bonding
2796 * @sw: Switch to enable lane bonding
2798 * Connection manager can call this function to enable lane bonding of a
2799 * switch. If conditions are correct and both switches support the feature,
2800 * lanes are bonded. It is safe to call this to any switch.
2802 static int tb_switch_lane_bonding_enable(struct tb_switch
*sw
)
2804 struct tb_port
*up
, *down
;
2808 if (!tb_switch_lane_bonding_possible(sw
))
2811 up
= tb_upstream_port(sw
);
2812 down
= tb_switch_downstream_port(sw
);
2814 if (!tb_port_width_supported(up
, TB_LINK_WIDTH_DUAL
) ||
2815 !tb_port_width_supported(down
, TB_LINK_WIDTH_DUAL
))
2819 * Both lanes need to be in CL0. Here we assume lane 0 already be in
2820 * CL0 and check just for lane 1.
2822 if (tb_wait_for_port(down
->dual_link_port
, false) <= 0)
2825 ret
= tb_port_lane_bonding_enable(up
);
2827 tb_port_warn(up
, "failed to enable lane bonding\n");
2831 ret
= tb_port_lane_bonding_enable(down
);
2833 tb_port_warn(down
, "failed to enable lane bonding\n");
2834 tb_port_lane_bonding_disable(up
);
2838 /* Any of the widths are all bonded */
2839 width
= TB_LINK_WIDTH_DUAL
| TB_LINK_WIDTH_ASYM_TX
|
2840 TB_LINK_WIDTH_ASYM_RX
;
2842 return tb_port_wait_for_link_width(down
, width
, 100);
2846 * tb_switch_lane_bonding_disable() - Disable lane bonding
2847 * @sw: Switch whose lane bonding to disable
2849 * Disables lane bonding between @sw and parent. This can be called even
2850 * if lanes were not bonded originally.
2852 static int tb_switch_lane_bonding_disable(struct tb_switch
*sw
)
2854 struct tb_port
*up
, *down
;
2857 up
= tb_upstream_port(sw
);
2862 * If the link is Gen 4 there is no way to switch the link to
2863 * two single lane links so avoid that here. Also don't bother
2864 * if the link is not up anymore (sw is unplugged).
2866 ret
= tb_port_get_link_generation(up
);
2872 down
= tb_switch_downstream_port(sw
);
2873 tb_port_lane_bonding_disable(up
);
2874 tb_port_lane_bonding_disable(down
);
2877 * It is fine if we get other errors as the router might have
2880 return tb_port_wait_for_link_width(down
, TB_LINK_WIDTH_SINGLE
, 100);
2883 /* Note updating sw->link_width done in tb_switch_update_link_attributes() */
2884 static int tb_switch_asym_enable(struct tb_switch
*sw
, enum tb_link_width width
)
2886 struct tb_port
*up
, *down
, *port
;
2887 enum tb_link_width down_width
;
2890 up
= tb_upstream_port(sw
);
2891 down
= tb_switch_downstream_port(sw
);
2893 if (width
== TB_LINK_WIDTH_ASYM_TX
) {
2894 down_width
= TB_LINK_WIDTH_ASYM_RX
;
2897 down_width
= TB_LINK_WIDTH_ASYM_TX
;
2901 ret
= tb_port_set_link_width(up
, width
);
2905 ret
= tb_port_set_link_width(down
, down_width
);
2910 * Initiate the change in the router that one of its TX lanes is
2911 * changing to RX but do so only if there is an actual change.
2913 if (sw
->link_width
!= width
) {
2914 ret
= usb4_port_asym_start(port
);
2918 ret
= tb_port_wait_for_link_width(up
, width
, 100);
2926 /* Note updating sw->link_width done in tb_switch_update_link_attributes() */
2927 static int tb_switch_asym_disable(struct tb_switch
*sw
)
2929 struct tb_port
*up
, *down
;
2932 up
= tb_upstream_port(sw
);
2933 down
= tb_switch_downstream_port(sw
);
2935 ret
= tb_port_set_link_width(up
, TB_LINK_WIDTH_DUAL
);
2939 ret
= tb_port_set_link_width(down
, TB_LINK_WIDTH_DUAL
);
2944 * Initiate the change in the router that has three TX lanes and
2945 * is changing one of its TX lanes to RX but only if there is a
2946 * change in the link width.
2948 if (sw
->link_width
> TB_LINK_WIDTH_DUAL
) {
2949 if (sw
->link_width
== TB_LINK_WIDTH_ASYM_TX
)
2950 ret
= usb4_port_asym_start(up
);
2952 ret
= usb4_port_asym_start(down
);
2956 ret
= tb_port_wait_for_link_width(up
, TB_LINK_WIDTH_DUAL
, 100);
2965 * tb_switch_set_link_width() - Configure router link width
2966 * @sw: Router to configure
2967 * @width: The new link width
2969 * Set device router link width to @width from router upstream port
2970 * perspective. Supports also asymmetric links if the routers boths side
2971 * of the link supports it.
2973 * Does nothing for host router.
2975 * Returns %0 in case of success, negative errno otherwise.
2977 int tb_switch_set_link_width(struct tb_switch
*sw
, enum tb_link_width width
)
2979 struct tb_port
*up
, *down
;
2985 up
= tb_upstream_port(sw
);
2986 down
= tb_switch_downstream_port(sw
);
2989 case TB_LINK_WIDTH_SINGLE
:
2990 ret
= tb_switch_lane_bonding_disable(sw
);
2993 case TB_LINK_WIDTH_DUAL
:
2994 if (sw
->link_width
== TB_LINK_WIDTH_ASYM_TX
||
2995 sw
->link_width
== TB_LINK_WIDTH_ASYM_RX
) {
2996 ret
= tb_switch_asym_disable(sw
);
3000 ret
= tb_switch_lane_bonding_enable(sw
);
3003 case TB_LINK_WIDTH_ASYM_TX
:
3004 case TB_LINK_WIDTH_ASYM_RX
:
3005 ret
= tb_switch_asym_enable(sw
, width
);
3014 tb_sw_warn(sw
, "timeout changing link width\n");
3023 tb_sw_dbg(sw
, "failed to change link width: %d\n", ret
);
3027 tb_port_update_credits(down
);
3028 tb_port_update_credits(up
);
3030 tb_switch_update_link_attributes(sw
);
3032 tb_sw_dbg(sw
, "link width set to %s\n", width_name(width
));
3037 * tb_switch_configure_link() - Set link configured
3038 * @sw: Switch whose link is configured
3040 * Sets the link upstream from @sw configured (from both ends) so that
3041 * it will not be disconnected when the domain exits sleep. Can be
3042 * called for any switch.
3044 * It is recommended that this is called after lane bonding is enabled.
3046 * Returns %0 on success and negative errno in case of error.
3048 int tb_switch_configure_link(struct tb_switch
*sw
)
3050 struct tb_port
*up
, *down
;
3053 if (!tb_route(sw
) || tb_switch_is_icm(sw
))
3056 up
= tb_upstream_port(sw
);
3057 if (tb_switch_is_usb4(up
->sw
))
3058 ret
= usb4_port_configure(up
);
3060 ret
= tb_lc_configure_port(up
);
3065 if (tb_switch_is_usb4(down
->sw
))
3066 return usb4_port_configure(down
);
3067 return tb_lc_configure_port(down
);
3071 * tb_switch_unconfigure_link() - Unconfigure link
3072 * @sw: Switch whose link is unconfigured
3074 * Sets the link unconfigured so the @sw will be disconnected if the
3075 * domain exists sleep.
3077 void tb_switch_unconfigure_link(struct tb_switch
*sw
)
3079 struct tb_port
*up
, *down
;
3081 if (sw
->is_unplugged
)
3083 if (!tb_route(sw
) || tb_switch_is_icm(sw
))
3086 up
= tb_upstream_port(sw
);
3087 if (tb_switch_is_usb4(up
->sw
))
3088 usb4_port_unconfigure(up
);
3090 tb_lc_unconfigure_port(up
);
3093 if (tb_switch_is_usb4(down
->sw
))
3094 usb4_port_unconfigure(down
);
3096 tb_lc_unconfigure_port(down
);
/* Ask a USB4 router for its preferred buffer (credit) allocation */
static void tb_switch_credits_init(struct tb_switch *sw)
{
	if (tb_switch_is_icm(sw))
		return;
	if (!tb_switch_is_usb4(sw))
		return;
	if (usb4_switch_credits_init(sw))
		tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
}
3109 static int tb_switch_port_hotplug_enable(struct tb_switch
*sw
)
3111 struct tb_port
*port
;
3113 if (tb_switch_is_icm(sw
))
3116 tb_switch_for_each_port(sw
, port
) {
3119 if (!port
->cap_usb4
)
3122 res
= usb4_port_hotplug_enable(port
);
3130 * tb_switch_add() - Add a switch to the domain
3131 * @sw: Switch to add
3133 * This is the last step in adding switch to the domain. It will read
3134 * identification information from DROM and initializes ports so that
3135 * they can be used to connect other switches. The switch will be
3136 * exposed to the userspace when this function successfully returns. To
3137 * remove and release the switch, call tb_switch_remove().
3139 * Return: %0 in case of success and negative errno in case of failure
3141 int tb_switch_add(struct tb_switch
*sw
)
3146 * Initialize DMA control port now before we read DROM. Recent
3147 * host controllers have more complete DROM on NVM that includes
3148 * vendor and model identification strings which we then expose
3149 * to the userspace. NVM can be accessed through DMA
3150 * configuration based mailbox.
3152 ret
= tb_switch_add_dma_port(sw
);
3154 dev_err(&sw
->dev
, "failed to add DMA port\n");
3158 if (!sw
->safe_mode
) {
3159 tb_switch_credits_init(sw
);
3162 ret
= tb_drom_read(sw
);
3164 dev_warn(&sw
->dev
, "reading DROM failed: %d\n", ret
);
3165 tb_sw_dbg(sw
, "uid: %#llx\n", sw
->uid
);
3167 ret
= tb_switch_set_uuid(sw
);
3169 dev_err(&sw
->dev
, "failed to set UUID\n");
3173 for (i
= 0; i
<= sw
->config
.max_port_number
; i
++) {
3174 if (sw
->ports
[i
].disabled
) {
3175 tb_port_dbg(&sw
->ports
[i
], "disabled by eeprom\n");
3178 ret
= tb_init_port(&sw
->ports
[i
]);
3180 dev_err(&sw
->dev
, "failed to initialize port %d\n", i
);
3185 tb_check_quirks(sw
);
3187 tb_switch_default_link_ports(sw
);
3189 ret
= tb_switch_update_link_attributes(sw
);
3193 tb_switch_link_init(sw
);
3195 ret
= tb_switch_clx_init(sw
);
3199 ret
= tb_switch_tmu_init(sw
);
3204 ret
= tb_switch_port_hotplug_enable(sw
);
3208 ret
= device_add(&sw
->dev
);
3210 dev_err(&sw
->dev
, "failed to add device: %d\n", ret
);
3215 dev_info(&sw
->dev
, "new device found, vendor=%#x device=%#x\n",
3216 sw
->vendor
, sw
->device
);
3217 if (sw
->vendor_name
&& sw
->device_name
)
3218 dev_info(&sw
->dev
, "%s %s\n", sw
->vendor_name
,
3222 ret
= usb4_switch_add_ports(sw
);
3224 dev_err(&sw
->dev
, "failed to add USB4 ports\n");
3228 ret
= tb_switch_nvm_add(sw
);
3230 dev_err(&sw
->dev
, "failed to add NVM devices\n");
3235 * Thunderbolt routers do not generate wakeups themselves but
3236 * they forward wakeups from tunneled protocols, so enable it
3239 device_init_wakeup(&sw
->dev
, true);
3241 pm_runtime_set_active(&sw
->dev
);
3243 pm_runtime_set_autosuspend_delay(&sw
->dev
, TB_AUTOSUSPEND_DELAY
);
3244 pm_runtime_use_autosuspend(&sw
->dev
);
3245 pm_runtime_mark_last_busy(&sw
->dev
);
3246 pm_runtime_enable(&sw
->dev
);
3247 pm_request_autosuspend(&sw
->dev
);
3250 tb_switch_debugfs_init(sw
);
3254 usb4_switch_remove_ports(sw
);
3256 device_del(&sw
->dev
);
3262 * tb_switch_remove() - Remove and release a switch
3263 * @sw: Switch to remove
3265 * This will remove the switch from the domain and release it after last
3266 * reference count drops to zero. If there are switches connected below
3267 * this switch, they will be removed as well.
3269 void tb_switch_remove(struct tb_switch
*sw
)
3271 struct tb_port
*port
;
3273 tb_switch_debugfs_remove(sw
);
3276 pm_runtime_get_sync(&sw
->dev
);
3277 pm_runtime_disable(&sw
->dev
);
3280 /* port 0 is the switch itself and never has a remote */
3281 tb_switch_for_each_port(sw
, port
) {
3282 if (tb_port_has_remote(port
)) {
3283 tb_switch_remove(port
->remote
->sw
);
3284 port
->remote
= NULL
;
3285 } else if (port
->xdomain
) {
3286 tb_xdomain_remove(port
->xdomain
);
3287 port
->xdomain
= NULL
;
3290 /* Remove any downstream retimers */
3291 tb_retimer_remove_all(port
);
3294 if (!sw
->is_unplugged
)
3295 tb_plug_events_active(sw
, false);
3297 tb_switch_nvm_remove(sw
);
3298 usb4_switch_remove_ports(sw
);
3301 dev_info(&sw
->dev
, "device disconnected\n");
3302 device_unregister(&sw
->dev
);
3306 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
3307 * @sw: Router to mark unplugged
3309 void tb_sw_set_unplugged(struct tb_switch
*sw
)
3311 struct tb_port
*port
;
3313 if (sw
== sw
->tb
->root_switch
) {
3314 tb_sw_WARN(sw
, "cannot unplug root switch\n");
3317 if (sw
->is_unplugged
) {
3318 tb_sw_WARN(sw
, "is_unplugged already set\n");
3321 sw
->is_unplugged
= true;
3322 tb_switch_for_each_port(sw
, port
) {
3323 if (tb_port_has_remote(port
))
3324 tb_sw_set_unplugged(port
->remote
->sw
);
3325 else if (port
->xdomain
)
3326 port
->xdomain
->is_unplugged
= true;
/* Program the router's wake-on flags (0 disables all wakes) */
static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (flags)
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
	else
		tb_sw_dbg(sw, "disabling wakeup\n");

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}
3342 int tb_switch_resume(struct tb_switch
*sw
)
3344 struct tb_port
*port
;
3347 tb_sw_dbg(sw
, "resuming switch\n");
3350 * Check for UID of the connected switches except for root
3351 * switch which we assume cannot be removed.
3357 * Check first that we can still read the switch config
3358 * space. It may be that there is now another domain
3361 err
= tb_cfg_get_upstream_port(sw
->tb
->ctl
, tb_route(sw
));
3363 tb_sw_info(sw
, "switch not present anymore\n");
3367 /* We don't have any way to confirm this was the same device */
3371 if (tb_switch_is_usb4(sw
))
3372 err
= usb4_switch_read_uid(sw
, &uid
);
3374 err
= tb_drom_read_uid_only(sw
, &uid
);
3376 tb_sw_warn(sw
, "uid read failed\n");
3379 if (sw
->uid
!= uid
) {
3381 "changed while suspended (uid %#llx -> %#llx)\n",
3387 err
= tb_switch_configure(sw
);
3392 tb_switch_set_wake(sw
, 0);
3394 err
= tb_switch_tmu_init(sw
);
3398 /* check for surviving downstream switches */
3399 tb_switch_for_each_port(sw
, port
) {
3400 if (!tb_port_is_null(port
))
3403 if (!tb_port_resume(port
))
3406 if (tb_wait_for_port(port
, true) <= 0) {
3408 "lost during suspend, disconnecting\n");
3409 if (tb_port_has_remote(port
))
3410 tb_sw_set_unplugged(port
->remote
->sw
);
3411 else if (port
->xdomain
)
3412 port
->xdomain
->is_unplugged
= true;
3415 * Always unlock the port so the downstream
3416 * switch/domain is accessible.
3418 if (tb_port_unlock(port
))
3419 tb_port_warn(port
, "failed to unlock port\n");
3420 if (port
->remote
&& tb_switch_resume(port
->remote
->sw
)) {
3422 "lost during suspend, disconnecting\n");
3423 tb_sw_set_unplugged(port
->remote
->sw
);
3431 * tb_switch_suspend() - Put a switch to sleep
3432 * @sw: Switch to suspend
3433 * @runtime: Is this runtime suspend or system sleep
3435 * Suspends router and all its children. Enables wakes according to
3436 * value of @runtime and then sets sleep bit for the router. If @sw is
3437 * host router the domain is ready to go to sleep once this function
3440 void tb_switch_suspend(struct tb_switch
*sw
, bool runtime
)
3442 unsigned int flags
= 0;
3443 struct tb_port
*port
;
3446 tb_sw_dbg(sw
, "suspending switch\n");
3449 * Actually only needed for Titan Ridge but for simplicity can be
3450 * done for USB4 device too as CLx is re-enabled at resume.
3452 tb_switch_clx_disable(sw
);
3454 err
= tb_plug_events_active(sw
, false);
3458 tb_switch_for_each_port(sw
, port
) {
3459 if (tb_port_has_remote(port
))
3460 tb_switch_suspend(port
->remote
->sw
, runtime
);
3464 /* Trigger wake when something is plugged in/out */
3465 flags
|= TB_WAKE_ON_CONNECT
| TB_WAKE_ON_DISCONNECT
;
3466 flags
|= TB_WAKE_ON_USB4
;
3467 flags
|= TB_WAKE_ON_USB3
| TB_WAKE_ON_PCIE
| TB_WAKE_ON_DP
;
3468 } else if (device_may_wakeup(&sw
->dev
)) {
3469 flags
|= TB_WAKE_ON_USB4
| TB_WAKE_ON_USB3
| TB_WAKE_ON_PCIE
;
3472 tb_switch_set_wake(sw
, flags
);
3474 if (tb_switch_is_usb4(sw
))
3475 usb4_switch_set_sleep(sw
);
3477 tb_lc_set_sleep(sw
);
3481 * tb_switch_query_dp_resource() - Query availability of DP resource
3482 * @sw: Switch whose DP resource is queried
3485 * Queries availability of DP resource for DP tunneling using switch
3486 * specific means. Returns %true if resource is available.
3488 bool tb_switch_query_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
3490 if (tb_switch_is_usb4(sw
))
3491 return usb4_switch_query_dp_resource(sw
, in
);
3492 return tb_lc_dp_sink_query(sw
, in
);
3496 * tb_switch_alloc_dp_resource() - Allocate available DP resource
3497 * @sw: Switch whose DP resource is allocated
3500 * Allocates DP resource for DP tunneling. The resource must be
3501 * available for this to succeed (see tb_switch_query_dp_resource()).
3502 * Returns %0 in success and negative errno otherwise.
3504 int tb_switch_alloc_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
3508 if (tb_switch_is_usb4(sw
))
3509 ret
= usb4_switch_alloc_dp_resource(sw
, in
);
3511 ret
= tb_lc_dp_sink_alloc(sw
, in
);
3514 tb_sw_warn(sw
, "failed to allocate DP resource for port %d\n",
3517 tb_sw_dbg(sw
, "allocated DP resource for port %d\n", in
->port
);
3523 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
3524 * @sw: Switch whose DP resource is de-allocated
3527 * De-allocates DP resource that was previously allocated for DP
3530 void tb_switch_dealloc_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
3534 if (tb_switch_is_usb4(sw
))
3535 ret
= usb4_switch_dealloc_dp_resource(sw
, in
);
3537 ret
= tb_lc_dp_sink_dealloc(sw
, in
);
3540 tb_sw_warn(sw
, "failed to de-allocate DP resource for port %d\n",
3543 tb_sw_dbg(sw
, "released DP resource for port %d\n", in
->port
);
3546 struct tb_sw_lookup
{
3554 static int tb_switch_match(struct device
*dev
, const void *data
)
3556 struct tb_switch
*sw
= tb_to_switch(dev
);
3557 const struct tb_sw_lookup
*lookup
= data
;
3561 if (sw
->tb
!= lookup
->tb
)
3565 return !memcmp(sw
->uuid
, lookup
->uuid
, sizeof(*lookup
->uuid
));
3567 if (lookup
->route
) {
3568 return sw
->config
.route_lo
== lower_32_bits(lookup
->route
) &&
3569 sw
->config
.route_hi
== upper_32_bits(lookup
->route
);
3572 /* Root switch is matched only by depth */
3576 return sw
->link
== lookup
->link
&& sw
->depth
== lookup
->depth
;
3580 * tb_switch_find_by_link_depth() - Find switch by link and depth
3581 * @tb: Domain the switch belongs
3582 * @link: Link number the switch is connected
3583 * @depth: Depth of the switch in link
3585 * Returned switch has reference count increased so the caller needs to
3586 * call tb_switch_put() when done with the switch.
3588 struct tb_switch
*tb_switch_find_by_link_depth(struct tb
*tb
, u8 link
, u8 depth
)
3590 struct tb_sw_lookup lookup
;
3593 memset(&lookup
, 0, sizeof(lookup
));
3596 lookup
.depth
= depth
;
3598 dev
= bus_find_device(&tb_bus_type
, NULL
, &lookup
, tb_switch_match
);
3600 return tb_to_switch(dev
);
3606 * tb_switch_find_by_uuid() - Find switch by UUID
3607 * @tb: Domain the switch belongs
3608 * @uuid: UUID to look for
3610 * Returned switch has reference count increased so the caller needs to
3611 * call tb_switch_put() when done with the switch.
3613 struct tb_switch
*tb_switch_find_by_uuid(struct tb
*tb
, const uuid_t
*uuid
)
3615 struct tb_sw_lookup lookup
;
3618 memset(&lookup
, 0, sizeof(lookup
));
3622 dev
= bus_find_device(&tb_bus_type
, NULL
, &lookup
, tb_switch_match
);
3624 return tb_to_switch(dev
);
3630 * tb_switch_find_by_route() - Find switch by route string
3631 * @tb: Domain the switch belongs
3632 * @route: Route string to look for
3634 * Returned switch has reference count increased so the caller needs to
3635 * call tb_switch_put() when done with the switch.
3637 struct tb_switch
*tb_switch_find_by_route(struct tb
*tb
, u64 route
)
3639 struct tb_sw_lookup lookup
;
3643 return tb_switch_get(tb
->root_switch
);
3645 memset(&lookup
, 0, sizeof(lookup
));
3647 lookup
.route
= route
;
3649 dev
= bus_find_device(&tb_bus_type
, NULL
, &lookup
, tb_switch_match
);
3651 return tb_to_switch(dev
);
3657 * tb_switch_find_port() - return the first port of @type on @sw or NULL
3658 * @sw: Switch to find the port from
3659 * @type: Port type to look for
3661 struct tb_port
*tb_switch_find_port(struct tb_switch
*sw
,
3662 enum tb_port_type type
)
3664 struct tb_port
*port
;
3666 tb_switch_for_each_port(sw
, port
) {
3667 if (port
->config
.type
== type
)
3675 * Can be used for read/write a specified PCIe bridge for any Thunderbolt 3
3676 * device. For now used only for Titan Ridge.
3678 static int tb_switch_pcie_bridge_write(struct tb_switch
*sw
, unsigned int bridge
,
3679 unsigned int pcie_offset
, u32 value
)
3681 u32 offset
, command
, val
;
3684 if (sw
->generation
!= 3)
3687 offset
= sw
->cap_plug_events
+ TB_PLUG_EVENTS_PCIE_WR_DATA
;
3688 ret
= tb_sw_write(sw
, &value
, TB_CFG_SWITCH
, offset
, 1);
3692 command
= pcie_offset
& TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK
;
3693 command
|= BIT(bridge
+ TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT
);
3694 command
|= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK
;
3695 command
|= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
3696 << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT
;
3697 command
|= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK
;
3699 offset
= sw
->cap_plug_events
+ TB_PLUG_EVENTS_PCIE_CMD
;
3701 ret
= tb_sw_write(sw
, &command
, TB_CFG_SWITCH
, offset
, 1);
3705 ret
= tb_switch_wait_for_bit(sw
, offset
,
3706 TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK
, 0, 100);
3710 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, offset
, 1);
3714 if (val
& TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK
)
/**
 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
 * @sw: Router to enable PCIe L1
 *
 * For Titan Ridge switch to enter CLx state, its PCIe bridges shall enable
 * entry to PCIe L1 state. Shall be called after the upstream PCIe tunnel
 * was configured. Due to Intel platforms limitation, shall be called only
 * for first hop switch.
 */
int tb_switch_pcie_l1_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret;

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	/* Enable PCIe L1 enable only for first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	/* Write to downstream PCIe bridge #5 aka Dn4 */
	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
	if (ret)
		return ret;

	/* Write to Upstream PCIe bridge #0 aka Up0 */
	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
}
3754 * tb_switch_xhci_connect() - Connect internal xHCI
3755 * @sw: Router whose xHCI to connect
3757 * Can be called to any router. For Alpine Ridge and Titan Ridge
3758 * performs special flows that bring the xHCI functional for any device
3759 * connected to the type-C port. Call only after PCIe tunnel has been
3760 * established. The function only does the connect if not done already
3761 * so can be called several times for the same router.
3763 int tb_switch_xhci_connect(struct tb_switch
*sw
)
3765 struct tb_port
*port1
, *port3
;
3768 if (sw
->generation
!= 3)
3771 port1
= &sw
->ports
[1];
3772 port3
= &sw
->ports
[3];
3774 if (tb_switch_is_alpine_ridge(sw
)) {
3775 bool usb_port1
, usb_port3
, xhci_port1
, xhci_port3
;
3777 usb_port1
= tb_lc_is_usb_plugged(port1
);
3778 usb_port3
= tb_lc_is_usb_plugged(port3
);
3779 xhci_port1
= tb_lc_is_xhci_connected(port1
);
3780 xhci_port3
= tb_lc_is_xhci_connected(port3
);
3782 /* Figure out correct USB port to connect */
3783 if (usb_port1
&& !xhci_port1
) {
3784 ret
= tb_lc_xhci_connect(port1
);
3788 if (usb_port3
&& !xhci_port3
)
3789 return tb_lc_xhci_connect(port3
);
3790 } else if (tb_switch_is_titan_ridge(sw
)) {
3791 ret
= tb_lc_xhci_connect(port1
);
3794 return tb_lc_xhci_connect(port3
);
3801 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
3802 * @sw: Router whose xHCI to disconnect
3804 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
3807 void tb_switch_xhci_disconnect(struct tb_switch
*sw
)
3809 if (sw
->generation
== 3) {
3810 struct tb_port
*port1
= &sw
->ports
[1];
3811 struct tb_port
*port3
= &sw
->ports
[3];
3813 tb_lc_xhci_disconnect(port1
);
3814 tb_port_dbg(port1
, "disconnected xHCI\n");
3815 tb_lc_xhci_disconnect(port3
);
3816 tb_port_dbg(port3
, "disconnected xHCI\n");