// SPDX-License-Identifier: GPL-2.0
/*
 * USB4 specific functionality
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */
#include <linux/delay.h>
#include <linux/ktime.h>
/* Router operation data area is 16 dwords; failed ops are retried 3 times */
#define USB4_DATA_DWORDS		16
#define USB4_DATA_RETRIES		3
/* USB4 router operation opcodes written to ROUTER_CS_26 */
enum usb4_switch_op {
	USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
	USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
	USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
	USB4_SWITCH_OP_NVM_WRITE = 0x20,
	USB4_SWITCH_OP_NVM_AUTH = 0x21,
	USB4_SWITCH_OP_NVM_READ = 0x22,
	USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
	USB4_SWITCH_OP_DROM_READ = 0x24,
	USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
};
/* Metadata register (ROUTER_CS_25) field layouts for the NVM/DROM ops */
#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT	2
#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
#define USB4_NVM_READ_LENGTH_SHIFT	24

/* NVM_SET_OFFSET shares the NVM_READ offset field layout */
#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT

#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
#define USB4_DROM_ADDRESS_SHIFT		2
#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
#define USB4_DROM_SIZE_SHIFT		15

#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)
45 typedef int (*read_block_fn
)(struct tb_switch
*, unsigned int, void *, size_t);
46 typedef int (*write_block_fn
)(struct tb_switch
*, const void *, size_t);
48 static int usb4_switch_wait_for_bit(struct tb_switch
*sw
, u32 offset
, u32 bit
,
49 u32 value
, int timeout_msec
)
51 ktime_t timeout
= ktime_add_ms(ktime_get(), timeout_msec
);
57 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, offset
, 1);
61 if ((val
& bit
) == value
)
64 usleep_range(50, 100);
65 } while (ktime_before(ktime_get(), timeout
));
70 static int usb4_switch_op_read_data(struct tb_switch
*sw
, void *data
,
73 if (dwords
> USB4_DATA_DWORDS
)
76 return tb_sw_read(sw
, data
, TB_CFG_SWITCH
, ROUTER_CS_9
, dwords
);
79 static int usb4_switch_op_write_data(struct tb_switch
*sw
, const void *data
,
82 if (dwords
> USB4_DATA_DWORDS
)
85 return tb_sw_write(sw
, data
, TB_CFG_SWITCH
, ROUTER_CS_9
, dwords
);
88 static int usb4_switch_op_read_metadata(struct tb_switch
*sw
, u32
*metadata
)
90 return tb_sw_read(sw
, metadata
, TB_CFG_SWITCH
, ROUTER_CS_25
, 1);
93 static int usb4_switch_op_write_metadata(struct tb_switch
*sw
, u32 metadata
)
95 return tb_sw_write(sw
, &metadata
, TB_CFG_SWITCH
, ROUTER_CS_25
, 1);
98 static int usb4_switch_do_read_data(struct tb_switch
*sw
, u16 address
,
99 void *buf
, size_t size
, read_block_fn read_block
)
101 unsigned int retries
= USB4_DATA_RETRIES
;
104 offset
= address
& 3;
105 address
= address
& ~3;
108 size_t nbytes
= min_t(size_t, size
, USB4_DATA_DWORDS
* 4);
109 unsigned int dwaddress
, dwords
;
110 u8 data
[USB4_DATA_DWORDS
* 4];
113 dwaddress
= address
/ 4;
114 dwords
= ALIGN(nbytes
, 4) / 4;
116 ret
= read_block(sw
, dwaddress
, data
, dwords
);
118 if (ret
== -ETIMEDOUT
) {
126 memcpy(buf
, data
+ offset
, nbytes
);
136 static int usb4_switch_do_write_data(struct tb_switch
*sw
, u16 address
,
137 const void *buf
, size_t size
, write_block_fn write_next_block
)
139 unsigned int retries
= USB4_DATA_RETRIES
;
142 offset
= address
& 3;
143 address
= address
& ~3;
146 u32 nbytes
= min_t(u32
, size
, USB4_DATA_DWORDS
* 4);
147 u8 data
[USB4_DATA_DWORDS
* 4];
150 memcpy(data
+ offset
, buf
, nbytes
);
152 ret
= write_next_block(sw
, data
, nbytes
/ 4);
154 if (ret
== -ETIMEDOUT
) {
170 static int usb4_switch_op(struct tb_switch
*sw
, u16 opcode
, u8
*status
)
175 val
= opcode
| ROUTER_CS_26_OV
;
176 ret
= tb_sw_write(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_26
, 1);
180 ret
= usb4_switch_wait_for_bit(sw
, ROUTER_CS_26
, ROUTER_CS_26_OV
, 0, 500);
184 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_26
, 1);
188 if (val
& ROUTER_CS_26_ONS
)
191 *status
= (val
& ROUTER_CS_26_STATUS_MASK
) >> ROUTER_CS_26_STATUS_SHIFT
;
196 * usb4_switch_setup() - Additional setup for USB4 device
197 * @sw: USB4 router to setup
199 * USB4 routers need additional settings in order to enable all the
200 * tunneling. This function enables USB and PCIe tunneling if it can be
201 * enabled (e.g the parent switch also supports them). If USB tunneling
202 * is not available for some reason (like that there is Thunderbolt 3
203 * switch upstream) then the internal xHCI controller is enabled
206 int usb4_switch_setup(struct tb_switch
*sw
)
208 struct tb_switch
*parent
;
216 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_6
, 1);
220 xhci
= val
& ROUTER_CS_6_HCI
;
221 tbt3
= !(val
& ROUTER_CS_6_TNS
);
223 tb_sw_dbg(sw
, "TBT3 support: %s, xHCI: %s\n",
224 tbt3
? "yes" : "no", xhci
? "yes" : "no");
226 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_5
, 1);
230 parent
= tb_switch_parent(sw
);
232 if (tb_switch_find_port(parent
, TB_TYPE_USB3_DOWN
)) {
233 val
|= ROUTER_CS_5_UTO
;
237 /* Only enable PCIe tunneling if the parent router supports it */
238 if (tb_switch_find_port(parent
, TB_TYPE_PCIE_DOWN
)) {
239 val
|= ROUTER_CS_5_PTO
;
241 * xHCI can be enabled if PCIe tunneling is supported
242 * and the parent does not have any USB3 dowstream
243 * adapters (so we cannot do USB 3.x tunneling).
246 val
|= ROUTER_CS_5_HCO
;
249 /* TBT3 supported by the CM */
250 val
|= ROUTER_CS_5_C3S
;
251 /* Tunneling configuration is ready now */
252 val
|= ROUTER_CS_5_CV
;
254 ret
= tb_sw_write(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_5
, 1);
258 return usb4_switch_wait_for_bit(sw
, ROUTER_CS_6
, ROUTER_CS_6_CR
,
263 * usb4_switch_read_uid() - Read UID from USB4 router
265 * @uid: UID is stored here
267 * Reads 64-bit UID from USB4 router config space.
269 int usb4_switch_read_uid(struct tb_switch
*sw
, u64
*uid
)
271 return tb_sw_read(sw
, uid
, TB_CFG_SWITCH
, ROUTER_CS_7
, 2);
274 static int usb4_switch_drom_read_block(struct tb_switch
*sw
,
275 unsigned int dwaddress
, void *buf
,
282 metadata
= (dwords
<< USB4_DROM_SIZE_SHIFT
) & USB4_DROM_SIZE_MASK
;
283 metadata
|= (dwaddress
<< USB4_DROM_ADDRESS_SHIFT
) &
284 USB4_DROM_ADDRESS_MASK
;
286 ret
= usb4_switch_op_write_metadata(sw
, metadata
);
290 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_DROM_READ
, &status
);
297 return usb4_switch_op_read_data(sw
, buf
, dwords
);
301 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
303 * @address: Byte address inside DROM to start reading
304 * @buf: Buffer where the DROM content is stored
305 * @size: Number of bytes to read from DROM
307 * Uses USB4 router operations to read router DROM. For devices this
308 * should always work but for hosts it may return %-EOPNOTSUPP in which
309 * case the host router does not have DROM.
311 int usb4_switch_drom_read(struct tb_switch
*sw
, unsigned int address
, void *buf
,
314 return usb4_switch_do_read_data(sw
, address
, buf
, size
,
315 usb4_switch_drom_read_block
);
318 static int usb4_set_port_configured(struct tb_port
*port
, bool configured
)
323 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
,
324 port
->cap_usb4
+ PORT_CS_19
, 1);
329 val
|= PORT_CS_19_PC
;
331 val
&= ~PORT_CS_19_PC
;
333 return tb_port_write(port
, &val
, TB_CFG_PORT
,
334 port
->cap_usb4
+ PORT_CS_19
, 1);
338 * usb4_switch_configure_link() - Set upstream USB4 link configured
341 * Sets the upstream USB4 link to be configured for power management
344 int usb4_switch_configure_link(struct tb_switch
*sw
)
351 up
= tb_upstream_port(sw
);
352 return usb4_set_port_configured(up
, true);
356 * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration
359 * Reverse of usb4_switch_configure_link().
361 void usb4_switch_unconfigure_link(struct tb_switch
*sw
)
365 if (sw
->is_unplugged
|| !tb_route(sw
))
368 up
= tb_upstream_port(sw
);
369 usb4_set_port_configured(up
, false);
373 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
376 * Checks whether conditions are met so that lane bonding can be
377 * established with the upstream router. Call only for device routers.
379 bool usb4_switch_lane_bonding_possible(struct tb_switch
*sw
)
385 up
= tb_upstream_port(sw
);
386 ret
= tb_port_read(up
, &val
, TB_CFG_PORT
, up
->cap_usb4
+ PORT_CS_18
, 1);
390 return !!(val
& PORT_CS_18_BE
);
394 * usb4_switch_set_sleep() - Prepare the router to enter sleep
397 * Enables wakes and sets sleep bit for the router. Returns when the
398 * router sleep ready bit has been asserted.
400 int usb4_switch_set_sleep(struct tb_switch
*sw
)
405 /* Set sleep bit and wait for sleep ready to be asserted */
406 ret
= tb_sw_read(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_5
, 1);
410 val
|= ROUTER_CS_5_SLP
;
412 ret
= tb_sw_write(sw
, &val
, TB_CFG_SWITCH
, ROUTER_CS_5
, 1);
416 return usb4_switch_wait_for_bit(sw
, ROUTER_CS_6
, ROUTER_CS_6_SLPR
,
417 ROUTER_CS_6_SLPR
, 500);
421 * usb4_switch_nvm_sector_size() - Return router NVM sector size
424 * If the router supports NVM operations this function returns the NVM
425 * sector size in bytes. If NVM operations are not supported returns
428 int usb4_switch_nvm_sector_size(struct tb_switch
*sw
)
434 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_NVM_SECTOR_SIZE
, &status
);
439 return status
== 0x2 ? -EOPNOTSUPP
: -EIO
;
441 ret
= usb4_switch_op_read_metadata(sw
, &metadata
);
445 return metadata
& USB4_NVM_SECTOR_SIZE_MASK
;
448 static int usb4_switch_nvm_read_block(struct tb_switch
*sw
,
449 unsigned int dwaddress
, void *buf
, size_t dwords
)
455 metadata
= (dwords
<< USB4_NVM_READ_LENGTH_SHIFT
) &
456 USB4_NVM_READ_LENGTH_MASK
;
457 metadata
|= (dwaddress
<< USB4_NVM_READ_OFFSET_SHIFT
) &
458 USB4_NVM_READ_OFFSET_MASK
;
460 ret
= usb4_switch_op_write_metadata(sw
, metadata
);
464 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_NVM_READ
, &status
);
471 return usb4_switch_op_read_data(sw
, buf
, dwords
);
475 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
477 * @address: Starting address in bytes
478 * @buf: Read data is placed here
479 * @size: How many bytes to read
481 * Reads NVM contents of the router. If NVM is not supported returns
484 int usb4_switch_nvm_read(struct tb_switch
*sw
, unsigned int address
, void *buf
,
487 return usb4_switch_do_read_data(sw
, address
, buf
, size
,
488 usb4_switch_nvm_read_block
);
491 static int usb4_switch_nvm_set_offset(struct tb_switch
*sw
,
492 unsigned int address
)
494 u32 metadata
, dwaddress
;
498 dwaddress
= address
/ 4;
499 metadata
= (dwaddress
<< USB4_NVM_SET_OFFSET_SHIFT
) &
500 USB4_NVM_SET_OFFSET_MASK
;
502 ret
= usb4_switch_op_write_metadata(sw
, metadata
);
506 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_NVM_SET_OFFSET
, &status
);
510 return status
? -EIO
: 0;
513 static int usb4_switch_nvm_write_next_block(struct tb_switch
*sw
,
514 const void *buf
, size_t dwords
)
519 ret
= usb4_switch_op_write_data(sw
, buf
, dwords
);
523 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_NVM_WRITE
, &status
);
527 return status
? -EIO
: 0;
531 * usb4_switch_nvm_write() - Write to the router NVM
533 * @address: Start address where to write in bytes
534 * @buf: Pointer to the data to write
535 * @size: Size of @buf in bytes
537 * Writes @buf to the router NVM using USB4 router operations. If NVM
538 * write is not supported returns %-EOPNOTSUPP.
540 int usb4_switch_nvm_write(struct tb_switch
*sw
, unsigned int address
,
541 const void *buf
, size_t size
)
545 ret
= usb4_switch_nvm_set_offset(sw
, address
);
549 return usb4_switch_do_write_data(sw
, address
, buf
, size
,
550 usb4_switch_nvm_write_next_block
);
554 * usb4_switch_nvm_authenticate() - Authenticate new NVM
557 * After the new NVM has been written via usb4_switch_nvm_write(), this
558 * function triggers NVM authentication process. If the authentication
559 * is successful the router is power cycled and the new NVM starts
560 * running. In case of failure returns negative errno.
562 int usb4_switch_nvm_authenticate(struct tb_switch
*sw
)
567 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_NVM_AUTH
, &status
);
573 tb_sw_dbg(sw
, "NVM authentication successful\n");
587 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
591 * For DP tunneling this function can be used to query availability of
592 * DP IN resource. Returns true if the resource is available for DP
593 * tunneling, false otherwise.
595 bool usb4_switch_query_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
600 ret
= usb4_switch_op_write_metadata(sw
, in
->port
);
604 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_QUERY_DP_RESOURCE
, &status
);
606 * If DP resource allocation is not supported assume it is
609 if (ret
== -EOPNOTSUPP
)
618 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
622 * Allocates DP IN resource for DP tunneling using USB4 router
623 * operations. If the resource was allocated returns %0. Otherwise
624 * returns negative errno, in particular %-EBUSY if the resource is
627 int usb4_switch_alloc_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
632 ret
= usb4_switch_op_write_metadata(sw
, in
->port
);
636 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_ALLOC_DP_RESOURCE
, &status
);
637 if (ret
== -EOPNOTSUPP
)
642 return status
? -EBUSY
: 0;
646 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
650 * Releases the previously allocated DP IN resource.
652 int usb4_switch_dealloc_dp_resource(struct tb_switch
*sw
, struct tb_port
*in
)
657 ret
= usb4_switch_op_write_metadata(sw
, in
->port
);
661 ret
= usb4_switch_op(sw
, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE
, &status
);
662 if (ret
== -EOPNOTSUPP
)
667 return status
? -EIO
: 0;
670 static int usb4_port_idx(const struct tb_switch
*sw
, const struct tb_port
*port
)
675 /* Assume port is primary */
676 tb_switch_for_each_port(sw
, p
) {
677 if (!tb_port_is_null(p
))
679 if (tb_is_upstream_port(p
))
692 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
696 * USB4 routers have direct mapping between USB4 ports and PCIe
697 * downstream adapters where the PCIe topology is extended. This
698 * function returns the corresponding downstream PCIe adapter or %NULL
699 * if no such mapping was possible.
701 struct tb_port
*usb4_switch_map_pcie_down(struct tb_switch
*sw
,
702 const struct tb_port
*port
)
704 int usb4_idx
= usb4_port_idx(sw
, port
);
708 /* Find PCIe down port matching usb4_port */
709 tb_switch_for_each_port(sw
, p
) {
710 if (!tb_port_is_pcie_down(p
))
713 if (pcie_idx
== usb4_idx
&& !tb_pci_port_is_enabled(p
))
723 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
727 * USB4 routers have direct mapping between USB4 ports and USB 3.x
728 * downstream adapters where the USB 3.x topology is extended. This
729 * function returns the corresponding downstream USB 3.x adapter or
730 * %NULL if no such mapping was possible.
732 struct tb_port
*usb4_switch_map_usb3_down(struct tb_switch
*sw
,
733 const struct tb_port
*port
)
735 int usb4_idx
= usb4_port_idx(sw
, port
);
739 /* Find USB3 down port matching usb4_port */
740 tb_switch_for_each_port(sw
, p
) {
741 if (!tb_port_is_usb3_down(p
))
744 if (usb_idx
== usb4_idx
&& !tb_usb3_port_is_enabled(p
))
754 * usb4_port_unlock() - Unlock USB4 downstream port
755 * @port: USB4 port to unlock
757 * Unlocks USB4 downstream port so that the connection manager can
758 * access the router below this port.
760 int usb4_port_unlock(struct tb_port
*port
)
765 ret
= tb_port_read(port
, &val
, TB_CFG_PORT
, ADP_CS_4
, 1);
769 val
&= ~ADP_CS_4_LCK
;
770 return tb_port_write(port
, &val
, TB_CFG_PORT
, ADP_CS_4
, 1);