1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright 2015-2017 Google, Inc
4 *
5 * USB Power Delivery protocol stack.
6 */
7
8 #include <linux/completion.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/hrtimer.h>
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/kthread.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/power_supply.h>
18 #include <linux/proc_fs.h>
19 #include <linux/property.h>
20 #include <linux/sched/clock.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/usb.h>
25 #include <linux/usb/pd.h>
26 #include <linux/usb/pd_ado.h>
27 #include <linux/usb/pd_bdo.h>
28 #include <linux/usb/pd_ext_sdb.h>
29 #include <linux/usb/pd_vdo.h>
30 #include <linux/usb/role.h>
31 #include <linux/usb/tcpm.h>
32 #include <linux/usb/typec_altmode.h>
33
34 #include <uapi/linux/sched/types.h>
35
36 #define FOREACH_STATE(S) \
37 S(INVALID_STATE), \
38 S(TOGGLING), \
39 S(CHECK_CONTAMINANT), \
40 S(SRC_UNATTACHED), \
41 S(SRC_ATTACH_WAIT), \
42 S(SRC_ATTACHED), \
43 S(SRC_STARTUP), \
44 S(SRC_SEND_CAPABILITIES), \
45 S(SRC_SEND_CAPABILITIES_TIMEOUT), \
46 S(SRC_NEGOTIATE_CAPABILITIES), \
47 S(SRC_TRANSITION_SUPPLY), \
48 S(SRC_READY), \
49 S(SRC_WAIT_NEW_CAPABILITIES), \
50 \
51 S(SNK_UNATTACHED), \
52 S(SNK_ATTACH_WAIT), \
53 S(SNK_DEBOUNCED), \
54 S(SNK_ATTACHED), \
55 S(SNK_STARTUP), \
56 S(SNK_DISCOVERY), \
57 S(SNK_DISCOVERY_DEBOUNCE), \
58 S(SNK_DISCOVERY_DEBOUNCE_DONE), \
59 S(SNK_WAIT_CAPABILITIES), \
60 S(SNK_NEGOTIATE_CAPABILITIES), \
61 S(SNK_NEGOTIATE_PPS_CAPABILITIES), \
62 S(SNK_TRANSITION_SINK), \
63 S(SNK_TRANSITION_SINK_VBUS), \
64 S(SNK_READY), \
65 \
66 S(ACC_UNATTACHED), \
67 S(DEBUG_ACC_ATTACHED), \
68 S(AUDIO_ACC_ATTACHED), \
69 S(AUDIO_ACC_DEBOUNCE), \
70 \
71 S(HARD_RESET_SEND), \
72 S(HARD_RESET_START), \
73 S(SRC_HARD_RESET_VBUS_OFF), \
74 S(SRC_HARD_RESET_VBUS_ON), \
75 S(SNK_HARD_RESET_SINK_OFF), \
76 S(SNK_HARD_RESET_WAIT_VBUS), \
77 S(SNK_HARD_RESET_SINK_ON), \
78 \
79 S(SOFT_RESET), \
80 S(SRC_SOFT_RESET_WAIT_SNK_TX), \
81 S(SNK_SOFT_RESET), \
82 S(SOFT_RESET_SEND), \
83 \
84 S(DR_SWAP_ACCEPT), \
85 S(DR_SWAP_SEND), \
86 S(DR_SWAP_SEND_TIMEOUT), \
87 S(DR_SWAP_CANCEL), \
88 S(DR_SWAP_CHANGE_DR), \
89 \
90 S(PR_SWAP_ACCEPT), \
91 S(PR_SWAP_SEND), \
92 S(PR_SWAP_SEND_TIMEOUT), \
93 S(PR_SWAP_CANCEL), \
94 S(PR_SWAP_START), \
95 S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \
96 S(PR_SWAP_SRC_SNK_SOURCE_OFF), \
97 S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
98 S(PR_SWAP_SRC_SNK_SINK_ON), \
99 S(PR_SWAP_SNK_SRC_SINK_OFF), \
100 S(PR_SWAP_SNK_SRC_SOURCE_ON), \
101 S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP), \
102 \
103 S(VCONN_SWAP_ACCEPT), \
104 S(VCONN_SWAP_SEND), \
105 S(VCONN_SWAP_SEND_TIMEOUT), \
106 S(VCONN_SWAP_CANCEL), \
107 S(VCONN_SWAP_START), \
108 S(VCONN_SWAP_WAIT_FOR_VCONN), \
109 S(VCONN_SWAP_TURN_ON_VCONN), \
110 S(VCONN_SWAP_TURN_OFF_VCONN), \
111 \
112 S(FR_SWAP_SEND), \
113 S(FR_SWAP_SEND_TIMEOUT), \
114 S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF), \
115 S(FR_SWAP_SNK_SRC_NEW_SINK_READY), \
116 S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED), \
117 S(FR_SWAP_CANCEL), \
118 \
119 S(SNK_TRY), \
120 S(SNK_TRY_WAIT), \
121 S(SNK_TRY_WAIT_DEBOUNCE), \
122 S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS), \
123 S(SRC_TRYWAIT), \
124 S(SRC_TRYWAIT_DEBOUNCE), \
125 S(SRC_TRYWAIT_UNATTACHED), \
126 \
127 S(SRC_TRY), \
128 S(SRC_TRY_WAIT), \
129 S(SRC_TRY_DEBOUNCE), \
130 S(SNK_TRYWAIT), \
131 S(SNK_TRYWAIT_DEBOUNCE), \
132 S(SNK_TRYWAIT_VBUS), \
133 S(BIST_RX), \
134 \
135 S(GET_STATUS_SEND), \
136 S(GET_STATUS_SEND_TIMEOUT), \
137 S(GET_PPS_STATUS_SEND), \
138 S(GET_PPS_STATUS_SEND_TIMEOUT), \
139 \
140 S(GET_SINK_CAP), \
141 S(GET_SINK_CAP_TIMEOUT), \
142 \
143 S(ERROR_RECOVERY), \
144 S(PORT_RESET), \
145 S(PORT_RESET_WAIT_OFF), \
146 \
147 S(AMS_START), \
148 S(CHUNK_NOT_SUPP)
149
150 #define FOREACH_AMS(S) \
151 S(NONE_AMS), \
152 S(POWER_NEGOTIATION), \
153 S(GOTOMIN), \
154 S(SOFT_RESET_AMS), \
155 S(HARD_RESET), \
156 S(CABLE_RESET), \
157 S(GET_SOURCE_CAPABILITIES), \
158 S(GET_SINK_CAPABILITIES), \
159 S(POWER_ROLE_SWAP), \
160 S(FAST_ROLE_SWAP), \
161 S(DATA_ROLE_SWAP), \
162 S(VCONN_SWAP), \
163 S(SOURCE_ALERT), \
164 S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
165 S(GETTING_SOURCE_SINK_STATUS), \
166 S(GETTING_BATTERY_CAPABILITIES), \
167 S(GETTING_BATTERY_STATUS), \
168 S(GETTING_MANUFACTURER_INFORMATION), \
169 S(SECURITY), \
170 S(FIRMWARE_UPDATE), \
171 S(DISCOVER_IDENTITY), \
172 S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY), \
173 S(DISCOVER_SVIDS), \
174 S(DISCOVER_MODES), \
175 S(DFP_TO_UFP_ENTER_MODE), \
176 S(DFP_TO_UFP_EXIT_MODE), \
177 S(DFP_TO_CABLE_PLUG_ENTER_MODE), \
178 S(DFP_TO_CABLE_PLUG_EXIT_MODE), \
179 S(ATTENTION), \
180 S(BIST), \
181 S(UNSTRUCTURED_VDMS), \
182 S(STRUCTURED_VDMS), \
183 S(COUNTRY_INFO), \
184 S(COUNTRY_CODES)
185
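/*
 * X-macro helpers: expanding the FOREACH_STATE()/FOREACH_AMS() lists with
 * GENERATE_ENUM and GENERATE_STRING keeps each enum and its matching name
 * table generated from a single definition. For example,
 * FOREACH_STATE(GENERATE_ENUM) expands to "INVALID_STATE, TOGGLING, ...",
 * while FOREACH_STATE(GENERATE_STRING) expands to the corresponding
 * string literals "INVALID_STATE", "TOGGLING", ...
 */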
186 #define GENERATE_ENUM(e) e
187 #define GENERATE_STRING(s) #s
188
189 enum tcpm_state {
190 FOREACH_STATE(GENERATE_ENUM)
191 };
192
193 static const char * const tcpm_states[] = {
194 FOREACH_STATE(GENERATE_STRING)
195 };
196
197 enum tcpm_ams {
198 FOREACH_AMS(GENERATE_ENUM)
199 };
200
201 static const char * const tcpm_ams_str[] = {
202 FOREACH_AMS(GENERATE_STRING)
203 };
204
205 enum vdm_states {
206 VDM_STATE_ERR_BUSY = -3,
207 VDM_STATE_ERR_SEND = -2,
208 VDM_STATE_ERR_TMOUT = -1,
209 VDM_STATE_DONE = 0,
210 /* Anything >0 represents an active state */
211 VDM_STATE_READY = 1,
212 VDM_STATE_BUSY = 2,
213 VDM_STATE_WAIT_RSP_BUSY = 3,
214 VDM_STATE_SEND_MESSAGE = 4,
215 };
216
217 enum pd_msg_request {
218 PD_MSG_NONE = 0,
219 PD_MSG_CTRL_REJECT,
220 PD_MSG_CTRL_WAIT,
221 PD_MSG_CTRL_NOT_SUPP,
222 PD_MSG_DATA_SINK_CAP,
223 PD_MSG_DATA_SOURCE_CAP,
224 };
225
226 enum adev_actions {
227 ADEV_NONE = 0,
228 ADEV_NOTIFY_USB_AND_QUEUE_VDM,
229 ADEV_QUEUE_VDM,
230 ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
231 ADEV_ATTENTION,
232 };
233
234 /*
235 * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap.
236 * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
237 * Version 1.2"
238 */
239 enum frs_typec_current {
240 FRS_NOT_SUPPORTED,
241 FRS_DEFAULT_POWER,
242 FRS_5V_1P5A,
243 FRS_5V_3A,
244 };
245
246 /* Events from low level driver */
247
248 #define TCPM_CC_EVENT BIT(0)
249 #define TCPM_VBUS_EVENT BIT(1)
250 #define TCPM_RESET_EVENT BIT(2)
251 #define TCPM_FRS_EVENT BIT(3)
252 #define TCPM_SOURCING_VBUS BIT(4)
253 #define TCPM_PORT_CLEAN BIT(5)
254 #define TCPM_PORT_ERROR BIT(6)
255
256 #define LOG_BUFFER_ENTRIES 1024
257 #define LOG_BUFFER_ENTRY_SIZE 128
258
259 /* Alternate mode support */
260
261 #define SVID_DISCOVERY_MAX 16
262 #define ALTMODE_DISCOVERY_MAX (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
263
264 #define GET_SINK_CAP_RETRY_MS 100
265 #define SEND_DISCOVER_RETRY_MS 100
266
267 struct pd_mode_data {
268 int svid_index; /* current SVID index */
269 int nsvids;
270 u16 svids[SVID_DISCOVERY_MAX];
271 int altmodes; /* number of alternate modes */
272 struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
273 };
274
275 /*
276 * @min_volt: Actual min voltage at the local port
277 * @req_min_volt: Requested min voltage to the port partner
278 * @max_volt: Actual max voltage at the local port
279 * @req_max_volt: Requested max voltage to the port partner
280 * @max_curr: Actual max current at the local port
281 * @req_max_curr: Requested max current of the port partner
282 * @req_out_volt: Requested output voltage to the port partner
283 * @req_op_curr: Requested operating current to the port partner
284 * @supported: Partner has at least one APDO hence supports PPS
285 * @active: PPS mode is active
286 */
287 struct pd_pps_data {
288 u32 min_volt;
289 u32 req_min_volt;
290 u32 max_volt;
291 u32 req_max_volt;
292 u32 max_curr;
293 u32 req_max_curr;
294 u32 req_out_volt;
295 u32 req_op_curr;
296 bool supported;
297 bool active;
298 };
299
300 struct pd_data {
301 struct usb_power_delivery *pd;
302 struct usb_power_delivery_capabilities *source_cap;
303 struct usb_power_delivery_capabilities_desc source_desc;
304 struct usb_power_delivery_capabilities *sink_cap;
305 struct usb_power_delivery_capabilities_desc sink_desc;
306 unsigned int operating_snk_mw;
307 };
308
309 struct tcpm_port {
310 struct device *dev;
311
312 struct mutex lock; /* tcpm state machine lock */
313 struct kthread_worker *wq;
314
315 struct typec_capability typec_caps;
316 struct typec_port *typec_port;
317
318 struct tcpc_dev *tcpc;
319 struct usb_role_switch *role_sw;
320
321 enum typec_role vconn_role;
322 enum typec_role pwr_role;
323 enum typec_data_role data_role;
324 enum typec_pwr_opmode pwr_opmode;
325
326 struct usb_pd_identity partner_ident;
327 struct typec_partner_desc partner_desc;
328 struct typec_partner *partner;
329
330 enum typec_cc_status cc_req;
331 enum typec_cc_status src_rp; /* used only if pd_supported == false */
332
333 enum typec_cc_status cc1;
334 enum typec_cc_status cc2;
335 enum typec_cc_polarity polarity;
336
337 bool attached;
338 bool connected;
339 bool registered;
340 bool pd_supported;
341 enum typec_port_type port_type;
342
343 /*
344 * Set to true when vbus is greater than VSAFE5V min.
345 * Set to false when vbus falls below vSinkDisconnect max threshold.
346 */
347 bool vbus_present;
348
349 /*
350 * Set to true when vbus is less than VSAFE0V max.
351 * Set to false when vbus is greater than VSAFE0V max.
352 */
353 bool vbus_vsafe0v;
354
355 bool vbus_never_low;
356 bool vbus_source;
357 bool vbus_charge;
358
359 /* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
360 bool send_discover;
361 bool op_vsafe5v;
362
363 int try_role;
364 int try_snk_count;
365 int try_src_count;
366
367 enum pd_msg_request queued_message;
368
369 enum tcpm_state enter_state;
370 enum tcpm_state prev_state;
371 enum tcpm_state state;
372 enum tcpm_state delayed_state;
373 ktime_t delayed_runtime;
374 unsigned long delay_ms;
375
376 spinlock_t pd_event_lock;
377 u32 pd_events;
378
379 struct kthread_work event_work;
380 struct hrtimer state_machine_timer;
381 struct kthread_work state_machine;
382 struct hrtimer vdm_state_machine_timer;
383 struct kthread_work vdm_state_machine;
384 struct hrtimer enable_frs_timer;
385 struct kthread_work enable_frs;
386 struct hrtimer send_discover_timer;
387 struct kthread_work send_discover_work;
388 bool state_machine_running;
389 /* Set to true when the VDM state machine has follow-up actions pending. */
390 bool vdm_sm_running;
391
392 struct completion tx_complete;
393 enum tcpm_transmit_status tx_status;
394
395 struct mutex swap_lock; /* swap command lock */
396 bool swap_pending;
397 bool non_pd_role_swap;
398 struct completion swap_complete;
399 int swap_status;
400
401 unsigned int negotiated_rev;
402 unsigned int message_id;
403 unsigned int caps_count;
404 unsigned int hard_reset_count;
405 bool pd_capable;
406 bool explicit_contract;
407 unsigned int rx_msgid;
408
409 /* USB PD objects */
410 struct usb_power_delivery **pds;
411 struct pd_data **pd_list;
412 struct usb_power_delivery_capabilities *port_source_caps;
413 struct usb_power_delivery_capabilities *port_sink_caps;
414 struct usb_power_delivery *partner_pd;
415 struct usb_power_delivery_capabilities *partner_source_caps;
416 struct usb_power_delivery_capabilities *partner_sink_caps;
417 struct usb_power_delivery *selected_pd;
418
419 /* Partner capabilities/requests */
420 u32 sink_request;
421 u32 source_caps[PDO_MAX_OBJECTS];
422 unsigned int nr_source_caps;
423 u32 sink_caps[PDO_MAX_OBJECTS];
424 unsigned int nr_sink_caps;
425
426 /* Local capabilities */
427 unsigned int pd_count;
428 u32 src_pdo[PDO_MAX_OBJECTS];
429 unsigned int nr_src_pdo;
430 u32 snk_pdo[PDO_MAX_OBJECTS];
431 unsigned int nr_snk_pdo;
432 u32 snk_vdo_v1[VDO_MAX_OBJECTS];
433 unsigned int nr_snk_vdo_v1;
434 u32 snk_vdo[VDO_MAX_OBJECTS];
435 unsigned int nr_snk_vdo;
436
437 unsigned int operating_snk_mw;
438 bool update_sink_caps;
439
440 /* Requested current / voltage to the port partner */
441 u32 req_current_limit;
442 u32 req_supply_voltage;
443 /* Actual current / voltage limit of the local port */
444 u32 current_limit;
445 u32 supply_voltage;
446
447 /* Used to export TA voltage and current */
448 struct power_supply *psy;
449 struct power_supply_desc psy_desc;
450 enum power_supply_usb_type usb_type;
451
452 u32 bist_request;
453
454 /* PD state for Vendor Defined Messages */
455 enum vdm_states vdm_state;
456 u32 vdm_retries;
457 /* next Vendor Defined Message to send */
458 u32 vdo_data[VDO_MAX_SIZE];
459 u8 vdo_count;
460 /* VDO to retry if UFP responder replied busy */
461 u32 vdo_retry;
462
463 /* PPS */
464 struct pd_pps_data pps_data;
465 struct completion pps_complete;
466 bool pps_pending;
467 int pps_status;
468
469 /* Alternate mode data */
470 struct pd_mode_data mode_data;
471 struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
472 struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
473
474 /* Deadline in jiffies to exit src_try_wait state */
475 unsigned long max_wait;
476
477 /* port belongs to a self powered device */
478 bool self_powered;
479
480 /* Sink FRS */
481 enum frs_typec_current new_source_frs_current;
482
483 /* Sink caps have been queried */
484 bool sink_cap_done;
485
486 /* Collision Avoidance and Atomic Message Sequence */
487 enum tcpm_state upcoming_state;
488 enum tcpm_ams ams;
489 enum tcpm_ams next_ams;
490 bool in_ams;
491
492 /* Auto vbus discharge status */
493 bool auto_vbus_discharge_enabled;
494
495 /*
496 * When set, the port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY and
497 * the actual current limit after receipt of PD_CTRL_PSRDY for a PD link, or
498 * after reaching SNK_READY for a non-PD link.
499 */
500 bool slow_charger_loop;
501
502 /*
503 * When true, the lower level driver has indicated potential presence of
504 * contaminant in the connector pins, based on the tcpm state machine
505 * transitions.
506 */
507 bool potential_contaminant;
508 #ifdef CONFIG_DEBUG_FS
509 struct dentry *dentry;
510 struct mutex logbuffer_lock; /* log buffer access lock */
511 int logbuffer_head;
512 int logbuffer_tail;
513 u8 *logbuffer[LOG_BUFFER_ENTRIES];
514 #endif
515 };
516
517 struct pd_rx_event {
518 struct kthread_work work;
519 struct tcpm_port *port;
520 struct pd_message msg;
521 };
522
523 static const char * const pd_rev[] = {
524 [PD_REV10] = "rev1",
525 [PD_REV20] = "rev2",
526 [PD_REV30] = "rev3",
527 };
528
529 #define tcpm_cc_is_sink(cc) \
530 ((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
531 (cc) == TYPEC_CC_RP_3_0)
532
533 /* As long as cc is pulled up, we can consider the port a sink. */
534 #define tcpm_port_is_sink(port) \
535 (tcpm_cc_is_sink((port)->cc1) || tcpm_cc_is_sink((port)->cc2))
536
537 #define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
538 #define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
539 #define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
540
541 #define tcpm_port_is_source(port) \
542 ((tcpm_cc_is_source((port)->cc1) && \
543 !tcpm_cc_is_source((port)->cc2)) || \
544 (tcpm_cc_is_source((port)->cc2) && \
545 !tcpm_cc_is_source((port)->cc1)))
546
547 #define tcpm_port_is_debug(port) \
548 (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
549
550 #define tcpm_port_is_audio(port) \
551 (tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
552
553 #define tcpm_port_is_audio_detached(port) \
554 ((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
555 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
556
557 #define tcpm_try_snk(port) \
558 ((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
559 (port)->port_type == TYPEC_PORT_DRP)
560
561 #define tcpm_try_src(port) \
562 ((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
563 (port)->port_type == TYPEC_PORT_DRP)
564
565 #define tcpm_data_role_for_source(port) \
566 ((port)->typec_caps.data == TYPEC_PORT_UFP ? \
567 TYPEC_DEVICE : TYPEC_HOST)
568
569 #define tcpm_data_role_for_sink(port) \
570 ((port)->typec_caps.data == TYPEC_PORT_DFP ? \
571 TYPEC_HOST : TYPEC_DEVICE)
572
573 #define tcpm_sink_tx_ok(port) \
574 (tcpm_port_is_sink(port) && \
575 ((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
576
577 #define tcpm_wait_for_discharge(port) \
578 (((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
579
580 static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
581 {
582 if (port->port_type == TYPEC_PORT_DRP) {
583 if (port->try_role == TYPEC_SINK)
584 return SNK_UNATTACHED;
585 else if (port->try_role == TYPEC_SOURCE)
586 return SRC_UNATTACHED;
587 /* Fall through to return SRC_UNATTACHED */
588 } else if (port->port_type == TYPEC_PORT_SNK) {
589 return SNK_UNATTACHED;
590 }
591 return SRC_UNATTACHED;
592 }
593
594 static bool tcpm_port_is_disconnected(struct tcpm_port *port)
595 {
596 return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
597 port->cc2 == TYPEC_CC_OPEN) ||
598 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
599 port->cc1 == TYPEC_CC_OPEN) ||
600 (port->polarity == TYPEC_POLARITY_CC2 &&
601 port->cc2 == TYPEC_CC_OPEN)));
602 }
603
604 /*
605 * Logging
606 */
607
608 #ifdef CONFIG_DEBUG_FS
609
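/*
 * The log is a fixed-size ring buffer of LOG_BUFFER_ENTRIES slots; it is
 * considered full when advancing logbuffer_head by one would make it
 * collide with logbuffer_tail.
 */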
610 static bool tcpm_log_full(struct tcpm_port *port)
611 {
612 return port->logbuffer_tail ==
613 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
614 }
615
616 __printf(2, 0)
617 static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
618 {
619 char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
620 u64 ts_nsec = local_clock();
621 unsigned long rem_nsec;
622
623 mutex_lock(&port->logbuffer_lock);
624 if (!port->logbuffer[port->logbuffer_head]) {
625 port->logbuffer[port->logbuffer_head] =
626 kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
627 if (!port->logbuffer[port->logbuffer_head]) {
628 mutex_unlock(&port->logbuffer_lock);
629 return;
630 }
631 }
632
633 vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
634
635 if (tcpm_log_full(port)) {
636 port->logbuffer_head = max(port->logbuffer_head - 1, 0);
637 strcpy(tmpbuffer, "overflow");
638 }
639
640 if (port->logbuffer_head < 0 ||
641 port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
642 dev_warn(port->dev,
643 "Bad log buffer index %d\n", port->logbuffer_head);
644 goto abort;
645 }
646
647 if (!port->logbuffer[port->logbuffer_head]) {
648 dev_warn(port->dev,
649 "Log buffer index %d is NULL\n", port->logbuffer_head);
650 goto abort;
651 }
652
653 rem_nsec = do_div(ts_nsec, 1000000000);
654 scnprintf(port->logbuffer[port->logbuffer_head],
655 LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
656 (unsigned long)ts_nsec, rem_nsec / 1000,
657 tmpbuffer);
658 port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
659
660 abort:
661 mutex_unlock(&port->logbuffer_lock);
662 }
663
664 __printf(2, 3)
665 static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
666 {
667 va_list args;
668
669 /* Do not log while disconnected and unattached */
670 if (tcpm_port_is_disconnected(port) &&
671 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
672 port->state == TOGGLING || port->state == CHECK_CONTAMINANT))
673 return;
674
675 va_start(args, fmt);
676 _tcpm_log(port, fmt, args);
677 va_end(args);
678 }
679
680 __printf(2, 3)
681 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
682 {
683 va_list args;
684
685 va_start(args, fmt);
686 _tcpm_log(port, fmt, args);
687 va_end(args);
688 }
689
690 static void tcpm_log_source_caps(struct tcpm_port *port)
691 {
692 int i;
693
694 for (i = 0; i < port->nr_source_caps; i++) {
695 u32 pdo = port->source_caps[i];
696 enum pd_pdo_type type = pdo_type(pdo);
697 char msg[64];
698
699 switch (type) {
700 case PDO_TYPE_FIXED:
701 scnprintf(msg, sizeof(msg),
702 "%u mV, %u mA [%s%s%s%s%s%s]",
703 pdo_fixed_voltage(pdo),
704 pdo_max_current(pdo),
705 (pdo & PDO_FIXED_DUAL_ROLE) ?
706 "R" : "",
707 (pdo & PDO_FIXED_SUSPEND) ?
708 "S" : "",
709 (pdo & PDO_FIXED_HIGHER_CAP) ?
710 "H" : "",
711 (pdo & PDO_FIXED_USB_COMM) ?
712 "U" : "",
713 (pdo & PDO_FIXED_DATA_SWAP) ?
714 "D" : "",
715 (pdo & PDO_FIXED_EXTPOWER) ?
716 "E" : "");
717 break;
718 case PDO_TYPE_VAR:
719 scnprintf(msg, sizeof(msg),
720 "%u-%u mV, %u mA",
721 pdo_min_voltage(pdo),
722 pdo_max_voltage(pdo),
723 pdo_max_current(pdo));
724 break;
725 case PDO_TYPE_BATT:
726 scnprintf(msg, sizeof(msg),
727 "%u-%u mV, %u mW",
728 pdo_min_voltage(pdo),
729 pdo_max_voltage(pdo),
730 pdo_max_power(pdo));
731 break;
732 case PDO_TYPE_APDO:
733 if (pdo_apdo_type(pdo) == APDO_TYPE_PPS)
734 scnprintf(msg, sizeof(msg),
735 "%u-%u mV, %u mA",
736 pdo_pps_apdo_min_voltage(pdo),
737 pdo_pps_apdo_max_voltage(pdo),
738 pdo_pps_apdo_max_current(pdo));
739 else
740 strcpy(msg, "undefined APDO");
741 break;
742 default:
743 strcpy(msg, "undefined");
744 break;
745 }
746 tcpm_log(port, " PDO %d: type %d, %s",
747 i, type, msg);
748 }
749 }
750
751 static int tcpm_debug_show(struct seq_file *s, void *v)
752 {
753 struct tcpm_port *port = s->private;
754 int tail;
755
756 mutex_lock(&port->logbuffer_lock);
757 tail = port->logbuffer_tail;
758 while (tail != port->logbuffer_head) {
759 seq_printf(s, "%s\n", port->logbuffer[tail]);
760 tail = (tail + 1) % LOG_BUFFER_ENTRIES;
761 }
762 if (!seq_has_overflowed(s))
763 port->logbuffer_tail = tail;
764 mutex_unlock(&port->logbuffer_lock);
765
766 return 0;
767 }
768 DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
769
770 static void tcpm_debugfs_init(struct tcpm_port *port)
771 {
772 char name[NAME_MAX];
773
774 mutex_init(&port->logbuffer_lock);
775 snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev));
776 port->dentry = debugfs_create_dir(name, usb_debug_root);
777 debugfs_create_file("log", S_IFREG | 0444, port->dentry, port,
778 &tcpm_debug_fops);
779 }
780
781 static void tcpm_debugfs_exit(struct tcpm_port *port)
782 {
783 int i;
784
785 mutex_lock(&port->logbuffer_lock);
786 for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
787 kfree(port->logbuffer[i]);
788 port->logbuffer[i] = NULL;
789 }
790 mutex_unlock(&port->logbuffer_lock);
791
792 debugfs_remove(port->dentry);
793 }
794
795 #else
796
797 __printf(2, 3)
798 static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
799 __printf(2, 3)
800 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
801 static void tcpm_log_source_caps(struct tcpm_port *port) { }
802 static void tcpm_debugfs_init(const struct tcpm_port *port) { }
803 static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
804
805 #endif
806
807 static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
808 {
809 tcpm_log(port, "cc:=%d", cc);
810 port->cc_req = cc;
811 port->tcpc->set_cc(port->tcpc, cc);
812 }
813
814 static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
815 {
816 int ret = 0;
817
818 if (port->tcpc->enable_auto_vbus_discharge) {
819 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
820 tcpm_log_force(port, "%s vbus discharge ret:%d", enable ? "enable" : "disable",
821 ret);
822 if (!ret)
823 port->auto_vbus_discharge_enabled = enable;
824 }
825
826 return ret;
827 }
828
829 static void tcpm_apply_rc(struct tcpm_port *port)
830 {
831 /*
832 * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP
833 * when Vbus auto discharge on disconnect is enabled.
834 */
835 if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) {
836 tcpm_log(port, "Apply_RC");
837 port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity);
838 tcpm_enable_auto_vbus_discharge(port, false);
839 }
840 }
841
842 /*
843 * Determine RP value to set based on maximum current supported
844 * by a port if configured as source.
845 * Returns CC value to report to link partner.
846 */
847 static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
848 {
849 const u32 *src_pdo = port->src_pdo;
850 int nr_pdo = port->nr_src_pdo;
851 int i;
852
853 if (!port->pd_supported)
854 return port->src_rp;
855
856 /*
857 * Search for first entry with matching voltage.
858 * It should report the maximum supported current.
859 */
860 for (i = 0; i < nr_pdo; i++) {
861 const u32 pdo = src_pdo[i];
862
863 if (pdo_type(pdo) == PDO_TYPE_FIXED &&
864 pdo_fixed_voltage(pdo) == 5000) {
865 unsigned int curr = pdo_max_current(pdo);
866
867 if (curr >= 3000)
868 return TYPEC_CC_RP_3_0;
869 else if (curr >= 1500)
870 return TYPEC_CC_RP_1_5;
871 return TYPEC_CC_RP_DEF;
872 }
873 }
874
875 return TYPEC_CC_RP_DEF;
876 }
877
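/*
 * Called when an Atomic Message Sequence completes: restore the source's CC
 * pull-up advertisement (SinkTxOk/SinkTxNG for PD-capable ports, otherwise
 * the Rp value from tcpm_rp_cc()) and clear the AMS bookkeeping.
 */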
878 static void tcpm_ams_finish(struct tcpm_port *port)
879 {
880 tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]);
881
882 if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) {
883 if (port->negotiated_rev >= PD_REV30)
884 tcpm_set_cc(port, SINK_TX_OK);
885 else
886 tcpm_set_cc(port, SINK_TX_NG);
887 } else if (port->pwr_role == TYPEC_SOURCE) {
888 tcpm_set_cc(port, tcpm_rp_cc(port));
889 }
890
891 port->in_ams = false;
892 port->ams = NONE_AMS;
893 }
894
895 static int tcpm_pd_transmit(struct tcpm_port *port,
896 enum tcpm_transmit_type type,
897 const struct pd_message *msg)
898 {
899 unsigned long timeout;
900 int ret;
901
902 if (msg)
903 tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
904 else
905 tcpm_log(port, "PD TX, type: %#x", type);
906
907 reinit_completion(&port->tx_complete);
908 ret = port->tcpc->pd_transmit(port->tcpc, type, msg, port->negotiated_rev);
909 if (ret < 0)
910 return ret;
911
912 mutex_unlock(&port->lock);
913 timeout = wait_for_completion_timeout(&port->tx_complete,
914 msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
915 mutex_lock(&port->lock);
916 if (!timeout)
917 return -ETIMEDOUT;
918
919 switch (port->tx_status) {
920 case TCPC_TX_SUCCESS:
921 port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
922 /*
923 * USB PD rev 2.0, 8.3.2.2.1:
924 * USB PD rev 3.0, 8.3.2.1.3:
925 * "... Note that every AMS is Interruptible until the first
926 * Message in the sequence has been successfully sent (GoodCRC
927 * Message received)."
928 */
929 if (port->ams != NONE_AMS)
930 port->in_ams = true;
931 break;
932 case TCPC_TX_DISCARDED:
933 ret = -EAGAIN;
934 break;
935 case TCPC_TX_FAILED:
936 default:
937 ret = -EIO;
938 break;
939 }
940
941 /* Some AMS don't expect responses. Finish them here. */
942 if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
943 tcpm_ams_finish(port);
944
945 return ret;
946 }
947
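/*
 * Called by the low-level TCPC driver (typically from its transmit-complete
 * interrupt path) to report the outcome of a queued PD transmission and wake
 * the waiter in tcpm_pd_transmit().
 */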
948 void tcpm_pd_transmit_complete(struct tcpm_port *port,
949 enum tcpm_transmit_status status)
950 {
951 tcpm_log(port, "PD TX complete, status: %u", status);
952 port->tx_status = status;
953 complete(&port->tx_complete);
954 }
955 EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
956
957 static int tcpm_mux_set(struct tcpm_port *port, int state,
958 enum usb_role usb_role,
959 enum typec_orientation orientation)
960 {
961 int ret;
962
963 tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d",
964 state, usb_role, orientation);
965
966 ret = typec_set_orientation(port->typec_port, orientation);
967 if (ret)
968 return ret;
969
970 if (port->role_sw) {
971 ret = usb_role_switch_set_role(port->role_sw, usb_role);
972 if (ret)
973 return ret;
974 }
975
976 return typec_set_mode(port->typec_port, state);
977 }
978
979 static int tcpm_set_polarity(struct tcpm_port *port,
980 enum typec_cc_polarity polarity)
981 {
982 int ret;
983
984 tcpm_log(port, "polarity %d", polarity);
985
986 ret = port->tcpc->set_polarity(port->tcpc, polarity);
987 if (ret < 0)
988 return ret;
989
990 port->polarity = polarity;
991
992 return 0;
993 }
994
995 static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
996 {
997 int ret;
998
999 tcpm_log(port, "vconn:=%d", enable);
1000
1001 ret = port->tcpc->set_vconn(port->tcpc, enable);
1002 if (!ret) {
1003 port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
1004 typec_set_vconn_role(port->typec_port, port->vconn_role);
1005 }
1006
1007 return ret;
1008 }
1009
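/*
 * Derive the sink current limit (in mA) from the partner's Rp advertisement
 * seen on the active CC line; fall back to the TCPC driver's
 * get_current_limit() callback for the default Rp.
 */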
1010 static u32 tcpm_get_current_limit(struct tcpm_port *port)
1011 {
1012 enum typec_cc_status cc;
1013 u32 limit;
1014
1015 cc = port->polarity ? port->cc2 : port->cc1;
1016 switch (cc) {
1017 case TYPEC_CC_RP_1_5:
1018 limit = 1500;
1019 break;
1020 case TYPEC_CC_RP_3_0:
1021 limit = 3000;
1022 break;
1023 case TYPEC_CC_RP_DEF:
1024 default:
1025 if (port->tcpc->get_current_limit)
1026 limit = port->tcpc->get_current_limit(port->tcpc);
1027 else
1028 limit = 0;
1029 break;
1030 }
1031
1032 return limit;
1033 }
1034
1035 static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
1036 {
1037 int ret = -EOPNOTSUPP;
1038
1039 tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
1040
1041 port->supply_voltage = mv;
1042 port->current_limit = max_ma;
1043 power_supply_changed(port->psy);
1044
1045 if (port->tcpc->set_current_limit)
1046 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
1047
1048 return ret;
1049 }
1050
1051 static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
1052 {
1053 return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
1054 port->data_role);
1055 }
1056
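/*
 * Map the negotiated Type-C data role onto a USB role-switch setting based
 * on the port's data capability (DRD, DFP-only or UFP-only), program the
 * orientation/mux accordingly, and tell the TCPC about the new roles.
 */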
1057 static int tcpm_set_roles(struct tcpm_port *port, bool attached,
1058 enum typec_role role, enum typec_data_role data)
1059 {
1060 enum typec_orientation orientation;
1061 enum usb_role usb_role;
1062 int ret;
1063
1064 if (port->polarity == TYPEC_POLARITY_CC1)
1065 orientation = TYPEC_ORIENTATION_NORMAL;
1066 else
1067 orientation = TYPEC_ORIENTATION_REVERSE;
1068
1069 if (port->typec_caps.data == TYPEC_PORT_DRD) {
1070 if (data == TYPEC_HOST)
1071 usb_role = USB_ROLE_HOST;
1072 else
1073 usb_role = USB_ROLE_DEVICE;
1074 } else if (port->typec_caps.data == TYPEC_PORT_DFP) {
1075 if (data == TYPEC_HOST) {
1076 if (role == TYPEC_SOURCE)
1077 usb_role = USB_ROLE_HOST;
1078 else
1079 usb_role = USB_ROLE_NONE;
1080 } else {
1081 return -ENOTSUPP;
1082 }
1083 } else {
1084 if (data == TYPEC_DEVICE) {
1085 if (role == TYPEC_SINK)
1086 usb_role = USB_ROLE_DEVICE;
1087 else
1088 usb_role = USB_ROLE_NONE;
1089 } else {
1090 return -ENOTSUPP;
1091 }
1092 }
1093
1094 ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
1095 if (ret < 0)
1096 return ret;
1097
1098 ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
1099 if (ret < 0)
1100 return ret;
1101
1102 port->pwr_role = role;
1103 port->data_role = data;
1104 typec_set_data_role(port->typec_port, data);
1105 typec_set_pwr_role(port->typec_port, role);
1106
1107 return 0;
1108 }
1109
1110 static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
1111 {
1112 int ret;
1113
1114 ret = port->tcpc->set_roles(port->tcpc, true, role,
1115 port->data_role);
1116 if (ret < 0)
1117 return ret;
1118
1119 port->pwr_role = role;
1120 typec_set_pwr_role(port->typec_port, role);
1121
1122 return 0;
1123 }
1124
1125 /*
1126 * Transform the PDO to be compliant with PD rev2.0.
1127 * Return 0 if the PDO type is not defined in PD rev2.0.
1128 * Otherwise, return the converted PDO.
1129 */
1130 static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role)
1131 {
1132 switch (pdo_type(pdo)) {
1133 case PDO_TYPE_FIXED:
1134 if (role == TYPEC_SINK)
1135 return pdo & ~PDO_FIXED_FRS_CURR_MASK;
1136 else
1137 return pdo & ~PDO_FIXED_UNCHUNK_EXT;
1138 case PDO_TYPE_VAR:
1139 case PDO_TYPE_BATT:
1140 return pdo;
1141 case PDO_TYPE_APDO:
1142 default:
1143 return 0;
1144 }
1145 }
1146
1147 static int tcpm_pd_send_source_caps(struct tcpm_port *port)
1148 {
1149 struct pd_message msg;
1150 u32 pdo;
1151 unsigned int i, nr_pdo = 0;
1152
1153 memset(&msg, 0, sizeof(msg));
1154
1155 for (i = 0; i < port->nr_src_pdo; i++) {
1156 if (port->negotiated_rev >= PD_REV30) {
1157 msg.payload[nr_pdo++] = cpu_to_le32(port->src_pdo[i]);
1158 } else {
1159 pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE);
1160 if (pdo)
1161 msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1162 }
1163 }
1164
1165 if (!nr_pdo) {
1166 /* No source capabilities defined, sink only */
1167 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1168 port->pwr_role,
1169 port->data_role,
1170 port->negotiated_rev,
1171 port->message_id, 0);
1172 } else {
1173 msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
1174 port->pwr_role,
1175 port->data_role,
1176 port->negotiated_rev,
1177 port->message_id,
1178 nr_pdo);
1179 }
1180
1181 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1182 }
1183
1184 static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
1185 {
1186 struct pd_message msg;
1187 u32 pdo;
1188 unsigned int i, nr_pdo = 0;
1189
1190 memset(&msg, 0, sizeof(msg));
1191
1192 for (i = 0; i < port->nr_snk_pdo; i++) {
1193 if (port->negotiated_rev >= PD_REV30) {
1194 msg.payload[nr_pdo++] = cpu_to_le32(port->snk_pdo[i]);
1195 } else {
1196 pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK);
1197 if (pdo)
1198 msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1199 }
1200 }
1201
1202 if (!nr_pdo) {
1203 /* No sink capabilities defined, source only */
1204 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1205 port->pwr_role,
1206 port->data_role,
1207 port->negotiated_rev,
1208 port->message_id, 0);
1209 } else {
1210 msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
1211 port->pwr_role,
1212 port->data_role,
1213 port->negotiated_rev,
1214 port->message_id,
1215 nr_pdo);
1216 }
1217
1218 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1219 }
1220
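/*
 * Schedule the next state machine run: either arm the hrtimer for a delayed
 * transition or cancel it and queue the state machine work immediately.
 */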
1221 static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1222 {
1223 if (delay_ms) {
1224 hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1225 } else {
1226 hrtimer_cancel(&port->state_machine_timer);
1227 kthread_queue_work(port->wq, &port->state_machine);
1228 }
1229 }
1230
1231 static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1232 {
1233 if (delay_ms) {
1234 hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
1235 HRTIMER_MODE_REL);
1236 } else {
1237 hrtimer_cancel(&port->vdm_state_machine_timer);
1238 kthread_queue_work(port->wq, &port->vdm_state_machine);
1239 }
1240 }
1241
1242 static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1243 {
1244 if (delay_ms) {
1245 hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1246 } else {
1247 hrtimer_cancel(&port->enable_frs_timer);
1248 kthread_queue_work(port->wq, &port->enable_frs);
1249 }
1250 }
1251
1252 static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1253 {
1254 if (delay_ms) {
1255 hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1256 } else {
1257 hrtimer_cancel(&port->send_discover_timer);
1258 kthread_queue_work(port->wq, &port->send_discover_work);
1259 }
1260 }
1261
1262 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
1263 unsigned int delay_ms)
1264 {
1265 if (delay_ms) {
1266 tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]",
1267 tcpm_states[port->state], tcpm_states[state], delay_ms,
1268 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1269 port->delayed_state = state;
1270 mod_tcpm_delayed_work(port, delay_ms);
1271 port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
1272 port->delay_ms = delay_ms;
1273 } else {
1274 tcpm_log(port, "state change %s -> %s [%s %s]",
1275 tcpm_states[port->state], tcpm_states[state],
1276 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1277 port->delayed_state = INVALID_STATE;
1278 port->prev_state = port->state;
1279 port->state = state;
1280 /*
1281 * Don't re-queue the state machine work item if we're currently
1282 * in the state machine and we're immediately changing states.
1283 * tcpm_state_machine_work() will continue running the state
1284 * machine.
1285 */
1286 if (!port->state_machine_running)
1287 mod_tcpm_delayed_work(port, 0);
1288 }
1289 }
1290
1291 static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
1292 unsigned int delay_ms)
1293 {
1294 if (port->enter_state == port->state)
1295 tcpm_set_state(port, state, delay_ms);
1296 else
1297 tcpm_log(port,
1298 "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]",
1299 delay_ms ? "delayed " : "",
1300 tcpm_states[port->state], tcpm_states[state],
1301 delay_ms, tcpm_states[port->enter_state],
1302 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1303 }
1304
1305 static void tcpm_queue_message(struct tcpm_port *port,
1306 enum pd_msg_request message)
1307 {
1308 port->queued_message = message;
1309 mod_tcpm_delayed_work(port, 0);
1310 }
1311
1312 static bool tcpm_vdm_ams(struct tcpm_port *port)
1313 {
1314 switch (port->ams) {
1315 case DISCOVER_IDENTITY:
1316 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1317 case DISCOVER_SVIDS:
1318 case DISCOVER_MODES:
1319 case DFP_TO_UFP_ENTER_MODE:
1320 case DFP_TO_UFP_EXIT_MODE:
1321 case DFP_TO_CABLE_PLUG_ENTER_MODE:
1322 case DFP_TO_CABLE_PLUG_EXIT_MODE:
1323 case ATTENTION:
1324 case UNSTRUCTURED_VDMS:
1325 case STRUCTURED_VDMS:
1326 break;
1327 default:
1328 return false;
1329 }
1330
1331 return true;
1332 }
1333
1334 static bool tcpm_ams_interruptible(struct tcpm_port *port)
1335 {
1336 switch (port->ams) {
1337 /* Interruptible AMS */
1338 case NONE_AMS:
1339 case SECURITY:
1340 case FIRMWARE_UPDATE:
1341 case DISCOVER_IDENTITY:
1342 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1343 case DISCOVER_SVIDS:
1344 case DISCOVER_MODES:
1345 case DFP_TO_UFP_ENTER_MODE:
1346 case DFP_TO_UFP_EXIT_MODE:
1347 case DFP_TO_CABLE_PLUG_ENTER_MODE:
1348 case DFP_TO_CABLE_PLUG_EXIT_MODE:
1349 case UNSTRUCTURED_VDMS:
1350 case STRUCTURED_VDMS:
1351 case COUNTRY_INFO:
1352 case COUNTRY_CODES:
1353 break;
1354 /* Non-Interruptible AMS */
1355 default:
1356 if (port->in_ams)
1357 return false;
1358 break;
1359 }
1360
1361 return true;
1362 }
1363
1364 static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
1365 {
1366 int ret = 0;
1367
1368 tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]);
1369
1370 if (!tcpm_ams_interruptible(port) &&
1371 !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
1372 port->upcoming_state = INVALID_STATE;
1373 tcpm_log(port, "AMS %s not interruptible, aborting",
1374 tcpm_ams_str[port->ams]);
1375 return -EAGAIN;
1376 }
1377
1378 if (port->pwr_role == TYPEC_SOURCE) {
1379 enum typec_cc_status cc_req = port->cc_req;
1380
1381 port->ams = ams;
1382
1383 if (ams == HARD_RESET) {
1384 tcpm_set_cc(port, tcpm_rp_cc(port));
1385 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1386 tcpm_set_state(port, HARD_RESET_START, 0);
1387 return ret;
1388 } else if (ams == SOFT_RESET_AMS) {
1389 if (!port->explicit_contract)
1390 tcpm_set_cc(port, tcpm_rp_cc(port));
1391 tcpm_set_state(port, SOFT_RESET_SEND, 0);
1392 return ret;
1393 } else if (tcpm_vdm_ams(port)) {
1394 /* tSinkTx is enforced in vdm_run_state_machine */
1395 if (port->negotiated_rev >= PD_REV30)
1396 tcpm_set_cc(port, SINK_TX_NG);
1397 return ret;
1398 }
1399
1400 if (port->negotiated_rev >= PD_REV30)
1401 tcpm_set_cc(port, SINK_TX_NG);
1402
1403 switch (port->state) {
1404 case SRC_READY:
1405 case SRC_STARTUP:
1406 case SRC_SOFT_RESET_WAIT_SNK_TX:
1407 case SOFT_RESET:
1408 case SOFT_RESET_SEND:
1409 if (port->negotiated_rev >= PD_REV30)
1410 tcpm_set_state(port, AMS_START,
1411 cc_req == SINK_TX_OK ?
1412 PD_T_SINK_TX : 0);
1413 else
1414 tcpm_set_state(port, AMS_START, 0);
1415 break;
1416 default:
1417 if (port->negotiated_rev >= PD_REV30)
1418 tcpm_set_state(port, SRC_READY,
1419 cc_req == SINK_TX_OK ?
1420 PD_T_SINK_TX : 0);
1421 else
1422 tcpm_set_state(port, SRC_READY, 0);
1423 break;
1424 }
1425 } else {
1426 if (port->negotiated_rev >= PD_REV30 &&
1427 !tcpm_sink_tx_ok(port) &&
1428 ams != SOFT_RESET_AMS &&
1429 ams != HARD_RESET) {
1430 port->upcoming_state = INVALID_STATE;
1431 tcpm_log(port, "Sink TX No Go");
1432 return -EAGAIN;
1433 }
1434
1435 port->ams = ams;
1436
1437 if (ams == HARD_RESET) {
1438 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1439 tcpm_set_state(port, HARD_RESET_START, 0);
1440 return ret;
1441 } else if (tcpm_vdm_ams(port)) {
1442 return ret;
1443 }
1444
1445 if (port->state == SNK_READY ||
1446 port->state == SNK_SOFT_RESET)
1447 tcpm_set_state(port, AMS_START, 0);
1448 else
1449 tcpm_set_state(port, SNK_READY, 0);
1450 }
1451
1452 return ret;
1453 }
1454
1455 /*
1456 * VDM/VDO handling functions
1457 */
1458 static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
1459 const u32 *data, int cnt)
1460 {
1461 u32 vdo_hdr = port->vdo_data[0];
1462
1463 WARN_ON(!mutex_is_locked(&port->lock));
1464
1465 /* If we are sending Discover Identity, handle the received message first */
1466 if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
1467 port->send_discover = true;
1468 mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
1469 } else {
1470 /* Make sure we are not still processing a previous VDM packet */
1471 WARN_ON(port->vdm_state > VDM_STATE_DONE);
1472 }
1473
1474 port->vdo_count = cnt + 1;
1475 port->vdo_data[0] = header;
1476 memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
1477 /* Set ready, vdm state machine will actually send */
1478 port->vdm_retries = 0;
1479 port->vdm_state = VDM_STATE_READY;
1480 port->vdm_sm_running = true;
1481
1482 mod_vdm_delayed_work(port, 0);
1483 }
1484
1485 static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
1486 const u32 *data, int cnt)
1487 {
1488 mutex_lock(&port->lock);
1489 tcpm_queue_vdm(port, header, data, cnt);
1490 mutex_unlock(&port->lock);
1491 }
1492
1493 static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
1494 {
1495 u32 vdo = p[VDO_INDEX_IDH];
1496 u32 product = p[VDO_INDEX_PRODUCT];
1497
1498 memset(&port->mode_data, 0, sizeof(port->mode_data));
1499
1500 port->partner_ident.id_header = vdo;
1501 port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
1502 port->partner_ident.product = product;
1503
1504 typec_partner_set_identity(port->partner);
1505
1506 tcpm_log(port, "Identity: %04x:%04x.%04x",
1507 PD_IDH_VID(vdo),
1508 PD_PRODUCT_PID(product), product & 0xffff);
1509 }
1510
1511 static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt)
1512 {
1513 struct pd_mode_data *pmdata = &port->mode_data;
1514 int i;
1515
1516 for (i = 1; i < cnt; i++) {
1517 u16 svid;
1518
1519 svid = (p[i] >> 16) & 0xffff;
1520 if (!svid)
1521 return false;
1522
1523 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1524 goto abort;
1525
1526 pmdata->svids[pmdata->nsvids++] = svid;
1527 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1528
1529 svid = p[i] & 0xffff;
1530 if (!svid)
1531 return false;
1532
1533 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1534 goto abort;
1535
1536 pmdata->svids[pmdata->nsvids++] = svid;
1537 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1538 }
1539
1540 /*
1541 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned two per VDO (see Table
1542 * 6-43), and at most six VDOs can be returned per response (see Figure
1543 * 6-19). If the Responder supports 12 or more SVIDs then the Discover
1544 * SVIDs Command Shall be executed multiple times until a Discover
1545 * SVIDs VDO is returned ending either with an SVID value of 0x0000 in
1546 * the last part of the last VDO or with a VDO containing two SVIDs
1547 * with values of 0x0000.
1548 *
1549 * However, some odd docks report fewer than 12 SVIDs but without
1550 * 0x0000 in the last VDO, so we need to break the Discover SVIDs
1551 * request and return false here.
1552 */
1553 return cnt == 7;
1554 abort:
1555 tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
1556 return false;
1557 }
1558
1559 static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt)
1560 {
1561 struct pd_mode_data *pmdata = &port->mode_data;
1562 struct typec_altmode_desc *paltmode;
1563 int i;
1564
1565 if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
1566 /* Already logged in svdm_consume_svids() */
1567 return;
1568 }
1569
1570 for (i = 1; i < cnt; i++) {
1571 paltmode = &pmdata->altmode_desc[pmdata->altmodes];
1572 memset(paltmode, 0, sizeof(*paltmode));
1573
1574 paltmode->svid = pmdata->svids[pmdata->svid_index];
1575 paltmode->mode = i;
1576 paltmode->vdo = p[i];
1577
1578 tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
1579 pmdata->altmodes, paltmode->svid,
1580 paltmode->mode, paltmode->vdo);
1581
1582 pmdata->altmodes++;
1583 }
1584 }
1585
1586 static void tcpm_register_partner_altmodes(struct tcpm_port *port)
1587 {
1588 struct pd_mode_data *modep = &port->mode_data;
1589 struct typec_altmode *altmode;
1590 int i;
1591
1592 for (i = 0; i < modep->altmodes; i++) {
1593 altmode = typec_partner_register_altmode(port->partner,
1594 &modep->altmode_desc[i]);
1595 if (IS_ERR(altmode)) {
1596 tcpm_log(port, "Failed to register partner SVID 0x%04x",
1597 modep->altmode_desc[i].svid);
1598 altmode = NULL;
1599 }
1600 port->partner_altmode[i] = altmode;
1601 }
1602 }
1603
1604 #define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
1605
1606 static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
1607 const u32 *p, int cnt, u32 *response,
1608 enum adev_actions *adev_action)
1609 {
1610 struct typec_port *typec = port->typec_port;
1611 struct typec_altmode *pdev;
1612 struct pd_mode_data *modep;
1613 int svdm_version;
1614 int rlen = 0;
1615 int cmd_type;
1616 int cmd;
1617 int i;
1618
1619 cmd_type = PD_VDO_CMDT(p[0]);
1620 cmd = PD_VDO_CMD(p[0]);
1621
1622 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1623 p[0], cmd_type, cmd, cnt);
1624
1625 modep = &port->mode_data;
1626
1627 pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX,
1628 PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
1629
1630 svdm_version = typec_get_negotiated_svdm_version(typec);
1631 if (svdm_version < 0)
1632 return 0;
1633
1634 switch (cmd_type) {
1635 case CMDT_INIT:
1636 switch (cmd) {
1637 case CMD_DISCOVER_IDENT:
1638 if (PD_VDO_VID(p[0]) != USB_SID_PD)
1639 break;
1640
1641 if (IS_ERR_OR_NULL(port->partner))
1642 break;
1643
1644 if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
1645 typec_partner_set_svdm_version(port->partner,
1646 PD_VDO_SVDM_VER(p[0]));
1647 svdm_version = PD_VDO_SVDM_VER(p[0]);
1648 }
1649
1650 port->ams = DISCOVER_IDENTITY;
1651 /*
1652 * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
1653 * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
1654 * "wrong configuration" or "Unrecognized"
1655 */
1656 if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
1657 port->nr_snk_vdo) {
1658 if (svdm_version < SVDM_VER_2_0) {
1659 for (i = 0; i < port->nr_snk_vdo_v1; i++)
1660 response[i + 1] = port->snk_vdo_v1[i];
1661 rlen = port->nr_snk_vdo_v1 + 1;
1662
1663 } else {
1664 for (i = 0; i < port->nr_snk_vdo; i++)
1665 response[i + 1] = port->snk_vdo[i];
1666 rlen = port->nr_snk_vdo + 1;
1667 }
1668 }
1669 break;
1670 case CMD_DISCOVER_SVID:
1671 port->ams = DISCOVER_SVIDS;
1672 break;
1673 case CMD_DISCOVER_MODES:
1674 port->ams = DISCOVER_MODES;
1675 break;
1676 case CMD_ENTER_MODE:
1677 port->ams = DFP_TO_UFP_ENTER_MODE;
1678 break;
1679 case CMD_EXIT_MODE:
1680 port->ams = DFP_TO_UFP_EXIT_MODE;
1681 break;
1682 case CMD_ATTENTION:
1683 /* Attention command does not have a response */
1684 *adev_action = ADEV_ATTENTION;
1685 return 0;
1686 default:
1687 break;
1688 }
1689 if (rlen >= 1) {
1690 response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
1691 } else if (rlen == 0) {
1692 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
1693 rlen = 1;
1694 } else {
1695 response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
1696 rlen = 1;
1697 }
1698 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
1699 (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
1700 break;
1701 case CMDT_RSP_ACK:
1702 /* silently drop message if we are not connected */
1703 if (IS_ERR_OR_NULL(port->partner))
1704 break;
1705
1706 tcpm_ams_finish(port);
1707
1708 switch (cmd) {
1709 case CMD_DISCOVER_IDENT:
1710 if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
1711 typec_partner_set_svdm_version(port->partner,
1712 PD_VDO_SVDM_VER(p[0]));
1713 /* 6.4.4.3.1 */
1714 svdm_consume_identity(port, p, cnt);
1715 response[0] = VDO(USB_SID_PD, 1, typec_get_negotiated_svdm_version(typec),
1716 CMD_DISCOVER_SVID);
1717 rlen = 1;
1718 break;
1719 case CMD_DISCOVER_SVID:
1720 /* 6.4.4.3.2 */
1721 if (svdm_consume_svids(port, p, cnt)) {
1722 response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
1723 rlen = 1;
1724 } else if (modep->nsvids && supports_modal(port)) {
1725 response[0] = VDO(modep->svids[0], 1, svdm_version,
1726 CMD_DISCOVER_MODES);
1727 rlen = 1;
1728 }
1729 break;
1730 case CMD_DISCOVER_MODES:
1731 /* 6.4.4.3.3 */
1732 svdm_consume_modes(port, p, cnt);
1733 modep->svid_index++;
1734 if (modep->svid_index < modep->nsvids) {
1735 u16 svid = modep->svids[modep->svid_index];
1736 response[0] = VDO(svid, 1, svdm_version, CMD_DISCOVER_MODES);
1737 rlen = 1;
1738 } else {
1739 tcpm_register_partner_altmodes(port);
1740 }
1741 break;
1742 case CMD_ENTER_MODE:
1743 if (adev && pdev)
1744 *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
1745 return 0;
1746 case CMD_EXIT_MODE:
1747 if (adev && pdev) {
1748 /* Back to USB Operation */
1749 *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
1750 return 0;
1751 }
1752 break;
1753 case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
1754 break;
1755 default:
1756 /* Unrecognized SVDM */
1757 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
1758 rlen = 1;
1759 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
1760 (VDO_SVDM_VERS(svdm_version));
1761 break;
1762 }
1763 break;
1764 case CMDT_RSP_NAK:
1765 tcpm_ams_finish(port);
1766 switch (cmd) {
1767 case CMD_DISCOVER_IDENT:
1768 case CMD_DISCOVER_SVID:
1769 case CMD_DISCOVER_MODES:
1770 case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
1771 break;
1772 case CMD_ENTER_MODE:
1773 /* Back to USB Operation */
1774 *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
1775 return 0;
1776 default:
1777 /* Unrecognized SVDM */
1778 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
1779 rlen = 1;
1780 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
1781 (VDO_SVDM_VERS(svdm_version));
1782 break;
1783 }
1784 break;
1785 default:
1786 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
1787 rlen = 1;
1788 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
1789 (VDO_SVDM_VERS(svdm_version));
1790 break;
1791 }
1792
1793 /* Inform the alternate mode drivers about everything */
1794 *adev_action = ADEV_QUEUE_VDM;
1795 return rlen;
1796 }
1797
1798 static void tcpm_pd_handle_msg(struct tcpm_port *port,
1799 enum pd_msg_request message,
1800 enum tcpm_ams ams);
1801
1802 static void tcpm_handle_vdm_request(struct tcpm_port *port,
1803 const __le32 *payload, int cnt)
1804 {
1805 enum adev_actions adev_action = ADEV_NONE;
1806 struct typec_altmode *adev;
1807 u32 p[PD_MAX_PAYLOAD];
1808 u32 response[8] = { };
1809 int i, rlen = 0;
1810
1811 for (i = 0; i < cnt; i++)
1812 p[i] = le32_to_cpu(payload[i]);
1813
1814 adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
1815 PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
1816
1817 if (port->vdm_state == VDM_STATE_BUSY) {
1818 /* If UFP responded busy retry after timeout */
1819 if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) {
1820 port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
1821 port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
1822 CMDT_INIT;
1823 mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
1824 return;
1825 }
1826 port->vdm_state = VDM_STATE_DONE;
1827 }
1828
1829 if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
1830 /*
1831 * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
1832 * advance because we are dropping the lock but may send VDMs soon.
1833 * For the cases of INIT received:
1834 * - If no response to send, it will be cleared later in this function.
1835 * - If there are responses to send, it will be cleared in the state machine.
1836 * For the cases of RSP received:
1837 * - If no further INIT to send, it will be cleared later in this function.
1838 * - Otherwise, it will be cleared in the state machine if timeout or it will go
1839 * back here until no further INIT to send.
1840 * For the cases of unknown type received:
1841 * - We will send NAK and the flag will be cleared in the state machine.
1842 */
1843 port->vdm_sm_running = true;
1844 rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
1845 } else {
1846 if (port->negotiated_rev >= PD_REV30)
1847 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
1848 }
1849
1850 /*
1851 * We are done with any state stored in the port struct now, except
1852 * for any port struct changes done by the tcpm_queue_vdm() call
1853 * below, which is a separate operation.
1854 *
1855 * So we can safely release the lock here; and we MUST release the
1856 * lock here to avoid an AB BA lock inversion:
1857 *
1858 * If we keep the lock here then the lock ordering in this path is:
1859 * 1. tcpm_pd_rx_handler takes the tcpm port lock
1860 * 2. One of the typec_altmode_* calls below takes the alt-mode's lock
1861 *
1862 * And we also have this ordering:
1863 * 1. alt-mode driver takes the alt-mode's lock
1864 * 2. alt-mode driver calls tcpm_altmode_enter which takes the
1865 * tcpm port lock
1866 *
1867 * Dropping our lock here avoids this.
1868 */
1869 mutex_unlock(&port->lock);
1870
1871 if (adev) {
1872 switch (adev_action) {
1873 case ADEV_NONE:
1874 break;
1875 case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
1876 WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
1877 typec_altmode_vdm(adev, p[0], &p[1], cnt);
1878 break;
1879 case ADEV_QUEUE_VDM:
1880 typec_altmode_vdm(adev, p[0], &p[1], cnt);
1881 break;
1882 case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
1883 if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
1884 int svdm_version = typec_get_negotiated_svdm_version(
1885 port->typec_port);
1886 if (svdm_version < 0)
1887 break;
1888
1889 response[0] = VDO(adev->svid, 1, svdm_version,
1890 CMD_EXIT_MODE);
1891 response[0] |= VDO_OPOS(adev->mode);
1892 rlen = 1;
1893 }
1894 break;
1895 case ADEV_ATTENTION:
1896 if (typec_altmode_attention(adev, p[1]))
1897 tcpm_log(port, "typec_altmode_attention no port partner altmode");
1898 break;
1899 }
1900 }
1901
1902 /*
1903 * We must re-take the lock here to balance the unlock in
1904 * tcpm_pd_rx_handler; note that no changes, other than the
1905 * tcpm_queue_vdm call, are made while the lock is held again.
1906 * All that is done after the call is unwinding the call stack until
1907 * we return to tcpm_pd_rx_handler and do the unlock there.
1908 */
1909 mutex_lock(&port->lock);
1910
1911 if (rlen > 0)
1912 tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
1913 else
1914 port->vdm_sm_running = false;
1915 }
1916
1917 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
1918 const u32 *data, int count)
1919 {
1920 int svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
1921 u32 header;
1922
1923 if (svdm_version < 0)
1924 return;
1925
1926 if (WARN_ON(count > VDO_MAX_SIZE - 1))
1927 count = VDO_MAX_SIZE - 1;
1928
1929 /* set VDM header with VID & CMD */
1930 header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
1931 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
1932 svdm_version, cmd);
1933 tcpm_queue_vdm(port, header, data, count);
1934 }
1935
1936 static unsigned int vdm_ready_timeout(u32 vdm_hdr)
1937 {
1938 unsigned int timeout;
1939 int cmd = PD_VDO_CMD(vdm_hdr);
1940
1941 /* it's not a structured VDM command */
1942 if (!PD_VDO_SVDM(vdm_hdr))
1943 return PD_T_VDM_UNSTRUCTURED;
1944
1945 switch (PD_VDO_CMDT(vdm_hdr)) {
1946 case CMDT_INIT:
1947 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1948 timeout = PD_T_VDM_WAIT_MODE_E;
1949 else
1950 timeout = PD_T_VDM_SNDR_RSP;
1951 break;
1952 default:
1953 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1954 timeout = PD_T_VDM_E_MODE;
1955 else
1956 timeout = PD_T_VDM_RCVR_RSP;
1957 break;
1958 }
1959 return timeout;
1960 }
1961
1962 static void vdm_run_state_machine(struct tcpm_port *port)
1963 {
1964 struct pd_message msg;
1965 int i, res = 0;
1966 u32 vdo_hdr = port->vdo_data[0];
1967
1968 switch (port->vdm_state) {
1969 case VDM_STATE_READY:
1970 /* Only transmit VDM if attached */
1971 if (!port->attached) {
1972 port->vdm_state = VDM_STATE_ERR_BUSY;
1973 break;
1974 }
1975
1976 /*
1977 * If there's traffic or we're not in a ready state (SRC_READY or
1978 * SNK_READY), don't send a VDM.
1979 */
1980 if (port->state != SRC_READY && port->state != SNK_READY) {
1981 port->vdm_sm_running = false;
1982 break;
1983 }
1984
1985 /* TODO: AMS operation for Unstructured VDM */
1986 if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
1987 switch (PD_VDO_CMD(vdo_hdr)) {
1988 case CMD_DISCOVER_IDENT:
1989 res = tcpm_ams_start(port, DISCOVER_IDENTITY);
1990 if (res == 0) {
1991 port->send_discover = false;
1992 } else if (res == -EAGAIN) {
1993 port->vdo_data[0] = 0;
1994 mod_send_discover_delayed_work(port,
1995 SEND_DISCOVER_RETRY_MS);
1996 }
1997 break;
1998 case CMD_DISCOVER_SVID:
1999 res = tcpm_ams_start(port, DISCOVER_SVIDS);
2000 break;
2001 case CMD_DISCOVER_MODES:
2002 res = tcpm_ams_start(port, DISCOVER_MODES);
2003 break;
2004 case CMD_ENTER_MODE:
2005 res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
2006 break;
2007 case CMD_EXIT_MODE:
2008 res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
2009 break;
2010 case CMD_ATTENTION:
2011 res = tcpm_ams_start(port, ATTENTION);
2012 break;
2013 case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2014 res = tcpm_ams_start(port, STRUCTURED_VDMS);
2015 break;
2016 default:
2017 res = -EOPNOTSUPP;
2018 break;
2019 }
2020
2021 if (res < 0) {
2022 port->vdm_state = VDM_STATE_ERR_BUSY;
2023 return;
2024 }
2025 }
2026
2027 port->vdm_state = VDM_STATE_SEND_MESSAGE;
2028 mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 &&
2029 port->pwr_role == TYPEC_SOURCE &&
2030 PD_VDO_SVDM(vdo_hdr) &&
2031 PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ?
2032 PD_T_SINK_TX : 0);
2033 break;
2034 case VDM_STATE_WAIT_RSP_BUSY:
2035 port->vdo_data[0] = port->vdo_retry;
2036 port->vdo_count = 1;
2037 port->vdm_state = VDM_STATE_READY;
2038 tcpm_ams_finish(port);
2039 break;
2040 case VDM_STATE_BUSY:
2041 port->vdm_state = VDM_STATE_ERR_TMOUT;
2042 if (port->ams != NONE_AMS)
2043 tcpm_ams_finish(port);
2044 break;
2045 case VDM_STATE_ERR_SEND:
2046 /*
2047 * A partner which does not support USB PD will not reply,
2048 * so this is not a fatal error. At the same time, some
2049 * devices may not return GoodCRC under some circumstances,
2050 * so we need to retry.
2051 */
2052 if (port->vdm_retries < 3) {
2053 tcpm_log(port, "VDM Tx error, retry");
2054 port->vdm_retries++;
2055 port->vdm_state = VDM_STATE_READY;
2056 if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
2057 tcpm_ams_finish(port);
2058 } else {
2059 tcpm_ams_finish(port);
2060 }
2061 break;
2062 case VDM_STATE_SEND_MESSAGE:
2063 /* Prepare and send VDM */
2064 memset(&msg, 0, sizeof(msg));
2065 msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
2066 port->pwr_role,
2067 port->data_role,
2068 port->negotiated_rev,
2069 port->message_id, port->vdo_count);
2070 for (i = 0; i < port->vdo_count; i++)
2071 msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
2072 res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
2073 if (res < 0) {
2074 port->vdm_state = VDM_STATE_ERR_SEND;
2075 } else {
2076 unsigned long timeout;
2077
2078 port->vdm_retries = 0;
2079 port->vdo_data[0] = 0;
2080 port->vdm_state = VDM_STATE_BUSY;
2081 timeout = vdm_ready_timeout(vdo_hdr);
2082 mod_vdm_delayed_work(port, timeout);
2083 }
2084 break;
2085 default:
2086 break;
2087 }
2088 }
2089
2090 static void vdm_state_machine_work(struct kthread_work *work)
2091 {
2092 struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
2093 enum vdm_states prev_state;
2094
2095 mutex_lock(&port->lock);
2096
2097 /*
2098 * Continue running as long as the port is not busy and there was
2099 * a state change.
2100 */
2101 do {
2102 prev_state = port->vdm_state;
2103 vdm_run_state_machine(port);
2104 } while (port->vdm_state != prev_state &&
2105 port->vdm_state != VDM_STATE_BUSY &&
2106 port->vdm_state != VDM_STATE_SEND_MESSAGE);
2107
2108 if (port->vdm_state < VDM_STATE_READY)
2109 port->vdm_sm_running = false;
2110
2111 mutex_unlock(&port->lock);
2112 }
2113
2114 enum pdo_err {
2115 PDO_NO_ERR,
2116 PDO_ERR_NO_VSAFE5V,
2117 PDO_ERR_VSAFE5V_NOT_FIRST,
2118 PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
2119 PDO_ERR_FIXED_NOT_SORTED,
2120 PDO_ERR_VARIABLE_BATT_NOT_SORTED,
2121 PDO_ERR_DUPE_PDO,
2122 PDO_ERR_PPS_APDO_NOT_SORTED,
2123 PDO_ERR_DUPE_PPS_APDO,
2124 };
2125
2126 static const char * const pdo_err_msg[] = {
2127 [PDO_ERR_NO_VSAFE5V] =
2128 " err: source/sink caps should at least have vSafe5V",
2129 [PDO_ERR_VSAFE5V_NOT_FIRST] =
2130 " err: vSafe5V Fixed Supply Object Shall always be the first object",
2131 [PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
2132 " err: PDOs should be in the following order: Fixed; Battery; Variable",
2133 [PDO_ERR_FIXED_NOT_SORTED] =
2134 " err: Fixed supply pdos should be in increasing order of their fixed voltage",
2135 [PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
2136 " err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
2137 [PDO_ERR_DUPE_PDO] =
2138 " err: Variable/Batt supply pdos cannot have same min/max voltage",
2139 [PDO_ERR_PPS_APDO_NOT_SORTED] =
2140 " err: Programmable power supply apdos should be in increasing order of their maximum voltage",
2141 [PDO_ERR_DUPE_PPS_APDO] =
2142 " err: Programmable power supply apdos cannot have same min/max voltage and max current",
2143 };
2144
2145 static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
2146 unsigned int nr_pdo)
2147 {
2148 unsigned int i;
2149
2150 /* Should at least contain vSafe5V */
2151 if (nr_pdo < 1)
2152 return PDO_ERR_NO_VSAFE5V;
2153
2154 /* The vSafe5V Fixed Supply Object Shall always be the first object */
2155 if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
2156 pdo_fixed_voltage(pdo[0]) != VSAFE5V)
2157 return PDO_ERR_VSAFE5V_NOT_FIRST;
2158
2159 for (i = 1; i < nr_pdo; i++) {
2160 if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
2161 return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
2162 } else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
2163 enum pd_pdo_type type = pdo_type(pdo[i]);
2164
2165 switch (type) {
2166 /*
2167 * The remaining Fixed Supply Objects, if
2168 * present, shall be sent in voltage order;
2169 * lowest to highest.
2170 */
2171 case PDO_TYPE_FIXED:
2172 if (pdo_fixed_voltage(pdo[i]) <=
2173 pdo_fixed_voltage(pdo[i - 1]))
2174 return PDO_ERR_FIXED_NOT_SORTED;
2175 break;
2176 /*
2177 * The Battery Supply Objects and Variable
2178 * supply, if present shall be sent in Minimum
2179 * Voltage order; lowest to highest.
2180 */
2181 case PDO_TYPE_VAR:
2182 case PDO_TYPE_BATT:
2183 if (pdo_min_voltage(pdo[i]) <
2184 pdo_min_voltage(pdo[i - 1]))
2185 return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
2186 else if ((pdo_min_voltage(pdo[i]) ==
2187 pdo_min_voltage(pdo[i - 1])) &&
2188 (pdo_max_voltage(pdo[i]) ==
2189 pdo_max_voltage(pdo[i - 1])))
2190 return PDO_ERR_DUPE_PDO;
2191 break;
2192 /*
2193 * The Programmable Power Supply APDOs, if present,
2194 * shall be sent in Maximum Voltage order;
2195 * lowest to highest.
2196 */
2197 case PDO_TYPE_APDO:
2198 if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
2199 break;
2200
2201 if (pdo_pps_apdo_max_voltage(pdo[i]) <
2202 pdo_pps_apdo_max_voltage(pdo[i - 1]))
2203 return PDO_ERR_PPS_APDO_NOT_SORTED;
2204 else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
2205 pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
2206 pdo_pps_apdo_max_voltage(pdo[i]) ==
2207 pdo_pps_apdo_max_voltage(pdo[i - 1]) &&
2208 pdo_pps_apdo_max_current(pdo[i]) ==
2209 pdo_pps_apdo_max_current(pdo[i - 1]))
2210 return PDO_ERR_DUPE_PPS_APDO;
2211 break;
2212 default:
2213 tcpm_log_force(port, " Unknown pdo type");
2214 }
2215 }
2216 }
2217
2218 return PDO_NO_ERR;
2219 }
2220
2221 static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
2222 unsigned int nr_pdo)
2223 {
2224 enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
2225
2226 if (err_index != PDO_NO_ERR) {
2227 tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
2228 return -EINVAL;
2229 }
2230
2231 return 0;
2232 }
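/*
 * Editor's note, illustrative only: a source capability list that satisfies
 * the ordering rules checked above could be built with the PDO_* helpers from
 * <linux/usb/pd.h>, e.g. (values made up for illustration):
 *
 *	static const u32 example_src_pdo[] = {
 *		PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM), // vSafe5V fixed PDO first
 *		PDO_FIXED(9000, 3000, 0),                  // further fixed PDOs by rising voltage
 *		PDO_VAR(5000, 12000, 3000),                // variable/battery PDOs by rising min voltage
 *		PDO_PPS_APDO(3300, 11000, 3000),           // PPS APDOs by rising max voltage
 *	};
 *
 * tcpm_validate_caps(port, example_src_pdo, ARRAY_SIZE(example_src_pdo))
 * returns 0 for such a list, and -EINVAL (after logging the matching
 * pdo_err_msg entry) for a list that violates the ordering.
 */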
2233
2234 static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
2235 {
2236 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2237 int svdm_version;
2238 u32 header;
2239
2240 svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2241 if (svdm_version < 0)
2242 return svdm_version;
2243
2244 header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
2245 header |= VDO_OPOS(altmode->mode);
2246
2247 tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0);
2248 return 0;
2249 }
2250
2251 static int tcpm_altmode_exit(struct typec_altmode *altmode)
2252 {
2253 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2254 int svdm_version;
2255 u32 header;
2256
2257 svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2258 if (svdm_version < 0)
2259 return svdm_version;
2260
2261 header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2262 header |= VDO_OPOS(altmode->mode);
2263
2264 tcpm_queue_vdm_unlocked(port, header, NULL, 0);
2265 return 0;
2266 }
2267
2268 static int tcpm_altmode_vdm(struct typec_altmode *altmode,
2269 u32 header, const u32 *data, int count)
2270 {
2271 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2272
2273 tcpm_queue_vdm_unlocked(port, header, data, count - 1);
2274
2275 return 0;
2276 }
2277
2278 static const struct typec_altmode_ops tcpm_altmode_ops = {
2279 .enter = tcpm_altmode_enter,
2280 .exit = tcpm_altmode_exit,
2281 .vdm = tcpm_altmode_vdm,
2282 };
2283
2284 /*
2285 * PD (data, control) command handling functions
2286 */
2287 static inline enum tcpm_state ready_state(struct tcpm_port *port)
2288 {
2289 if (port->pwr_role == TYPEC_SOURCE)
2290 return SRC_READY;
2291 else
2292 return SNK_READY;
2293 }
2294
2295 static int tcpm_pd_send_control(struct tcpm_port *port,
2296 enum pd_ctrl_msg_type type);
2297
2298 static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
2299 int cnt)
2300 {
2301 u32 p0 = le32_to_cpu(payload[0]);
2302 unsigned int type = usb_pd_ado_type(p0);
2303
2304 if (!type) {
2305 tcpm_log(port, "Alert message received with no type");
2306 tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2307 return;
2308 }
2309
2310 /* Just handling non-battery alerts for now */
2311 if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
2312 if (port->pwr_role == TYPEC_SOURCE) {
2313 port->upcoming_state = GET_STATUS_SEND;
2314 tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
2315 } else {
2316 /*
2317 * Do not check SinkTxOk here in case the Source doesn't set its Rp to
2318 * SinkTxOk in time.
2319 */
2320 port->ams = GETTING_SOURCE_SINK_STATUS;
2321 tcpm_set_state(port, GET_STATUS_SEND, 0);
2322 }
2323 } else {
2324 tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2325 }
2326 }
2327
2328 static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
2329 enum typec_pwr_opmode mode, bool pps_active,
2330 u32 requested_vbus_voltage)
2331 {
2332 int ret;
2333
2334 if (!port->tcpc->set_auto_vbus_discharge_threshold)
2335 return 0;
2336
2337 ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
2338 requested_vbus_voltage);
2339 tcpm_log_force(port,
2340 "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d",
2341 mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret);
2342
2343 return ret;
2344 }
2345
2346 static void tcpm_pd_handle_state(struct tcpm_port *port,
2347 enum tcpm_state state,
2348 enum tcpm_ams ams,
2349 unsigned int delay_ms)
2350 {
2351 switch (port->state) {
2352 case SRC_READY:
2353 case SNK_READY:
2354 port->ams = ams;
2355 tcpm_set_state(port, state, delay_ms);
2356 break;
2357 /* 8.3.3.4.1.1 and 6.8.1 power transitioning */
2358 case SNK_TRANSITION_SINK:
2359 case SNK_TRANSITION_SINK_VBUS:
2360 case SRC_TRANSITION_SUPPLY:
2361 tcpm_set_state(port, HARD_RESET_SEND, 0);
2362 break;
2363 default:
2364 if (!tcpm_ams_interruptible(port)) {
2365 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
2366 SRC_SOFT_RESET_WAIT_SNK_TX :
2367 SNK_SOFT_RESET,
2368 0);
2369 } else {
2370 /* Process the Message (6.8.1) */
2371 port->upcoming_state = state;
2372 port->next_ams = ams;
2373 tcpm_set_state(port, ready_state(port), delay_ms);
2374 }
2375 break;
2376 }
2377 }
2378
2379 static void tcpm_pd_handle_msg(struct tcpm_port *port,
2380 enum pd_msg_request message,
2381 enum tcpm_ams ams)
2382 {
2383 switch (port->state) {
2384 case SRC_READY:
2385 case SNK_READY:
2386 port->ams = ams;
2387 tcpm_queue_message(port, message);
2388 break;
2389 /* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */
2390 case SNK_TRANSITION_SINK:
2391 case SNK_TRANSITION_SINK_VBUS:
2392 case SRC_TRANSITION_SUPPLY:
2393 tcpm_set_state(port, HARD_RESET_SEND, 0);
2394 break;
2395 default:
2396 if (!tcpm_ams_interruptible(port)) {
2397 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
2398 SRC_SOFT_RESET_WAIT_SNK_TX :
2399 SNK_SOFT_RESET,
2400 0);
2401 } else {
2402 port->next_ams = ams;
2403 tcpm_set_state(port, ready_state(port), 0);
2404 /* Process the Message (6.8.1) */
2405 tcpm_queue_message(port, message);
2406 }
2407 break;
2408 }
2409 }
2410
2411 static int tcpm_register_source_caps(struct tcpm_port *port)
2412 {
2413 struct usb_power_delivery_desc desc = { port->negotiated_rev };
2414 struct usb_power_delivery_capabilities_desc caps = { };
2415 struct usb_power_delivery_capabilities *cap;
2416
2417 if (!port->partner_pd)
2418 port->partner_pd = usb_power_delivery_register(NULL, &desc);
2419 if (IS_ERR(port->partner_pd))
2420 return PTR_ERR(port->partner_pd);
2421
2422 memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
2423 caps.role = TYPEC_SOURCE;
2424
2425 cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
2426 if (IS_ERR(cap))
2427 return PTR_ERR(cap);
2428
2429 port->partner_source_caps = cap;
2430
2431 return 0;
2432 }
2433
2434 static int tcpm_register_sink_caps(struct tcpm_port *port)
2435 {
2436 struct usb_power_delivery_desc desc = { port->negotiated_rev };
2437 struct usb_power_delivery_capabilities_desc caps = { };
2438 struct usb_power_delivery_capabilities *cap;
2439
2440 if (!port->partner_pd)
2441 port->partner_pd = usb_power_delivery_register(NULL, &desc);
2442 if (IS_ERR(port->partner_pd))
2443 return PTR_ERR(port->partner_pd);
2444
2445 memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
2446 caps.role = TYPEC_SINK;
2447
2448 cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
2449 if (IS_ERR(cap))
2450 return PTR_ERR(cap);
2451
2452 port->partner_sink_caps = cap;
2453
2454 return 0;
2455 }
2456
2457 static void tcpm_pd_data_request(struct tcpm_port *port,
2458 const struct pd_message *msg)
2459 {
2460 enum pd_data_msg_type type = pd_header_type_le(msg->header);
2461 unsigned int cnt = pd_header_cnt_le(msg->header);
2462 unsigned int rev = pd_header_rev_le(msg->header);
2463 unsigned int i;
2464 enum frs_typec_current partner_frs_current;
2465 bool frs_enable;
2466 int ret;
2467
2468 if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
2469 port->vdm_state = VDM_STATE_ERR_BUSY;
2470 tcpm_ams_finish(port);
2471 mod_vdm_delayed_work(port, 0);
2472 }
2473
2474 switch (type) {
2475 case PD_DATA_SOURCE_CAP:
2476 for (i = 0; i < cnt; i++)
2477 port->source_caps[i] = le32_to_cpu(msg->payload[i]);
2478
2479 port->nr_source_caps = cnt;
2480
2481 tcpm_log_source_caps(port);
2482
2483 tcpm_validate_caps(port, port->source_caps,
2484 port->nr_source_caps);
2485
2486 tcpm_register_source_caps(port);
2487
2488 /*
2489 * Adjust revision in subsequent message headers, as required,
2490 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
2491 * support Rev 1.0 so just do nothing in that scenario.
2492 */
2493 if (rev == PD_REV10) {
2494 if (port->ams == GET_SOURCE_CAPABILITIES)
2495 tcpm_ams_finish(port);
2496 break;
2497 }
2498
2499 if (rev < PD_MAX_REV)
2500 port->negotiated_rev = rev;
2501
2502 if (port->pwr_role == TYPEC_SOURCE) {
2503 if (port->ams == GET_SOURCE_CAPABILITIES)
2504 tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0);
2505 /* Unexpected Source Capabilities */
2506 else
2507 tcpm_pd_handle_msg(port,
2508 port->negotiated_rev < PD_REV30 ?
2509 PD_MSG_CTRL_REJECT :
2510 PD_MSG_CTRL_NOT_SUPP,
2511 NONE_AMS);
2512 } else if (port->state == SNK_WAIT_CAPABILITIES) {
2513 /*
2514 * This message may be received even if VBUS is not
2515 * present. This is quite unexpected; see USB PD
2516 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
2517 * However, at the same time, we must be ready to
2518 * receive this message and respond to it 15ms after
2519 * receiving PS_RDY during power swap operations, no matter
2520 * if VBUS is available or not (USB PD specification,
2521 * section 6.5.9.2).
2522 * So we need to accept the message either way,
2523 * but be prepared to keep waiting for VBUS after it was
2524 * handled.
2525 */
2526 port->ams = POWER_NEGOTIATION;
2527 port->in_ams = true;
2528 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
2529 } else {
2530 if (port->ams == GET_SOURCE_CAPABILITIES)
2531 tcpm_ams_finish(port);
2532 tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES,
2533 POWER_NEGOTIATION, 0);
2534 }
2535 break;
2536 case PD_DATA_REQUEST:
2537 /*
2538 * Adjust revision in subsequent message headers, as required,
2539 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
2540 * support Rev 1.0 so just reject in that scenario.
2541 */
2542 if (rev == PD_REV10) {
2543 tcpm_pd_handle_msg(port,
2544 port->negotiated_rev < PD_REV30 ?
2545 PD_MSG_CTRL_REJECT :
2546 PD_MSG_CTRL_NOT_SUPP,
2547 NONE_AMS);
2548 break;
2549 }
2550
2551 if (rev < PD_MAX_REV)
2552 port->negotiated_rev = rev;
2553
2554 if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
2555 tcpm_pd_handle_msg(port,
2556 port->negotiated_rev < PD_REV30 ?
2557 PD_MSG_CTRL_REJECT :
2558 PD_MSG_CTRL_NOT_SUPP,
2559 NONE_AMS);
2560 break;
2561 }
2562
2563 port->sink_request = le32_to_cpu(msg->payload[0]);
2564
2565 if (port->vdm_sm_running && port->explicit_contract) {
2566 tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams);
2567 break;
2568 }
2569
2570 if (port->state == SRC_SEND_CAPABILITIES)
2571 tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
2572 else
2573 tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES,
2574 POWER_NEGOTIATION, 0);
2575 break;
2576 case PD_DATA_SINK_CAP:
2577 /* Store the partner's sink capabilities; used below for FRS and capability registration */
2578 for (i = 0; i < cnt; i++)
2579 port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
2580
2581 partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
2582 PDO_FIXED_FRS_CURR_SHIFT;
2583 frs_enable = partner_frs_current && (partner_frs_current <=
2584 port->new_source_frs_current);
2585 tcpm_log(port,
2586 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
2587 partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n');
2588 if (frs_enable) {
2589 ret = port->tcpc->enable_frs(port->tcpc, true);
2590 tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
2591 }
2592
2593 port->nr_sink_caps = cnt;
2594 port->sink_cap_done = true;
2595 tcpm_register_sink_caps(port);
2596
2597 if (port->ams == GET_SINK_CAPABILITIES)
2598 tcpm_set_state(port, ready_state(port), 0);
2599 /* Unexpected Sink Capabilities */
2600 else
2601 tcpm_pd_handle_msg(port,
2602 port->negotiated_rev < PD_REV30 ?
2603 PD_MSG_CTRL_REJECT :
2604 PD_MSG_CTRL_NOT_SUPP,
2605 NONE_AMS);
2606 break;
2607 case PD_DATA_VENDOR_DEF:
2608 tcpm_handle_vdm_request(port, msg->payload, cnt);
2609 break;
2610 case PD_DATA_BIST:
2611 port->bist_request = le32_to_cpu(msg->payload[0]);
2612 tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
2613 break;
2614 case PD_DATA_ALERT:
2615 if (port->state != SRC_READY && port->state != SNK_READY)
2616 tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
2617 SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
2618 NONE_AMS, 0);
2619 else
2620 tcpm_handle_alert(port, msg->payload, cnt);
2621 break;
2622 case PD_DATA_BATT_STATUS:
2623 case PD_DATA_GET_COUNTRY_INFO:
2624 /* Currently unsupported */
2625 tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
2626 PD_MSG_CTRL_REJECT :
2627 PD_MSG_CTRL_NOT_SUPP,
2628 NONE_AMS);
2629 break;
2630 default:
2631 tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
2632 PD_MSG_CTRL_REJECT :
2633 PD_MSG_CTRL_NOT_SUPP,
2634 NONE_AMS);
2635 tcpm_log(port, "Unrecognized data message type %#x", type);
2636 break;
2637 }
2638 }
2639
2640 static void tcpm_pps_complete(struct tcpm_port *port, int result)
2641 {
2642 if (port->pps_pending) {
2643 port->pps_status = result;
2644 port->pps_pending = false;
2645 complete(&port->pps_complete);
2646 }
2647 }
2648
2649 static void tcpm_pd_ctrl_request(struct tcpm_port *port,
2650 const struct pd_message *msg)
2651 {
2652 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
2653 enum tcpm_state next_state;
2654
2655 /*
2656 * Stop the VDM state machine if it is interrupted by other Messages. NOT_SUPP is allowed
2657 * in a VDM AMS while waiting for VDM responses and will be handled later.
2658 */
2659 if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
2660 port->vdm_state = VDM_STATE_ERR_BUSY;
2661 tcpm_ams_finish(port);
2662 mod_vdm_delayed_work(port, 0);
2663 }
2664
2665 switch (type) {
2666 case PD_CTRL_GOOD_CRC:
2667 case PD_CTRL_PING:
2668 break;
2669 case PD_CTRL_GET_SOURCE_CAP:
2670 tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES);
2671 break;
2672 case PD_CTRL_GET_SINK_CAP:
2673 tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES);
2674 break;
2675 case PD_CTRL_GOTO_MIN:
2676 break;
2677 case PD_CTRL_PS_RDY:
2678 switch (port->state) {
2679 case SNK_TRANSITION_SINK:
2680 if (port->vbus_present) {
2681 tcpm_set_current_limit(port,
2682 port->req_current_limit,
2683 port->req_supply_voltage);
2684 port->explicit_contract = true;
2685 tcpm_set_auto_vbus_discharge_threshold(port,
2686 TYPEC_PWR_MODE_PD,
2687 port->pps_data.active,
2688 port->supply_voltage);
2689 tcpm_set_state(port, SNK_READY, 0);
2690 } else {
2691 /*
2692 * Seen after power swap. Keep waiting for VBUS
2693 * in a transitional state.
2694 */
2695 tcpm_set_state(port,
2696 SNK_TRANSITION_SINK_VBUS, 0);
2697 }
2698 break;
2699 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
2700 tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
2701 break;
2702 case PR_SWAP_SNK_SRC_SINK_OFF:
2703 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
2704 break;
2705 case VCONN_SWAP_WAIT_FOR_VCONN:
2706 tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
2707 break;
2708 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
2709 tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
2710 break;
2711 default:
2712 tcpm_pd_handle_state(port,
2713 port->pwr_role == TYPEC_SOURCE ?
2714 SRC_SOFT_RESET_WAIT_SNK_TX :
2715 SNK_SOFT_RESET,
2716 NONE_AMS, 0);
2717 break;
2718 }
2719 break;
2720 case PD_CTRL_REJECT:
2721 case PD_CTRL_WAIT:
2722 case PD_CTRL_NOT_SUPP:
2723 switch (port->state) {
2724 case SNK_NEGOTIATE_CAPABILITIES:
2725 /* USB PD specification, Figure 8-43 */
2726 if (port->explicit_contract)
2727 next_state = SNK_READY;
2728 else
2729 next_state = SNK_WAIT_CAPABILITIES;
2730
2731 /* Threshold was relaxed before sending Request. Restore it. */
2732 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
2733 port->pps_data.active,
2734 port->supply_voltage);
2735 tcpm_set_state(port, next_state, 0);
2736 break;
2737 case SNK_NEGOTIATE_PPS_CAPABILITIES:
2738 /* Revert any requested PPS updates */
2739 port->pps_data.req_out_volt = port->supply_voltage;
2740 port->pps_data.req_op_curr = port->current_limit;
2741 port->pps_status = (type == PD_CTRL_WAIT ?
2742 -EAGAIN : -EOPNOTSUPP);
2743
2744 /* Threshold was relaxed before sending Request. Restore it. */
2745 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
2746 port->pps_data.active,
2747 port->supply_voltage);
2748
2749 tcpm_set_state(port, SNK_READY, 0);
2750 break;
2751 case DR_SWAP_SEND:
2752 port->swap_status = (type == PD_CTRL_WAIT ?
2753 -EAGAIN : -EOPNOTSUPP);
2754 tcpm_set_state(port, DR_SWAP_CANCEL, 0);
2755 break;
2756 case PR_SWAP_SEND:
2757 port->swap_status = (type == PD_CTRL_WAIT ?
2758 -EAGAIN : -EOPNOTSUPP);
2759 tcpm_set_state(port, PR_SWAP_CANCEL, 0);
2760 break;
2761 case VCONN_SWAP_SEND:
2762 port->swap_status = (type == PD_CTRL_WAIT ?
2763 -EAGAIN : -EOPNOTSUPP);
2764 tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
2765 break;
2766 case FR_SWAP_SEND:
2767 tcpm_set_state(port, FR_SWAP_CANCEL, 0);
2768 break;
2769 case GET_SINK_CAP:
2770 port->sink_cap_done = true;
2771 tcpm_set_state(port, ready_state(port), 0);
2772 break;
2773 /*
2774 * Some port partners do not support GET_STATUS; avoid soft-resetting the link to
2775 * prevent redundant power re-negotiation.
2776 */
2777 case GET_STATUS_SEND:
2778 tcpm_set_state(port, ready_state(port), 0);
2779 break;
2780 case SRC_READY:
2781 case SNK_READY:
2782 if (port->vdm_state > VDM_STATE_READY) {
2783 port->vdm_state = VDM_STATE_DONE;
2784 if (tcpm_vdm_ams(port))
2785 tcpm_ams_finish(port);
2786 mod_vdm_delayed_work(port, 0);
2787 break;
2788 }
2789 fallthrough;
2790 default:
2791 tcpm_pd_handle_state(port,
2792 port->pwr_role == TYPEC_SOURCE ?
2793 SRC_SOFT_RESET_WAIT_SNK_TX :
2794 SNK_SOFT_RESET,
2795 NONE_AMS, 0);
2796 break;
2797 }
2798 break;
2799 case PD_CTRL_ACCEPT:
2800 switch (port->state) {
2801 case SNK_NEGOTIATE_CAPABILITIES:
2802 port->pps_data.active = false;
2803 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
2804 break;
2805 case SNK_NEGOTIATE_PPS_CAPABILITIES:
2806 port->pps_data.active = true;
2807 port->pps_data.min_volt = port->pps_data.req_min_volt;
2808 port->pps_data.max_volt = port->pps_data.req_max_volt;
2809 port->pps_data.max_curr = port->pps_data.req_max_curr;
2810 port->req_supply_voltage = port->pps_data.req_out_volt;
2811 port->req_current_limit = port->pps_data.req_op_curr;
2812 power_supply_changed(port->psy);
2813 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
2814 break;
2815 case SOFT_RESET_SEND:
2816 if (port->ams == SOFT_RESET_AMS)
2817 tcpm_ams_finish(port);
2818 if (port->pwr_role == TYPEC_SOURCE) {
2819 port->upcoming_state = SRC_SEND_CAPABILITIES;
2820 tcpm_ams_start(port, POWER_NEGOTIATION);
2821 } else {
2822 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2823 }
2824 break;
2825 case DR_SWAP_SEND:
2826 tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
2827 break;
2828 case PR_SWAP_SEND:
2829 tcpm_set_state(port, PR_SWAP_START, 0);
2830 break;
2831 case VCONN_SWAP_SEND:
2832 tcpm_set_state(port, VCONN_SWAP_START, 0);
2833 break;
2834 case FR_SWAP_SEND:
2835 tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
2836 break;
2837 default:
2838 tcpm_pd_handle_state(port,
2839 port->pwr_role == TYPEC_SOURCE ?
2840 SRC_SOFT_RESET_WAIT_SNK_TX :
2841 SNK_SOFT_RESET,
2842 NONE_AMS, 0);
2843 break;
2844 }
2845 break;
2846 case PD_CTRL_SOFT_RESET:
2847 port->ams = SOFT_RESET_AMS;
2848 tcpm_set_state(port, SOFT_RESET, 0);
2849 break;
2850 case PD_CTRL_DR_SWAP:
2851 /*
2852 * XXX
2853 * 6.3.9: If an alternate mode is active, a request to swap
2854 * alternate modes shall trigger a port reset.
2855 */
2856 if (port->typec_caps.data != TYPEC_PORT_DRD) {
2857 tcpm_pd_handle_msg(port,
2858 port->negotiated_rev < PD_REV30 ?
2859 PD_MSG_CTRL_REJECT :
2860 PD_MSG_CTRL_NOT_SUPP,
2861 NONE_AMS);
2862 } else {
2863 if (port->send_discover && port->negotiated_rev < PD_REV30) {
2864 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
2865 break;
2866 }
2867
2868 tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0);
2869 }
2870 break;
2871 case PD_CTRL_PR_SWAP:
2872 if (port->port_type != TYPEC_PORT_DRP) {
2873 tcpm_pd_handle_msg(port,
2874 port->negotiated_rev < PD_REV30 ?
2875 PD_MSG_CTRL_REJECT :
2876 PD_MSG_CTRL_NOT_SUPP,
2877 NONE_AMS);
2878 } else {
2879 if (port->send_discover && port->negotiated_rev < PD_REV30) {
2880 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
2881 break;
2882 }
2883
2884 tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0);
2885 }
2886 break;
2887 case PD_CTRL_VCONN_SWAP:
2888 if (port->send_discover && port->negotiated_rev < PD_REV30) {
2889 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
2890 break;
2891 }
2892
2893 tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0);
2894 break;
2895 case PD_CTRL_GET_SOURCE_CAP_EXT:
2896 case PD_CTRL_GET_STATUS:
2897 case PD_CTRL_FR_SWAP:
2898 case PD_CTRL_GET_PPS_STATUS:
2899 case PD_CTRL_GET_COUNTRY_CODES:
2900 /* Currently not supported */
2901 tcpm_pd_handle_msg(port,
2902 port->negotiated_rev < PD_REV30 ?
2903 PD_MSG_CTRL_REJECT :
2904 PD_MSG_CTRL_NOT_SUPP,
2905 NONE_AMS);
2906 break;
2907 default:
2908 tcpm_pd_handle_msg(port,
2909 port->negotiated_rev < PD_REV30 ?
2910 PD_MSG_CTRL_REJECT :
2911 PD_MSG_CTRL_NOT_SUPP,
2912 NONE_AMS);
2913 tcpm_log(port, "Unrecognized ctrl message type %#x", type);
2914 break;
2915 }
2916 }
2917
2918 static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
2919 const struct pd_message *msg)
2920 {
2921 enum pd_ext_msg_type type = pd_header_type_le(msg->header);
2922 unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
2923
2924 /* Stop the VDM state machine if interrupted by other Messages */
2925 if (tcpm_vdm_ams(port)) {
2926 port->vdm_state = VDM_STATE_ERR_BUSY;
2927 tcpm_ams_finish(port);
2928 mod_vdm_delayed_work(port, 0);
2929 }
2930
2931 if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
2932 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2933 tcpm_log(port, "Unchunked extended messages unsupported");
2934 return;
2935 }
2936
2937 if (data_size > PD_EXT_MAX_CHUNK_DATA) {
2938 tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP);
2939 tcpm_log(port, "Chunk handling not yet supported");
2940 return;
2941 }
2942
2943 switch (type) {
2944 case PD_EXT_STATUS:
2945 case PD_EXT_PPS_STATUS:
2946 if (port->ams == GETTING_SOURCE_SINK_STATUS) {
2947 tcpm_ams_finish(port);
2948 tcpm_set_state(port, ready_state(port), 0);
2949 } else {
2950 /* unexpected Status or PPS_Status Message */
2951 tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
2952 SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
2953 NONE_AMS, 0);
2954 }
2955 break;
2956 case PD_EXT_SOURCE_CAP_EXT:
2957 case PD_EXT_GET_BATT_CAP:
2958 case PD_EXT_GET_BATT_STATUS:
2959 case PD_EXT_BATT_CAP:
2960 case PD_EXT_GET_MANUFACTURER_INFO:
2961 case PD_EXT_MANUFACTURER_INFO:
2962 case PD_EXT_SECURITY_REQUEST:
2963 case PD_EXT_SECURITY_RESPONSE:
2964 case PD_EXT_FW_UPDATE_REQUEST:
2965 case PD_EXT_FW_UPDATE_RESPONSE:
2966 case PD_EXT_COUNTRY_INFO:
2967 case PD_EXT_COUNTRY_CODES:
2968 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2969 break;
2970 default:
2971 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2972 tcpm_log(port, "Unrecognized extended message type %#x", type);
2973 break;
2974 }
2975 }
2976
2977 static void tcpm_pd_rx_handler(struct kthread_work *work)
2978 {
2979 struct pd_rx_event *event = container_of(work,
2980 struct pd_rx_event, work);
2981 const struct pd_message *msg = &event->msg;
2982 unsigned int cnt = pd_header_cnt_le(msg->header);
2983 struct tcpm_port *port = event->port;
2984
2985 mutex_lock(&port->lock);
2986
2987 tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
2988 port->attached);
2989
2990 if (port->attached) {
2991 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
2992 unsigned int msgid = pd_header_msgid_le(msg->header);
2993
2994 /*
2995 * USB PD standard, 6.6.1.2:
2996 * "... if MessageID value in a received Message is the
2997 * same as the stored value, the receiver shall return a
2998 * GoodCRC Message with that MessageID value and drop
2999 * the Message (this is a retry of an already received
3000 * Message). Note: this shall not apply to the Soft_Reset
3001 * Message which always has a MessageID value of zero."
3002 */
3003 if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
3004 goto done;
3005 port->rx_msgid = msgid;
3006
3007 /*
3008 * If both ends believe they are DFP/host, we have a data role
3009 * mismatch.
3010 */
3011 if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
3012 (port->data_role == TYPEC_HOST)) {
3013 tcpm_log(port,
3014 "Data role mismatch, initiating error recovery");
3015 tcpm_set_state(port, ERROR_RECOVERY, 0);
3016 } else {
3017 if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
3018 tcpm_pd_ext_msg_request(port, msg);
3019 else if (cnt)
3020 tcpm_pd_data_request(port, msg);
3021 else
3022 tcpm_pd_ctrl_request(port, msg);
3023 }
3024 }
3025
3026 done:
3027 mutex_unlock(&port->lock);
3028 kfree(event);
3029 }
3030
3031 void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
3032 {
3033 struct pd_rx_event *event;
3034
3035 event = kzalloc(sizeof(*event), GFP_ATOMIC);
3036 if (!event)
3037 return;
3038
3039 kthread_init_work(&event->work, tcpm_pd_rx_handler);
3040 event->port = port;
3041 memcpy(&event->msg, msg, sizeof(*msg));
3042 kthread_queue_work(port->wq, &event->work);
3043 }
3044 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
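/*
 * Editor's note, illustrative sketch only: a low-level TCPC driver would
 * typically call tcpm_pd_receive() from its (threaded) interrupt handler
 * after pulling a message out of the chip's RX buffer, e.g.
 *
 *	struct pd_message msg;
 *
 *	if (!my_tcpc_read_rx_buffer(chip, &msg))	// hypothetical chip helper
 *		tcpm_pd_receive(chip->tcpm_port, &msg);
 *
 * "my_tcpc_read_rx_buffer" and "chip" are made-up names for illustration.
 * The event above is allocated with GFP_ATOMIC, so the call is safe from
 * such contexts; the actual message parsing then runs from
 * tcpm_pd_rx_handler() on the port's kthread worker, under port->lock.
 */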
3045
3046 static int tcpm_pd_send_control(struct tcpm_port *port,
3047 enum pd_ctrl_msg_type type)
3048 {
3049 struct pd_message msg;
3050
3051 memset(&msg, 0, sizeof(msg));
3052 msg.header = PD_HEADER_LE(type, port->pwr_role,
3053 port->data_role,
3054 port->negotiated_rev,
3055 port->message_id, 0);
3056
3057 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
3058 }
3059
3060 /*
3061 * Send queued message without affecting state.
3062 * Return true if state machine should go back to sleep,
3063 * false otherwise.
3064 */
3065 static bool tcpm_send_queued_message(struct tcpm_port *port)
3066 {
3067 enum pd_msg_request queued_message;
3068 int ret;
3069
3070 do {
3071 queued_message = port->queued_message;
3072 port->queued_message = PD_MSG_NONE;
3073
3074 switch (queued_message) {
3075 case PD_MSG_CTRL_WAIT:
3076 tcpm_pd_send_control(port, PD_CTRL_WAIT);
3077 break;
3078 case PD_MSG_CTRL_REJECT:
3079 tcpm_pd_send_control(port, PD_CTRL_REJECT);
3080 break;
3081 case PD_MSG_CTRL_NOT_SUPP:
3082 tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
3083 break;
3084 case PD_MSG_DATA_SINK_CAP:
3085 ret = tcpm_pd_send_sink_caps(port);
3086 if (ret < 0) {
3087 tcpm_log(port, "Unable to send snk caps, ret=%d", ret);
3088 tcpm_set_state(port, SNK_SOFT_RESET, 0);
3089 }
3090 tcpm_ams_finish(port);
3091 break;
3092 case PD_MSG_DATA_SOURCE_CAP:
3093 ret = tcpm_pd_send_source_caps(port);
3094 if (ret < 0) {
3095 tcpm_log(port,
3096 "Unable to send src caps, ret=%d",
3097 ret);
3098 tcpm_set_state(port, SOFT_RESET_SEND, 0);
3099 } else if (port->pwr_role == TYPEC_SOURCE) {
3100 tcpm_ams_finish(port);
3101 tcpm_set_state(port, HARD_RESET_SEND,
3102 PD_T_SENDER_RESPONSE);
3103 } else {
3104 tcpm_ams_finish(port);
3105 }
3106 break;
3107 default:
3108 break;
3109 }
3110 } while (port->queued_message != PD_MSG_NONE);
3111
3112 if (port->delayed_state != INVALID_STATE) {
3113 if (ktime_after(port->delayed_runtime, ktime_get())) {
3114 mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
3115 ktime_get())));
3116 return true;
3117 }
3118 port->delayed_state = INVALID_STATE;
3119 }
3120 return false;
3121 }
3122
3123 static int tcpm_pd_check_request(struct tcpm_port *port)
3124 {
3125 u32 pdo, rdo = port->sink_request;
3126 unsigned int max, op, pdo_max, index;
3127 enum pd_pdo_type type;
3128
3129 index = rdo_index(rdo);
3130 if (!index || index > port->nr_src_pdo)
3131 return -EINVAL;
3132
3133 pdo = port->src_pdo[index - 1];
3134 type = pdo_type(pdo);
3135 switch (type) {
3136 case PDO_TYPE_FIXED:
3137 case PDO_TYPE_VAR:
3138 max = rdo_max_current(rdo);
3139 op = rdo_op_current(rdo);
3140 pdo_max = pdo_max_current(pdo);
3141
3142 if (op > pdo_max)
3143 return -EINVAL;
3144 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3145 return -EINVAL;
3146
3147 if (type == PDO_TYPE_FIXED)
3148 tcpm_log(port,
3149 "Requested %u mV, %u mA for %u / %u mA",
3150 pdo_fixed_voltage(pdo), pdo_max, op, max);
3151 else
3152 tcpm_log(port,
3153 "Requested %u -> %u mV, %u mA for %u / %u mA",
3154 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3155 pdo_max, op, max);
3156 break;
3157 case PDO_TYPE_BATT:
3158 max = rdo_max_power(rdo);
3159 op = rdo_op_power(rdo);
3160 pdo_max = pdo_max_power(pdo);
3161
3162 if (op > pdo_max)
3163 return -EINVAL;
3164 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3165 return -EINVAL;
3166 tcpm_log(port,
3167 "Requested %u -> %u mV, %u mW for %u / %u mW",
3168 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3169 pdo_max, op, max);
3170 break;
3171 default:
3172 return -EINVAL;
3173 }
3174
3175 port->op_vsafe5v = index == 1;
3176
3177 return 0;
3178 }
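/*
 * Editor's note, worked example (values made up): if src_pdo[0] is
 * PDO_FIXED(5000, 3000, 0), a sink request of RDO_FIXED(1, 1500, 3000, 0)
 * passes the checks above (operating 1.5 A and maximum 3 A both fit the 3 A
 * PDO, and op_vsafe5v is set because object position 1 was chosen), whereas
 * RDO_FIXED(1, 1500, 4000, 0) is rejected with -EINVAL unless the partner
 * also set RDO_CAP_MISMATCH in the RDO.
 */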
3179
3180 #define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
3181 #define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
3182
3183 static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
3184 int *src_pdo)
3185 {
3186 unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0,
3187 max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0,
3188 min_snk_mv = 0;
3189 int ret = -EINVAL;
3190
3191 port->pps_data.supported = false;
3192 port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
3193 power_supply_changed(port->psy);
3194
3195 /*
3196 * Select the source PDO providing the most power which has a
3197 * matching sink cap.
3198 */
3199 for (i = 0; i < port->nr_source_caps; i++) {
3200 u32 pdo = port->source_caps[i];
3201 enum pd_pdo_type type = pdo_type(pdo);
3202
3203 switch (type) {
3204 case PDO_TYPE_FIXED:
3205 max_src_mv = pdo_fixed_voltage(pdo);
3206 min_src_mv = max_src_mv;
3207 break;
3208 case PDO_TYPE_BATT:
3209 case PDO_TYPE_VAR:
3210 max_src_mv = pdo_max_voltage(pdo);
3211 min_src_mv = pdo_min_voltage(pdo);
3212 break;
3213 case PDO_TYPE_APDO:
3214 if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) {
3215 port->pps_data.supported = true;
3216 port->usb_type =
3217 POWER_SUPPLY_USB_TYPE_PD_PPS;
3218 power_supply_changed(port->psy);
3219 }
3220 continue;
3221 default:
3222 tcpm_log(port, "Invalid source PDO type, ignoring");
3223 continue;
3224 }
3225
3226 switch (type) {
3227 case PDO_TYPE_FIXED:
3228 case PDO_TYPE_VAR:
3229 src_ma = pdo_max_current(pdo);
3230 src_mw = src_ma * min_src_mv / 1000;
3231 break;
3232 case PDO_TYPE_BATT:
3233 src_mw = pdo_max_power(pdo);
3234 break;
3235 case PDO_TYPE_APDO:
3236 continue;
3237 default:
3238 tcpm_log(port, "Invalid source PDO type, ignoring");
3239 continue;
3240 }
3241
3242 for (j = 0; j < port->nr_snk_pdo; j++) {
3243 pdo = port->snk_pdo[j];
3244
3245 switch (pdo_type(pdo)) {
3246 case PDO_TYPE_FIXED:
3247 max_snk_mv = pdo_fixed_voltage(pdo);
3248 min_snk_mv = max_snk_mv;
3249 break;
3250 case PDO_TYPE_BATT:
3251 case PDO_TYPE_VAR:
3252 max_snk_mv = pdo_max_voltage(pdo);
3253 min_snk_mv = pdo_min_voltage(pdo);
3254 break;
3255 case PDO_TYPE_APDO:
3256 continue;
3257 default:
3258 tcpm_log(port, "Invalid sink PDO type, ignoring");
3259 continue;
3260 }
3261
3262 if (max_src_mv <= max_snk_mv &&
3263 min_src_mv >= min_snk_mv) {
3264 /* Prefer higher voltages if available */
3265 if ((src_mw == max_mw && min_src_mv > max_mv) ||
3266 src_mw > max_mw) {
3267 *src_pdo = i;
3268 *sink_pdo = j;
3269 max_mw = src_mw;
3270 max_mv = min_src_mv;
3271 ret = 0;
3272 }
3273 }
3274 }
3275 }
3276
3277 return ret;
3278 }
3279
3280 static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
3281 {
3282 unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
3283 unsigned int src_pdo = 0;
3284 u32 pdo, src;
3285
3286 for (i = 1; i < port->nr_source_caps; ++i) {
3287 pdo = port->source_caps[i];
3288
3289 switch (pdo_type(pdo)) {
3290 case PDO_TYPE_APDO:
3291 if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
3292 tcpm_log(port, "Not PPS APDO (source), ignoring");
3293 continue;
3294 }
3295
3296 if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
3297 port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
3298 continue;
3299
3300 src_ma = pdo_pps_apdo_max_current(pdo);
3301 max_op_ma = min(src_ma, port->pps_data.req_op_curr);
3302 op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
3303 if (op_mw > max_temp_mw) {
3304 src_pdo = i;
3305 max_temp_mw = op_mw;
3306 }
3307 break;
3308 default:
3309 tcpm_log(port, "Not APDO type (source), ignoring");
3310 continue;
3311 }
3312 }
3313
3314 if (src_pdo) {
3315 src = port->source_caps[src_pdo];
3316
3317 port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src);
3318 port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src);
3319 port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src);
3320 port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
3321 port->pps_data.req_op_curr);
3322 }
3323
3324 return src_pdo;
3325 }
3326
3327 static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
3328 {
3329 unsigned int mv, ma, mw, flags;
3330 unsigned int max_ma, max_mw;
3331 enum pd_pdo_type type;
3332 u32 pdo, matching_snk_pdo;
3333 int src_pdo_index = 0;
3334 int snk_pdo_index = 0;
3335 int ret;
3336
3337 ret = tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index);
3338 if (ret < 0)
3339 return ret;
3340
3341 pdo = port->source_caps[src_pdo_index];
3342 matching_snk_pdo = port->snk_pdo[snk_pdo_index];
3343 type = pdo_type(pdo);
3344
3345 switch (type) {
3346 case PDO_TYPE_FIXED:
3347 mv = pdo_fixed_voltage(pdo);
3348 break;
3349 case PDO_TYPE_BATT:
3350 case PDO_TYPE_VAR:
3351 mv = pdo_min_voltage(pdo);
3352 break;
3353 default:
3354 tcpm_log(port, "Invalid PDO selected!");
3355 return -EINVAL;
3356 }
3357
3358 /* Select maximum available current within the sink pdo's limit */
3359 if (type == PDO_TYPE_BATT) {
3360 mw = min_power(pdo, matching_snk_pdo);
3361 ma = 1000 * mw / mv;
3362 } else {
3363 ma = min_current(pdo, matching_snk_pdo);
3364 mw = ma * mv / 1000;
3365 }
3366
3367 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
3368
3369 /* Set mismatch bit if offered power is less than operating power */
3370 max_ma = ma;
3371 max_mw = mw;
3372 if (mw < port->operating_snk_mw) {
3373 flags |= RDO_CAP_MISMATCH;
3374 if (type == PDO_TYPE_BATT &&
3375 (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
3376 max_mw = pdo_max_power(matching_snk_pdo);
3377 else if (pdo_max_current(matching_snk_pdo) >
3378 pdo_max_current(pdo))
3379 max_ma = pdo_max_current(matching_snk_pdo);
3380 }
3381
3382 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
3383 port->cc_req, port->cc1, port->cc2, port->vbus_source,
3384 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
3385 port->polarity);
3386
3387 if (type == PDO_TYPE_BATT) {
3388 *rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);
3389
3390 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
3391 src_pdo_index, mv, mw,
3392 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
3393 } else {
3394 *rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);
3395
3396 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
3397 src_pdo_index, mv, ma,
3398 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
3399 }
3400
3401 port->req_current_limit = ma;
3402 port->req_supply_voltage = mv;
3403
3404 return 0;
3405 }
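/*
 * Editor's note, worked example (values made up): suppose the selected
 * source PDO is PDO_FIXED(9000, 3000, 0), the matching sink PDO is
 * PDO_FIXED(9000, 1500, 0) and operating_snk_mw is 15000. Then
 * ma = min(3000, 1500) = 1500 and mw = 1500 * 9000 / 1000 = 13500 mW,
 * which is below the 15 W operating power, so RDO_CAP_MISMATCH is set
 * (max_ma stays 1500 because the sink PDO's limit is not higher than the
 * source's), and the resulting request is
 * RDO_FIXED(index + 1, 1500, 1500, RDO_USB_COMM | RDO_NO_SUSPEND | RDO_CAP_MISMATCH).
 */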
3406
3407 static int tcpm_pd_send_request(struct tcpm_port *port)
3408 {
3409 struct pd_message msg;
3410 int ret;
3411 u32 rdo;
3412
3413 ret = tcpm_pd_build_request(port, &rdo);
3414 if (ret < 0)
3415 return ret;
3416
3417 /*
3418 * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
3419 * It is safer to modify the threshold here.
3420 */
3421 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
3422
3423 memset(&msg, 0, sizeof(msg));
3424 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
3425 port->pwr_role,
3426 port->data_role,
3427 port->negotiated_rev,
3428 port->message_id, 1);
3429 msg.payload[0] = cpu_to_le32(rdo);
3430
3431 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
3432 }
3433
3434 static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
3435 {
3436 unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
3437 unsigned int src_pdo_index;
3438
3439 src_pdo_index = tcpm_pd_select_pps_apdo(port);
3440 if (!src_pdo_index)
3441 return -EOPNOTSUPP;
3442
3443 max_mv = port->pps_data.req_max_volt;
3444 max_ma = port->pps_data.req_max_curr;
3445 out_mv = port->pps_data.req_out_volt;
3446 op_ma = port->pps_data.req_op_curr;
3447
3448 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
3449
3450 op_mw = (op_ma * out_mv) / 1000;
3451 if (op_mw < port->operating_snk_mw) {
3452 /*
3453 * Try raising current to meet power needs. If that's not enough
3454 * then try upping the voltage. If that's still not enough
3455 * then we've obviously chosen a PPS APDO which really isn't
3456 * suitable so abandon ship.
3457 */
3458 op_ma = (port->operating_snk_mw * 1000) / out_mv;
3459 if ((port->operating_snk_mw * 1000) % out_mv)
3460 ++op_ma;
3461 op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP);
3462
3463 if (op_ma > max_ma) {
3464 op_ma = max_ma;
3465 out_mv = (port->operating_snk_mw * 1000) / op_ma;
3466 if ((port->operating_snk_mw * 1000) % op_ma)
3467 ++out_mv;
3468 out_mv += RDO_PROG_VOLT_MV_STEP -
3469 (out_mv % RDO_PROG_VOLT_MV_STEP);
3470
3471 if (out_mv > max_mv) {
3472 tcpm_log(port, "Invalid PPS APDO selected!");
3473 return -EINVAL;
3474 }
3475 }
3476 }
3477
3478 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
3479 port->cc_req, port->cc1, port->cc2, port->vbus_source,
3480 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
3481 port->polarity);
3482
3483 *rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags);
3484
3485 tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
3486 src_pdo_index, out_mv, op_ma);
3487
3488 port->pps_data.req_op_curr = op_ma;
3489 port->pps_data.req_out_volt = out_mv;
3490
3491 return 0;
3492 }
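/*
 * Editor's note, worked example (values made up): with req_out_volt = 5000 mV,
 * req_op_curr = 2000 mA and operating_snk_mw = 15000 mW, op_mw is 10 W, below
 * the 15 W target, so the current is raised to 15000 * 1000 / 5000 = 3000 mA
 * and then rounded up by one 50 mA PPS step to 3050 mA (the rounding adds a
 * full step even when the value is already a multiple of 50 mA). Provided the
 * APDO's maximum current allows it, the request becomes
 * RDO_PROG(index + 1, 5000, 3050, RDO_USB_COMM | RDO_NO_SUSPEND); otherwise
 * the current is clamped and the output voltage is raised instead, in 20 mV
 * steps, up to the APDO's maximum voltage.
 */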
3493
3494 static int tcpm_pd_send_pps_request(struct tcpm_port *port)
3495 {
3496 struct pd_message msg;
3497 int ret;
3498 u32 rdo;
3499
3500 ret = tcpm_pd_build_pps_request(port, &rdo);
3501 if (ret < 0)
3502 return ret;
3503
3504 /* Relax the threshold as voltage will be adjusted right after Accept Message. */
3505 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
3506
3507 memset(&msg, 0, sizeof(msg));
3508 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
3509 port->pwr_role,
3510 port->data_role,
3511 port->negotiated_rev,
3512 port->message_id, 1);
3513 msg.payload[0] = cpu_to_le32(rdo);
3514
3515 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
3516 }
3517
3518 static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
3519 {
3520 int ret;
3521
3522 if (enable && port->vbus_charge)
3523 return -EINVAL;
3524
3525 tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
3526
3527 ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
3528 if (ret < 0)
3529 return ret;
3530
3531 port->vbus_source = enable;
3532 return 0;
3533 }
3534
3535 static int tcpm_set_charge(struct tcpm_port *port, bool charge)
3536 {
3537 int ret;
3538
3539 if (charge && port->vbus_source)
3540 return -EINVAL;
3541
3542 if (charge != port->vbus_charge) {
3543 tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
3544 ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
3545 charge);
3546 if (ret < 0)
3547 return ret;
3548 }
3549 port->vbus_charge = charge;
3550 power_supply_changed(port->psy);
3551 return 0;
3552 }
3553
3554 static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
3555 {
3556 int ret;
3557
3558 if (!port->tcpc->start_toggling)
3559 return false;
3560
3561 tcpm_log_force(port, "Start toggling");
3562 ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc);
3563 return ret == 0;
3564 }
3565
3566 static int tcpm_init_vbus(struct tcpm_port *port)
3567 {
3568 int ret;
3569
3570 ret = port->tcpc->set_vbus(port->tcpc, false, false);
3571 port->vbus_source = false;
3572 port->vbus_charge = false;
3573 return ret;
3574 }
3575
3576 static int tcpm_init_vconn(struct tcpm_port *port)
3577 {
3578 int ret;
3579
3580 ret = port->tcpc->set_vconn(port->tcpc, false);
3581 port->vconn_role = TYPEC_SINK;
3582 return ret;
3583 }
3584
3585 static void tcpm_typec_connect(struct tcpm_port *port)
3586 {
3587 if (!port->connected) {
3588 /* Make sure we don't report stale identity information */
3589 memset(&port->partner_ident, 0, sizeof(port->partner_ident));
3590 port->partner_desc.usb_pd = port->pd_capable;
3591 if (tcpm_port_is_debug(port))
3592 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
3593 else if (tcpm_port_is_audio(port))
3594 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
3595 else
3596 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
3597 port->partner = typec_register_partner(port->typec_port,
3598 &port->partner_desc);
3599 port->connected = true;
3600 typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
3601 }
3602 }
3603
3604 static int tcpm_src_attach(struct tcpm_port *port)
3605 {
3606 enum typec_cc_polarity polarity =
3607 port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
3608 : TYPEC_POLARITY_CC1;
3609 int ret;
3610
3611 if (port->attached)
3612 return 0;
3613
3614 ret = tcpm_set_polarity(port, polarity);
3615 if (ret < 0)
3616 return ret;
3617
3618 tcpm_enable_auto_vbus_discharge(port, true);
3619
3620 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
3621 if (ret < 0)
3622 return ret;
3623
3624 if (port->pd_supported) {
3625 ret = port->tcpc->set_pd_rx(port->tcpc, true);
3626 if (ret < 0)
3627 goto out_disable_mux;
3628 }
3629
3630 /*
3631 * USB Type-C specification, version 1.2,
3632 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
3633 * Enable VCONN only if the non-RD port is set to RA.
3634 */
3635 if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
3636 (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
3637 ret = tcpm_set_vconn(port, true);
3638 if (ret < 0)
3639 goto out_disable_pd;
3640 }
3641
3642 ret = tcpm_set_vbus(port, true);
3643 if (ret < 0)
3644 goto out_disable_vconn;
3645
3646 port->pd_capable = false;
3647
3648 port->partner = NULL;
3649
3650 port->attached = true;
3651 port->send_discover = true;
3652
3653 return 0;
3654
3655 out_disable_vconn:
3656 tcpm_set_vconn(port, false);
3657 out_disable_pd:
3658 if (port->pd_supported)
3659 port->tcpc->set_pd_rx(port->tcpc, false);
3660 out_disable_mux:
3661 tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
3662 TYPEC_ORIENTATION_NONE);
3663 return ret;
3664 }
3665
3666 static void tcpm_typec_disconnect(struct tcpm_port *port)
3667 {
3668 if (port->connected) {
3669 typec_partner_set_usb_power_delivery(port->partner, NULL);
3670 typec_unregister_partner(port->partner);
3671 port->partner = NULL;
3672 port->connected = false;
3673 }
3674 }
3675
3676 static void tcpm_unregister_altmodes(struct tcpm_port *port)
3677 {
3678 struct pd_mode_data *modep = &port->mode_data;
3679 int i;
3680
3681 for (i = 0; i < modep->altmodes; i++) {
3682 typec_unregister_altmode(port->partner_altmode[i]);
3683 port->partner_altmode[i] = NULL;
3684 }
3685
3686 memset(modep, 0, sizeof(*modep));
3687 }
3688
3689 static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
3690 {
3691 tcpm_log(port, "Setting usb_comm capable %s", capable ? "true" : "false");
3692
3693 if (port->tcpc->set_partner_usb_comm_capable)
3694 port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
3695 }
3696
3697 static void tcpm_reset_port(struct tcpm_port *port)
3698 {
3699 tcpm_enable_auto_vbus_discharge(port, false);
3700 port->in_ams = false;
3701 port->ams = NONE_AMS;
3702 port->vdm_sm_running = false;
3703 tcpm_unregister_altmodes(port);
3704 tcpm_typec_disconnect(port);
3705 port->attached = false;
3706 port->pd_capable = false;
3707 port->pps_data.supported = false;
3708 tcpm_set_partner_usb_comm_capable(port, false);
3709
3710 /*
3711 * First Rx ID should be 0; set this to a sentinel of -1 so that
3712 * tcpm_pd_rx_handler() can tell whether we have seen it before.
3713 */
3714 port->rx_msgid = -1;
3715
3716 port->tcpc->set_pd_rx(port->tcpc, false);
3717 tcpm_init_vbus(port); /* also disables charging */
3718 tcpm_init_vconn(port);
3719 tcpm_set_current_limit(port, 0, 0);
3720 tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
3721 tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
3722 TYPEC_ORIENTATION_NONE);
3723 tcpm_set_attached_state(port, false);
3724 port->try_src_count = 0;
3725 port->try_snk_count = 0;
3726 port->usb_type = POWER_SUPPLY_USB_TYPE_C;
3727 power_supply_changed(port->psy);
3728 port->nr_sink_caps = 0;
3729 port->sink_cap_done = false;
3730 if (port->tcpc->enable_frs)
3731 port->tcpc->enable_frs(port->tcpc, false);
3732
3733 usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
3734 port->partner_sink_caps = NULL;
3735 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
3736 port->partner_source_caps = NULL;
3737 usb_power_delivery_unregister(port->partner_pd);
3738 port->partner_pd = NULL;
3739 }
3740
3741 static void tcpm_detach(struct tcpm_port *port)
3742 {
3743 if (tcpm_port_is_disconnected(port))
3744 port->hard_reset_count = 0;
3745
3746 if (!port->attached)
3747 return;
3748
3749 if (port->tcpc->set_bist_data) {
3750 tcpm_log(port, "disable BIST MODE TESTDATA");
3751 port->tcpc->set_bist_data(port->tcpc, false);
3752 }
3753
3754 tcpm_reset_port(port);
3755 }
3756
3757 static void tcpm_src_detach(struct tcpm_port *port)
3758 {
3759 tcpm_detach(port);
3760 }
3761
3762 static int tcpm_snk_attach(struct tcpm_port *port)
3763 {
3764 int ret;
3765
3766 if (port->attached)
3767 return 0;
3768
3769 ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
3770 TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
3771 if (ret < 0)
3772 return ret;
3773
3774 tcpm_enable_auto_vbus_discharge(port, true);
3775
3776 ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
3777 if (ret < 0)
3778 return ret;
3779
3780 port->pd_capable = false;
3781
3782 port->partner = NULL;
3783
3784 port->attached = true;
3785 port->send_discover = true;
3786
3787 return 0;
3788 }
3789
3790 static void tcpm_snk_detach(struct tcpm_port *port)
3791 {
3792 tcpm_detach(port);
3793 }
3794
3795 static int tcpm_acc_attach(struct tcpm_port *port)
3796 {
3797 int ret;
3798
3799 if (port->attached)
3800 return 0;
3801
3802 ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
3803 tcpm_data_role_for_source(port));
3804 if (ret < 0)
3805 return ret;
3806
3807 port->partner = NULL;
3808
3809 tcpm_typec_connect(port);
3810
3811 port->attached = true;
3812
3813 return 0;
3814 }
3815
3816 static void tcpm_acc_detach(struct tcpm_port *port)
3817 {
3818 tcpm_detach(port);
3819 }
3820
3821 static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
3822 {
3823 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
3824 return HARD_RESET_SEND;
3825 if (port->pd_capable)
3826 return ERROR_RECOVERY;
3827 if (port->pwr_role == TYPEC_SOURCE)
3828 return SRC_UNATTACHED;
3829 if (port->state == SNK_WAIT_CAPABILITIES)
3830 return SNK_READY;
3831 return SNK_UNATTACHED;
3832 }
3833
3834 static inline enum tcpm_state unattached_state(struct tcpm_port *port)
3835 {
3836 if (port->port_type == TYPEC_PORT_DRP) {
3837 if (port->pwr_role == TYPEC_SOURCE)
3838 return SRC_UNATTACHED;
3839 else
3840 return SNK_UNATTACHED;
3841 } else if (port->port_type == TYPEC_PORT_SRC) {
3842 return SRC_UNATTACHED;
3843 }
3844
3845 return SNK_UNATTACHED;
3846 }
3847
3848 static void tcpm_swap_complete(struct tcpm_port *port, int result)
3849 {
3850 if (port->swap_pending) {
3851 port->swap_status = result;
3852 port->swap_pending = false;
3853 port->non_pd_role_swap = false;
3854 complete(&port->swap_complete);
3855 }
3856 }
3857
3858 static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
3859 {
3860 switch (cc) {
3861 case TYPEC_CC_RP_1_5:
3862 return TYPEC_PWR_MODE_1_5A;
3863 case TYPEC_CC_RP_3_0:
3864 return TYPEC_PWR_MODE_3_0A;
3865 case TYPEC_CC_RP_DEF:
3866 default:
3867 return TYPEC_PWR_MODE_USB;
3868 }
3869 }
3870
3871 static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
3872 {
3873 switch (opmode) {
3874 case TYPEC_PWR_MODE_USB:
3875 return TYPEC_CC_RP_DEF;
3876 case TYPEC_PWR_MODE_1_5A:
3877 return TYPEC_CC_RP_1_5;
3878 case TYPEC_PWR_MODE_3_0A:
3879 case TYPEC_PWR_MODE_PD:
3880 default:
3881 return TYPEC_CC_RP_3_0;
3882 }
3883 }
3884
3885 static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
3886 {
3887 switch (port->negotiated_rev) {
3888 case PD_REV30:
3889 break;
3890 /*
3891 * 6.4.4.2.3 Structured VDM Version
3892 * 2.0 states "At this time, there is only one version (1.0) defined.
3893 * This field Shall be set to zero to indicate Version 1.0."
3894 * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
3895 * To ensure that we follow the Power Delivery revision we are currently
3896 * operating on, downgrade the SVDM version to the highest one supported
3897 * by the Power Delivery revision.
3898 */
3899 case PD_REV20:
3900 typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
3901 break;
3902 default:
3903 typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
3904 break;
3905 }
3906 }
3907
3908 static void run_state_machine(struct tcpm_port *port)
3909 {
3910 int ret;
3911 enum typec_pwr_opmode opmode;
3912 unsigned int msecs;
3913 enum tcpm_state upcoming_state;
3914
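/*
 * Flag a potential contaminant (moisture or debris on the CC pins) when an
 * attach attempt falls straight back to an unattached state, so that the
 * unattached handler can ask the TCPC to run its contaminant check before
 * toggling resumes.
 */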
3915 if (port->tcpc->check_contaminant && port->state != CHECK_CONTAMINANT)
3916 port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
3917 port->state == SRC_UNATTACHED) ||
3918 (port->enter_state == SNK_ATTACH_WAIT &&
3919 port->state == SNK_UNATTACHED) ||
3920 (port->enter_state == SNK_DEBOUNCED &&
3921 port->state == SNK_UNATTACHED));
3922
3923 port->enter_state = port->state;
3924 switch (port->state) {
3925 case TOGGLING:
3926 break;
3927 case CHECK_CONTAMINANT:
3928 port->tcpc->check_contaminant(port->tcpc);
3929 break;
3930 /* SRC states */
3931 case SRC_UNATTACHED:
3932 if (!port->non_pd_role_swap)
3933 tcpm_swap_complete(port, -ENOTCONN);
3934 tcpm_src_detach(port);
3935 if (port->potential_contaminant) {
3936 tcpm_set_state(port, CHECK_CONTAMINANT, 0);
3937 break;
3938 }
3939 if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
3940 tcpm_set_state(port, TOGGLING, 0);
3941 break;
3942 }
3943 tcpm_set_cc(port, tcpm_rp_cc(port));
3944 if (port->port_type == TYPEC_PORT_DRP)
3945 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
3946 break;
3947 case SRC_ATTACH_WAIT:
3948 if (tcpm_port_is_debug(port))
3949 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
3950 PD_T_CC_DEBOUNCE);
3951 else if (tcpm_port_is_audio(port))
3952 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
3953 PD_T_CC_DEBOUNCE);
3954 else if (tcpm_port_is_source(port) && port->vbus_vsafe0v)
3955 tcpm_set_state(port,
3956 tcpm_try_snk(port) ? SNK_TRY
3957 : SRC_ATTACHED,
3958 PD_T_CC_DEBOUNCE);
3959 break;
3960
3961 case SNK_TRY:
3962 port->try_snk_count++;
3963 /*
3964 * Requirements:
3965 * - Do not drive vconn or vbus
3966 * - Terminate CC pins (both) to Rd
3967 * Action:
3968 * - Wait for tDRPTry (PD_T_DRP_TRY).
3969 * Until then, ignore any state changes.
3970 */
3971 tcpm_set_cc(port, TYPEC_CC_RD);
3972 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
3973 break;
3974 case SNK_TRY_WAIT:
3975 if (tcpm_port_is_sink(port)) {
3976 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
3977 } else {
3978 tcpm_set_state(port, SRC_TRYWAIT, 0);
3979 port->max_wait = 0;
3980 }
3981 break;
3982 case SNK_TRY_WAIT_DEBOUNCE:
3983 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
3984 PD_T_TRY_CC_DEBOUNCE);
3985 break;
3986 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
3987 if (port->vbus_present && tcpm_port_is_sink(port))
3988 tcpm_set_state(port, SNK_ATTACHED, 0);
3989 else
3990 port->max_wait = 0;
3991 break;
3992 case SRC_TRYWAIT:
3993 tcpm_set_cc(port, tcpm_rp_cc(port));
3994 if (port->max_wait == 0) {
3995 port->max_wait = jiffies +
3996 msecs_to_jiffies(PD_T_DRP_TRY);
3997 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
3998 PD_T_DRP_TRY);
3999 } else {
4000 if (time_is_after_jiffies(port->max_wait))
4001 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4002 jiffies_to_msecs(port->max_wait -
4003 jiffies));
4004 else
4005 tcpm_set_state(port, SNK_UNATTACHED, 0);
4006 }
4007 break;
4008 case SRC_TRYWAIT_DEBOUNCE:
4009 tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
4010 break;
4011 case SRC_TRYWAIT_UNATTACHED:
4012 tcpm_set_state(port, SNK_UNATTACHED, 0);
4013 break;
4014
4015 case SRC_ATTACHED:
4016 ret = tcpm_src_attach(port);
4017 tcpm_set_state(port, SRC_UNATTACHED,
4018 ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
4019 break;
4020 case SRC_STARTUP:
4021 opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
4022 typec_set_pwr_opmode(port->typec_port, opmode);
4023 port->pwr_opmode = TYPEC_PWR_MODE_USB;
4024 port->caps_count = 0;
4025 port->negotiated_rev = PD_MAX_REV;
4026 port->message_id = 0;
4027 port->rx_msgid = -1;
4028 port->explicit_contract = false;
4029 /* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
4030 if (port->ams == POWER_ROLE_SWAP ||
4031 port->ams == FAST_ROLE_SWAP)
4032 tcpm_ams_finish(port);
4033 if (!port->pd_supported) {
4034 tcpm_set_state(port, SRC_READY, 0);
4035 break;
4036 }
4037 port->upcoming_state = SRC_SEND_CAPABILITIES;
4038 tcpm_ams_start(port, POWER_NEGOTIATION);
4039 break;
4040 case SRC_SEND_CAPABILITIES:
4041 port->caps_count++;
4042 if (port->caps_count > PD_N_CAPS_COUNT) {
4043 tcpm_set_state(port, SRC_READY, 0);
4044 break;
4045 }
4046 ret = tcpm_pd_send_source_caps(port);
4047 if (ret < 0) {
4048 tcpm_set_state(port, SRC_SEND_CAPABILITIES,
4049 PD_T_SEND_SOURCE_CAP);
4050 } else {
4051 /*
4052 * Per standard, we should clear the reset counter here.
4053 * However, that can result in state machine hang-ups.
4054 * Reset it only in READY state to improve stability.
4055 */
4056 /* port->hard_reset_count = 0; */
4057 port->caps_count = 0;
4058 port->pd_capable = true;
4059 tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
4060 PD_T_SEND_SOURCE_CAP);
4061 }
4062 break;
4063 case SRC_SEND_CAPABILITIES_TIMEOUT:
4064 /*
4065 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
4066 *
4067 * PD 2.0 sinks are supposed to accept src-capabilities with a
4068 * 3.0 header and simply ignore any src PDOs which the sink does
4069 * not understand such as PPS but some 2.0 sinks instead ignore
4070 * the entire PD_DATA_SOURCE_CAP message, causing contract
4071 * negotiation to fail.
4072 *
4073 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
4074 * sending src-capabilities with a lower PD revision to
4075 * make these broken sinks work.
4076 */
4077 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
4078 tcpm_set_state(port, HARD_RESET_SEND, 0);
4079 } else if (port->negotiated_rev > PD_REV20) {
4080 port->negotiated_rev--;
4081 port->hard_reset_count = 0;
4082 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
4083 } else {
4084 tcpm_set_state(port, hard_reset_state(port), 0);
4085 }
4086 break;
4087 case SRC_NEGOTIATE_CAPABILITIES:
4088 ret = tcpm_pd_check_request(port);
4089 if (ret < 0) {
4090 tcpm_pd_send_control(port, PD_CTRL_REJECT);
4091 if (!port->explicit_contract) {
4092 tcpm_set_state(port,
4093 SRC_WAIT_NEW_CAPABILITIES, 0);
4094 } else {
4095 tcpm_set_state(port, SRC_READY, 0);
4096 }
4097 } else {
4098 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4099 tcpm_set_partner_usb_comm_capable(port,
4100 !!(port->sink_request & RDO_USB_COMM));
4101 tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
4102 PD_T_SRC_TRANSITION);
4103 }
4104 break;
4105 case SRC_TRANSITION_SUPPLY:
4106 /* XXX: regulator_set_voltage(vbus, ...) */
4107 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
4108 port->explicit_contract = true;
4109 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
4110 port->pwr_opmode = TYPEC_PWR_MODE_PD;
4111 tcpm_set_state_cond(port, SRC_READY, 0);
4112 break;
4113 case SRC_READY:
4114 #if 1
4115 port->hard_reset_count = 0;
4116 #endif
4117 port->try_src_count = 0;
4118
4119 tcpm_swap_complete(port, 0);
4120 tcpm_typec_connect(port);
4121
4122 if (port->ams != NONE_AMS)
4123 tcpm_ams_finish(port);
4124 if (port->next_ams != NONE_AMS) {
4125 port->ams = port->next_ams;
4126 port->next_ams = NONE_AMS;
4127 }
4128
4129 /*
4130 * If previous AMS is interrupted, switch to the upcoming
4131 * state.
4132 */
4133 if (port->upcoming_state != INVALID_STATE) {
4134 upcoming_state = port->upcoming_state;
4135 port->upcoming_state = INVALID_STATE;
4136 tcpm_set_state(port, upcoming_state, 0);
4137 break;
4138 }
4139
4140 /*
4141 * 6.4.4.3.1 Discover Identity
4142 * "The Discover Identity Command Shall only be sent to SOP when there is an
4143 * Explicit Contract."
4144 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
4145 * port->explicit_contract to decide whether to send the command.
4146 */
4147 if (port->explicit_contract) {
4148 tcpm_set_initial_svdm_version(port);
4149 mod_send_discover_delayed_work(port, 0);
4150 } else {
4151 port->send_discover = false;
4152 }
4153
4154 /*
4155 * 6.3.5
4156 * Sending ping messages is not necessary if
4157 * - the source operates at vSafe5V
4158 * or
4159 * - The system is not operating in PD mode
4160 * or
4161 * - Both partners are connected using a Type-C connector
4162 *
4163 * There is no actual need to send PD messages since the local
4164 * port is Type-C, and the spec does not clearly say whether PD is
4165 * possible when Type-C is connected to Type-A/B.
4166 */
4167 break;
4168 case SRC_WAIT_NEW_CAPABILITIES:
4169 /* Nothing to do... */
4170 break;
4171
4172 /* SNK states */
4173 case SNK_UNATTACHED:
4174 if (!port->non_pd_role_swap)
4175 tcpm_swap_complete(port, -ENOTCONN);
4176 tcpm_pps_complete(port, -ENOTCONN);
4177 tcpm_snk_detach(port);
4178 if (port->potential_contaminant) {
4179 tcpm_set_state(port, CHECK_CONTAMINANT, 0);
4180 break;
4181 }
4182 if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
4183 tcpm_set_state(port, TOGGLING, 0);
4184 break;
4185 }
4186 tcpm_set_cc(port, TYPEC_CC_RD);
4187 if (port->port_type == TYPEC_PORT_DRP)
4188 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
4189 break;
4190 case SNK_ATTACH_WAIT:
4191 if ((port->cc1 == TYPEC_CC_OPEN &&
4192 port->cc2 != TYPEC_CC_OPEN) ||
4193 (port->cc1 != TYPEC_CC_OPEN &&
4194 port->cc2 == TYPEC_CC_OPEN))
4195 tcpm_set_state(port, SNK_DEBOUNCED,
4196 PD_T_CC_DEBOUNCE);
4197 else if (tcpm_port_is_disconnected(port))
4198 tcpm_set_state(port, SNK_UNATTACHED,
4199 PD_T_PD_DEBOUNCE);
4200 break;
4201 case SNK_DEBOUNCED:
4202 if (tcpm_port_is_disconnected(port))
4203 tcpm_set_state(port, SNK_UNATTACHED,
4204 PD_T_PD_DEBOUNCE);
4205 else if (port->vbus_present)
4206 tcpm_set_state(port,
4207 tcpm_try_src(port) ? SRC_TRY
4208 : SNK_ATTACHED,
4209 0);
4210 break;
4211 case SRC_TRY:
4212 port->try_src_count++;
4213 tcpm_set_cc(port, tcpm_rp_cc(port));
4214 port->max_wait = 0;
4215 tcpm_set_state(port, SRC_TRY_WAIT, 0);
4216 break;
4217 case SRC_TRY_WAIT:
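/*
 * max_wait pins the end of the tDRPTry window across re-entries of this
 * state, so only the remaining time is used as the delay instead of
 * restarting the full period.
 */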
4218 if (port->max_wait == 0) {
4219 port->max_wait = jiffies +
4220 msecs_to_jiffies(PD_T_DRP_TRY);
4221 msecs = PD_T_DRP_TRY;
4222 } else {
4223 if (time_is_after_jiffies(port->max_wait))
4224 msecs = jiffies_to_msecs(port->max_wait -
4225 jiffies);
4226 else
4227 msecs = 0;
4228 }
4229 tcpm_set_state(port, SNK_TRYWAIT, msecs);
4230 break;
4231 case SRC_TRY_DEBOUNCE:
4232 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
4233 break;
4234 case SNK_TRYWAIT:
4235 tcpm_set_cc(port, TYPEC_CC_RD);
4236 tcpm_set_state(port, SNK_TRYWAIT_VBUS, PD_T_CC_DEBOUNCE);
4237 break;
4238 case SNK_TRYWAIT_VBUS:
4239 /*
4240 * TCPM stays in this state indefinitely until VBUS
4241 * is detected as long as Rp is not detected for
4242 * more than a time period of tPDDebounce.
4243 */
4244 if (port->vbus_present && tcpm_port_is_sink(port)) {
4245 tcpm_set_state(port, SNK_ATTACHED, 0);
4246 break;
4247 }
4248 if (!tcpm_port_is_sink(port))
4249 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
4250 break;
4251 case SNK_TRYWAIT_DEBOUNCE:
4252 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
4253 break;
4254 case SNK_ATTACHED:
4255 ret = tcpm_snk_attach(port);
4256 if (ret < 0)
4257 tcpm_set_state(port, SNK_UNATTACHED, 0);
4258 else
4259 tcpm_set_state(port, SNK_STARTUP, 0);
4260 break;
4261 case SNK_STARTUP:
4262 opmode = tcpm_get_pwr_opmode(port->polarity ?
4263 port->cc2 : port->cc1);
4264 typec_set_pwr_opmode(port->typec_port, opmode);
4265 port->pwr_opmode = TYPEC_PWR_MODE_USB;
4266 port->negotiated_rev = PD_MAX_REV;
4267 port->message_id = 0;
4268 port->rx_msgid = -1;
4269 port->explicit_contract = false;
4270
4271 if (port->ams == POWER_ROLE_SWAP ||
4272 port->ams == FAST_ROLE_SWAP)
4273 /* SRC -> SNK POWER/FAST_ROLE_SWAP finished */
4274 tcpm_ams_finish(port);
4275
4276 tcpm_set_state(port, SNK_DISCOVERY, 0);
4277 break;
4278 case SNK_DISCOVERY:
4279 if (port->vbus_present) {
4280 u32 current_lim = tcpm_get_current_limit(port);
4281
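/*
 * With the slow charger loop, cap the input current at pSnkStdby
 * (PD_P_SNK_STDBY_MW drawn at 5V) while capabilities are still being
 * discovered.
 */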
4282 if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
4283 current_lim = PD_P_SNK_STDBY_MW / 5;
4284 tcpm_set_current_limit(port, current_lim, 5000);
4285 /* Do not sink vbus if operational current is 0mA */
4286 tcpm_set_charge(port, !port->pd_supported ||
4287 pdo_max_current(port->snk_pdo[0]));
4288
4289 if (!port->pd_supported)
4290 tcpm_set_state(port, SNK_READY, 0);
4291 else
4292 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
4293 break;
4294 }
4295 /*
4296 * For DRP, timeouts differ. Also, handling is supposed to be
4297 * different and much more complex (dead battery detection;
4298 * see USB power delivery specification, section 8.3.3.6.1.5.1).
4299 */
4300 tcpm_set_state(port, hard_reset_state(port),
4301 port->port_type == TYPEC_PORT_DRP ?
4302 PD_T_DB_DETECT : PD_T_NO_RESPONSE);
4303 break;
4304 case SNK_DISCOVERY_DEBOUNCE:
4305 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
4306 PD_T_CC_DEBOUNCE);
4307 break;
4308 case SNK_DISCOVERY_DEBOUNCE_DONE:
4309 if (!tcpm_port_is_disconnected(port) &&
4310 tcpm_port_is_sink(port) &&
4311 ktime_after(port->delayed_runtime, ktime_get())) {
4312 tcpm_set_state(port, SNK_DISCOVERY,
4313 ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
4314 break;
4315 }
4316 tcpm_set_state(port, unattached_state(port), 0);
4317 break;
4318 case SNK_WAIT_CAPABILITIES:
4319 ret = port->tcpc->set_pd_rx(port->tcpc, true);
4320 if (ret < 0) {
4321 tcpm_set_state(port, SNK_READY, 0);
4322 break;
4323 }
4324 /*
4325 * If VBUS has never been low, and we time out waiting
4326 * for source cap, try a soft reset first, in case we
4327 * were already in a stable contract before this boot.
4328 * Do this only once.
4329 */
4330 if (port->vbus_never_low) {
4331 port->vbus_never_low = false;
4332 tcpm_set_state(port, SNK_SOFT_RESET,
4333 PD_T_SINK_WAIT_CAP);
4334 } else {
4335 tcpm_set_state(port, hard_reset_state(port),
4336 PD_T_SINK_WAIT_CAP);
4337 }
4338 break;
4339 case SNK_NEGOTIATE_CAPABILITIES:
4340 port->pd_capable = true;
4341 tcpm_set_partner_usb_comm_capable(port,
4342 !!(port->source_caps[0] & PDO_FIXED_USB_COMM));
4343 port->hard_reset_count = 0;
4344 ret = tcpm_pd_send_request(port);
4345 if (ret < 0) {
4346 /* Restore back to the original state */
4347 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
4348 port->pps_data.active,
4349 port->supply_voltage);
4350 /* Let the Source send capabilities again. */
4351 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
4352 } else {
4353 tcpm_set_state_cond(port, hard_reset_state(port),
4354 PD_T_SENDER_RESPONSE);
4355 }
4356 break;
4357 case SNK_NEGOTIATE_PPS_CAPABILITIES:
4358 ret = tcpm_pd_send_pps_request(port);
4359 if (ret < 0) {
4360 /* Restore back to the original state */
4361 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
4362 port->pps_data.active,
4363 port->supply_voltage);
4364 port->pps_status = ret;
4365 /*
4366 * If this was called due to updates to sink
4367 * capabilities, and pps is no longer valid, we should
4368 * safely fall back to a standard PDO.
4369 */
4370 if (port->update_sink_caps)
4371 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
4372 else
4373 tcpm_set_state(port, SNK_READY, 0);
4374 } else {
4375 tcpm_set_state_cond(port, hard_reset_state(port),
4376 PD_T_SENDER_RESPONSE);
4377 }
4378 break;
4379 case SNK_TRANSITION_SINK:
4380 /* From the USB PD spec:
4381 * "The Sink Shall transition to Sink Standby before a positive or
4382 * negative voltage transition of VBUS. During Sink Standby
4383 * the Sink Shall reduce its power draw to pSnkStdby."
4384 *
4385 * This is not applicable to PPS though as the port can continue
4386 * to draw negotiated power without switching to standby.
4387 */
4388 if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
4389 port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
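/* pSnkStdby in mW times 1000, divided by VBUS in mV, yields the standby current in mA */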
4390 u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
4391
4392 tcpm_log(port, "Setting standby current %u mV @ %u mA",
4393 port->supply_voltage, stdby_ma);
4394 tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
4395 }
4396 fallthrough;
4397 case SNK_TRANSITION_SINK_VBUS:
4398 tcpm_set_state(port, hard_reset_state(port),
4399 PD_T_PS_TRANSITION);
4400 break;
4401 case SNK_READY:
4402 port->try_snk_count = 0;
4403 port->update_sink_caps = false;
4404 if (port->explicit_contract) {
4405 typec_set_pwr_opmode(port->typec_port,
4406 TYPEC_PWR_MODE_PD);
4407 port->pwr_opmode = TYPEC_PWR_MODE_PD;
4408 }
4409
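/*
 * Non-PD partner with the slow charger loop: lift the standby cap applied
 * in SNK_DISCOVERY and draw the full Rp-advertised current again.
 */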
4410 if (!port->pd_capable && port->slow_charger_loop)
4411 tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
4412 tcpm_swap_complete(port, 0);
4413 tcpm_typec_connect(port);
4414 if (port->pd_capable && port->source_caps[0] & PDO_FIXED_DUAL_ROLE)
4415 mod_enable_frs_delayed_work(port, 0);
4416 tcpm_pps_complete(port, port->pps_status);
4417
4418 if (port->ams != NONE_AMS)
4419 tcpm_ams_finish(port);
4420 if (port->next_ams != NONE_AMS) {
4421 port->ams = port->next_ams;
4422 port->next_ams = NONE_AMS;
4423 }
4424
4425 /*
4426 * If previous AMS is interrupted, switch to the upcoming
4427 * state.
4428 */
4429 if (port->upcoming_state != INVALID_STATE) {
4430 upcoming_state = port->upcoming_state;
4431 port->upcoming_state = INVALID_STATE;
4432 tcpm_set_state(port, upcoming_state, 0);
4433 break;
4434 }
4435
4436 /*
4437 * 6.4.4.3.1 Discover Identity
4438 * "The Discover Identity Command Shall only be sent to SOP when there is an
4439 * Explicit Contract."
4440 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
4441 * port->explicit_contract.
4442 */
4443 if (port->explicit_contract) {
4444 tcpm_set_initial_svdm_version(port);
4445 mod_send_discover_delayed_work(port, 0);
4446 } else {
4447 port->send_discover = false;
4448 }
4449
4450 power_supply_changed(port->psy);
4451 break;
4452
4453 /* Accessory states */
4454 case ACC_UNATTACHED:
4455 tcpm_acc_detach(port);
4456 tcpm_set_state(port, SRC_UNATTACHED, 0);
4457 break;
4458 case DEBUG_ACC_ATTACHED:
4459 case AUDIO_ACC_ATTACHED:
4460 ret = tcpm_acc_attach(port);
4461 if (ret < 0)
4462 tcpm_set_state(port, ACC_UNATTACHED, 0);
4463 break;
4464 case AUDIO_ACC_DEBOUNCE:
4465 tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
4466 break;
4467
4468 /* Hard_Reset states */
4469 case HARD_RESET_SEND:
4470 if (port->ams != NONE_AMS)
4471 tcpm_ams_finish(port);
4472 /*
4473 * State machine will be directed to HARD_RESET_START,
4474 * thus set upcoming_state to INVALID_STATE.
4475 */
4476 port->upcoming_state = INVALID_STATE;
4477 tcpm_ams_start(port, HARD_RESET);
4478 break;
4479 case HARD_RESET_START:
4480 port->sink_cap_done = false;
4481 if (port->tcpc->enable_frs)
4482 port->tcpc->enable_frs(port->tcpc, false);
4483 port->hard_reset_count++;
4484 port->tcpc->set_pd_rx(port->tcpc, false);
4485 tcpm_unregister_altmodes(port);
4486 port->nr_sink_caps = 0;
4487 port->send_discover = true;
4488 if (port->pwr_role == TYPEC_SOURCE)
4489 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
4490 PD_T_PS_HARD_RESET);
4491 else
4492 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
4493 break;
4494 case SRC_HARD_RESET_VBUS_OFF:
4495 /*
4496 * 7.1.5 Response to Hard Resets
4497 * Hard Reset Signaling indicates a communication failure has occurred and the
4498 * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
4499 * drive VBUS to vSafe0V as shown in Figure 7-9.
4500 */
4501 tcpm_set_vconn(port, false);
4502 tcpm_set_vbus(port, false);
4503 tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
4504 tcpm_data_role_for_source(port));
4505 /*
4506 * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
4507 * PD_T_SRC_RECOVER before turning vbus back on.
4508 * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
4509 * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
4510 * tells the Device Policy Manager to instruct the power supply to perform a
4511 * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
4512 * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
4513 * re-establish communication with the Sink and resume USB Default Operation.
4514 * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
4515 */
4516 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
4517 break;
4518 case SRC_HARD_RESET_VBUS_ON:
4519 tcpm_set_vconn(port, true);
4520 tcpm_set_vbus(port, true);
4521 if (port->ams == HARD_RESET)
4522 tcpm_ams_finish(port);
4523 if (port->pd_supported)
4524 port->tcpc->set_pd_rx(port->tcpc, true);
4525 tcpm_set_attached_state(port, true);
4526 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
4527 break;
4528 case SNK_HARD_RESET_SINK_OFF:
4529 /* Do not discharge/disconnect during hard reset */
4530 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
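/* Any PPS contract state does not survive a Hard Reset; forget it */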
4531 memset(&port->pps_data, 0, sizeof(port->pps_data));
4532 tcpm_set_vconn(port, false);
4533 if (port->pd_capable)
4534 tcpm_set_charge(port, false);
4535 tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
4536 tcpm_data_role_for_sink(port));
4537 /*
4538 * VBUS may or may not toggle, depending on the adapter.
4539 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
4540 * directly after timeout.
4541 */
4542 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
4543 break;
4544 case SNK_HARD_RESET_WAIT_VBUS:
4545 if (port->ams == HARD_RESET)
4546 tcpm_ams_finish(port);
4547 /* Assume we're disconnected if VBUS doesn't come back. */
4548 tcpm_set_state(port, SNK_UNATTACHED,
4549 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
4550 break;
4551 case SNK_HARD_RESET_SINK_ON:
4552 /* Note: There is no guarantee that VBUS is on in this state */
4553 /*
4554 * XXX:
4555 * The specification suggests that dual mode ports in sink
4556 * mode should transition to state PE_SRC_Transition_to_default.
4557 * See USB power delivery specification chapter 8.3.3.6.1.3.
4558 * This would mean to
4559 * - turn off VCONN, reset power supply
4560 * - request hardware reset
4561 * - turn on VCONN
4562 * - Transition to state PE_Src_Startup
4563 * SNK only ports shall transition to state Snk_Startup
4564 * (see chapter 8.3.3.3.8).
4565 * Similarly, dual-mode ports in source mode should transition
4566 * to PE_SNK_Transition_to_default.
4567 */
4568 if (port->pd_capable) {
4569 tcpm_set_current_limit(port,
4570 tcpm_get_current_limit(port),
4571 5000);
4572 /* Do not sink vbus if operational current is 0mA */
4573 tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
4574 }
4575 if (port->ams == HARD_RESET)
4576 tcpm_ams_finish(port);
4577 tcpm_set_attached_state(port, true);
4578 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
4579 tcpm_set_state(port, SNK_STARTUP, 0);
4580 break;
4581
4582 /* Soft_Reset states */
4583 case SOFT_RESET:
4584 port->message_id = 0;
4585 port->rx_msgid = -1;
4586 /* remove existing capabilities */
4587 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4588 port->partner_source_caps = NULL;
4589 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4590 tcpm_ams_finish(port);
4591 if (port->pwr_role == TYPEC_SOURCE) {
4592 port->upcoming_state = SRC_SEND_CAPABILITIES;
4593 tcpm_ams_start(port, POWER_NEGOTIATION);
4594 } else {
4595 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
4596 }
4597 break;
4598 case SRC_SOFT_RESET_WAIT_SNK_TX:
4599 case SNK_SOFT_RESET:
4600 if (port->ams != NONE_AMS)
4601 tcpm_ams_finish(port);
4602 port->upcoming_state = SOFT_RESET_SEND;
4603 tcpm_ams_start(port, SOFT_RESET_AMS);
4604 break;
4605 case SOFT_RESET_SEND:
4606 port->message_id = 0;
4607 port->rx_msgid = -1;
4608 /* remove existing capabilities */
4609 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4610 port->partner_source_caps = NULL;
4611 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
4612 tcpm_set_state_cond(port, hard_reset_state(port), 0);
4613 else
4614 tcpm_set_state_cond(port, hard_reset_state(port),
4615 PD_T_SENDER_RESPONSE);
4616 break;
4617
4618 /* DR_Swap states */
4619 case DR_SWAP_SEND:
4620 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
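/*
 * Discover Identity needs to be re-run after the data role changes: with
 * PD 2.0 only the DFP may send it (so the current UFP will need to once it
 * becomes host), while PD 3.0 allows either data role to initiate it.
 */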
4621 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
4622 port->send_discover = true;
4623 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
4624 PD_T_SENDER_RESPONSE);
4625 break;
4626 case DR_SWAP_ACCEPT:
4627 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4628 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
4629 port->send_discover = true;
4630 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
4631 break;
4632 case DR_SWAP_SEND_TIMEOUT:
4633 tcpm_swap_complete(port, -ETIMEDOUT);
4634 port->send_discover = false;
4635 tcpm_ams_finish(port);
4636 tcpm_set_state(port, ready_state(port), 0);
4637 break;
4638 case DR_SWAP_CHANGE_DR:
4639 tcpm_unregister_altmodes(port);
4640 if (port->data_role == TYPEC_HOST)
4641 tcpm_set_roles(port, true, port->pwr_role,
4642 TYPEC_DEVICE);
4643 else
4644 tcpm_set_roles(port, true, port->pwr_role,
4645 TYPEC_HOST);
4646 tcpm_ams_finish(port);
4647 tcpm_set_state(port, ready_state(port), 0);
4648 break;
4649
4650 case FR_SWAP_SEND:
4651 if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP)) {
4652 tcpm_set_state(port, ERROR_RECOVERY, 0);
4653 break;
4654 }
4655 tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE);
4656 break;
4657 case FR_SWAP_SEND_TIMEOUT:
4658 tcpm_set_state(port, ERROR_RECOVERY, 0);
4659 break;
4660 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
4661 tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_OFF);
4662 break;
4663 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
4664 if (port->vbus_source)
4665 tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
4666 else
4667 tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE);
4668 break;
4669 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
4670 tcpm_set_pwr_role(port, TYPEC_SOURCE);
4671 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
4672 tcpm_set_state(port, ERROR_RECOVERY, 0);
4673 break;
4674 }
4675 tcpm_set_cc(port, tcpm_rp_cc(port));
4676 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
4677 break;
4678
4679 /* PR_Swap states */
4680 case PR_SWAP_ACCEPT:
4681 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4682 tcpm_set_state(port, PR_SWAP_START, 0);
4683 break;
4684 case PR_SWAP_SEND:
4685 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
4686 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
4687 PD_T_SENDER_RESPONSE);
4688 break;
4689 case PR_SWAP_SEND_TIMEOUT:
4690 tcpm_swap_complete(port, -ETIMEDOUT);
4691 tcpm_set_state(port, ready_state(port), 0);
4692 break;
4693 case PR_SWAP_START:
4694 tcpm_apply_rc(port);
4695 if (port->pwr_role == TYPEC_SOURCE)
4696 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
4697 PD_T_SRC_TRANSITION);
4698 else
4699 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
4700 break;
4701 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
4702 /*
4703 * Prevent vbus discharge circuit from turning on during PR_SWAP
4704 * as this is not a disconnect.
4705 */
4706 tcpm_set_vbus(port, false);
4707 port->explicit_contract = false;
4708 /* allow time for Vbus discharge, must be < tSrcSwapStdby */
4709 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
4710 PD_T_SRCSWAPSTDBY);
4711 break;
4712 case PR_SWAP_SRC_SNK_SOURCE_OFF:
4713 tcpm_set_cc(port, TYPEC_CC_RD);
4714 /* allow CC debounce */
4715 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
4716 PD_T_CC_DEBOUNCE);
4717 break;
4718 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
4719 /*
4720 * USB-PD standard, 6.2.1.4, Port Power Role:
4721 * "During the Power Role Swap Sequence, for the initial Source
4722 * Port, the Port Power Role field shall be set to Sink in the
4723 * PS_RDY Message indicating that the initial Source’s power
4724 * supply is turned off"
4725 */
4726 tcpm_set_pwr_role(port, TYPEC_SINK);
4727 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
4728 tcpm_set_state(port, ERROR_RECOVERY, 0);
4729 break;
4730 }
4731 tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS);
4732 break;
4733 case PR_SWAP_SRC_SNK_SINK_ON:
4734 tcpm_enable_auto_vbus_discharge(port, true);
4735 /* Set the vbus disconnect threshold for implicit contract */
4736 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
4737 tcpm_set_state(port, SNK_STARTUP, 0);
4738 break;
4739 case PR_SWAP_SNK_SRC_SINK_OFF:
4740 /* will be source, remove existing capabilities */
4741 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4742 port->partner_source_caps = NULL;
4743 /*
4744 * Prevent vbus discharge circuit from turning on during PR_SWAP
4745 * as this is not a disconnect.
4746 */
4747 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
4748 port->pps_data.active, 0);
4749 tcpm_set_charge(port, false);
4750 tcpm_set_state(port, hard_reset_state(port),
4751 PD_T_PS_SOURCE_OFF);
4752 break;
4753 case PR_SWAP_SNK_SRC_SOURCE_ON:
4754 tcpm_enable_auto_vbus_discharge(port, true);
4755 tcpm_set_cc(port, tcpm_rp_cc(port));
4756 tcpm_set_vbus(port, true);
4757 /*
4758 * Allow time for VBUS ramp-up; must be < tNewSrc.
4759 * Also, this window overlaps with CC debounce,
4760 * so wait for the max of the two, which is PD_T_NEWSRC.
4761 */
4762 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
4763 PD_T_NEWSRC);
4764 break;
4765 case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
4766 /*
4767 * USB PD standard, 6.2.1.4:
4768 * "Subsequent Messages initiated by the Policy Engine,
4769 * such as the PS_RDY Message sent to indicate that Vbus
4770 * is ready, will have the Port Power Role field set to
4771 * Source."
4772 */
4773 tcpm_set_pwr_role(port, TYPEC_SOURCE);
4774 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
4775 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
4776 break;
4777
4778 case VCONN_SWAP_ACCEPT:
4779 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
4780 tcpm_ams_finish(port);
4781 tcpm_set_state(port, VCONN_SWAP_START, 0);
4782 break;
4783 case VCONN_SWAP_SEND:
4784 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
4785 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
4786 PD_T_SENDER_RESPONSE);
4787 break;
4788 case VCONN_SWAP_SEND_TIMEOUT:
4789 tcpm_swap_complete(port, -ETIMEDOUT);
4790 tcpm_set_state(port, ready_state(port), 0);
4791 break;
4792 case VCONN_SWAP_START:
4793 if (port->vconn_role == TYPEC_SOURCE)
4794 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
4795 else
4796 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
4797 break;
4798 case VCONN_SWAP_WAIT_FOR_VCONN:
4799 tcpm_set_state(port, hard_reset_state(port),
4800 PD_T_VCONN_SOURCE_ON);
4801 break;
4802 case VCONN_SWAP_TURN_ON_VCONN:
4803 tcpm_set_vconn(port, true);
4804 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
4805 tcpm_set_state(port, ready_state(port), 0);
4806 break;
4807 case VCONN_SWAP_TURN_OFF_VCONN:
4808 tcpm_set_vconn(port, false);
4809 tcpm_set_state(port, ready_state(port), 0);
4810 break;
4811
4812 case DR_SWAP_CANCEL:
4813 case PR_SWAP_CANCEL:
4814 case VCONN_SWAP_CANCEL:
4815 tcpm_swap_complete(port, port->swap_status);
4816 if (port->pwr_role == TYPEC_SOURCE)
4817 tcpm_set_state(port, SRC_READY, 0);
4818 else
4819 tcpm_set_state(port, SNK_READY, 0);
4820 break;
4821 case FR_SWAP_CANCEL:
4822 if (port->pwr_role == TYPEC_SOURCE)
4823 tcpm_set_state(port, SRC_READY, 0);
4824 else
4825 tcpm_set_state(port, SNK_READY, 0);
4826 break;
4827
4828 case BIST_RX:
4829 switch (BDO_MODE_MASK(port->bist_request)) {
4830 case BDO_MODE_CARRIER2:
4831 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
4832 tcpm_set_state(port, unattached_state(port),
4833 PD_T_BIST_CONT_MODE);
4834 break;
4835 case BDO_MODE_TESTDATA:
4836 if (port->tcpc->set_bist_data) {
4837 tcpm_log(port, "Enable BIST MODE TESTDATA");
4838 port->tcpc->set_bist_data(port->tcpc, true);
4839 }
4840 break;
4841 default:
4842 break;
4843 }
4844 break;
4845 case GET_STATUS_SEND:
4846 tcpm_pd_send_control(port, PD_CTRL_GET_STATUS);
4847 tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
4848 PD_T_SENDER_RESPONSE);
4849 break;
4850 case GET_STATUS_SEND_TIMEOUT:
4851 tcpm_set_state(port, ready_state(port), 0);
4852 break;
4853 case GET_PPS_STATUS_SEND:
4854 tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS);
4855 tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
4856 PD_T_SENDER_RESPONSE);
4857 break;
4858 case GET_PPS_STATUS_SEND_TIMEOUT:
4859 tcpm_set_state(port, ready_state(port), 0);
4860 break;
4861 case GET_SINK_CAP:
4862 tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP);
4863 tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
4864 break;
4865 case GET_SINK_CAP_TIMEOUT:
4866 port->sink_cap_done = true;
4867 tcpm_set_state(port, ready_state(port), 0);
4868 break;
4869 case ERROR_RECOVERY:
4870 tcpm_swap_complete(port, -EPROTO);
4871 tcpm_pps_complete(port, -EPROTO);
4872 tcpm_set_state(port, PORT_RESET, 0);
4873 break;
4874 case PORT_RESET:
4875 tcpm_reset_port(port);
4876 if (port->self_powered)
4877 tcpm_set_cc(port, TYPEC_CC_OPEN);
4878 else
4879 tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
4880 TYPEC_CC_RD : tcpm_rp_cc(port));
4881 tcpm_set_state(port, PORT_RESET_WAIT_OFF,
4882 PD_T_ERROR_RECOVERY);
4883 break;
4884 case PORT_RESET_WAIT_OFF:
4885 tcpm_set_state(port,
4886 tcpm_default_state(port),
4887 port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
4888 break;
4889
4890 /* AMS intermediate state */
4891 case AMS_START:
4892 if (port->upcoming_state == INVALID_STATE) {
4893 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
4894 SRC_READY : SNK_READY, 0);
4895 break;
4896 }
4897
4898 upcoming_state = port->upcoming_state;
4899 port->upcoming_state = INVALID_STATE;
4900 tcpm_set_state(port, upcoming_state, 0);
4901 break;
4902
4903 /* Chunk state */
4904 case CHUNK_NOT_SUPP:
4905 tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
4906 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
4907 break;
4908 default:
4909 WARN(1, "Unexpected port state %d\n", port->state);
4910 break;
4911 }
4912 }
4913
4914 static void tcpm_state_machine_work(struct kthread_work *work)
4915 {
4916 struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
4917 enum tcpm_state prev_state;
4918
4919 mutex_lock(&port->lock);
4920 port->state_machine_running = true;
4921
4922 if (port->queued_message && tcpm_send_queued_message(port))
4923 goto done;
4924
4925 /* If we were queued due to a delayed state change, update it now */
4926 if (port->delayed_state) {
4927 tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
4928 tcpm_states[port->state],
4929 tcpm_states[port->delayed_state], port->delay_ms);
4930 port->prev_state = port->state;
4931 port->state = port->delayed_state;
4932 port->delayed_state = INVALID_STATE;
4933 }
4934
4935 /*
4936 * Continue running as long as we have (non-delayed) state changes
4937 * to make.
4938 */
4939 do {
4940 prev_state = port->state;
4941 run_state_machine(port);
4942 if (port->queued_message)
4943 tcpm_send_queued_message(port);
4944 } while (port->state != prev_state && !port->delayed_state);
4945
4946 done:
4947 port->state_machine_running = false;
4948 mutex_unlock(&port->lock);
4949 }
4950
4951 static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
4952 enum typec_cc_status cc2)
4953 {
4954 enum typec_cc_status old_cc1, old_cc2;
4955 enum tcpm_state new_state;
4956
4957 old_cc1 = port->cc1;
4958 old_cc2 = port->cc2;
4959 port->cc1 = cc1;
4960 port->cc2 = cc2;
4961
4962 tcpm_log_force(port,
4963 "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
4964 old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
4965 port->polarity,
4966 tcpm_port_is_disconnected(port) ? "disconnected"
4967 : "connected");
4968
4969 switch (port->state) {
4970 case TOGGLING:
4971 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
4972 tcpm_port_is_source(port))
4973 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
4974 else if (tcpm_port_is_sink(port))
4975 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
4976 break;
4977 case CHECK_CONTAMINANT:
4978 /* Wait for Toggling to be resumed */
4979 break;
4980 case SRC_UNATTACHED:
4981 case ACC_UNATTACHED:
4982 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
4983 tcpm_port_is_source(port))
4984 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
4985 break;
4986 case SRC_ATTACH_WAIT:
4987 if (tcpm_port_is_disconnected(port) ||
4988 tcpm_port_is_audio_detached(port))
4989 tcpm_set_state(port, SRC_UNATTACHED, 0);
4990 else if (cc1 != old_cc1 || cc2 != old_cc2)
4991 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
4992 break;
4993 case SRC_ATTACHED:
4994 case SRC_STARTUP:
4995 case SRC_SEND_CAPABILITIES:
4996 case SRC_READY:
4997 if (tcpm_port_is_disconnected(port) ||
4998 !tcpm_port_is_source(port)) {
4999 if (port->port_type == TYPEC_PORT_SRC)
5000 tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
5001 else
5002 tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
5003 }
5004 break;
5005 case SNK_UNATTACHED:
5006 if (tcpm_port_is_sink(port))
5007 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5008 break;
5009 case SNK_ATTACH_WAIT:
5010 if ((port->cc1 == TYPEC_CC_OPEN &&
5011 port->cc2 != TYPEC_CC_OPEN) ||
5012 (port->cc1 != TYPEC_CC_OPEN &&
5013 port->cc2 == TYPEC_CC_OPEN))
5014 new_state = SNK_DEBOUNCED;
5015 else if (tcpm_port_is_disconnected(port))
5016 new_state = SNK_UNATTACHED;
5017 else
5018 break;
5019 if (new_state != port->delayed_state)
5020 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5021 break;
5022 case SNK_DEBOUNCED:
5023 if (tcpm_port_is_disconnected(port))
5024 new_state = SNK_UNATTACHED;
5025 else if (port->vbus_present)
5026 new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
5027 else
5028 new_state = SNK_UNATTACHED;
5029 if (new_state != port->delayed_state)
5030 tcpm_set_state(port, SNK_DEBOUNCED, 0);
5031 break;
5032 case SNK_READY:
5033 /*
5034 * EXIT condition is based primarily on vbus disconnect and CC is secondary.
5035 * "A port that has entered into USB PD communications with the Source and
5036 * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
5037 * cable disconnect in addition to monitoring VBUS.
5038 *
5039 * A port that is monitoring the CC voltage for disconnect (but is not in
5040 * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
5041 * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
5042 * vRd-USB for tPDDebounce."
5043 *
5044 * When set_auto_vbus_discharge_threshold is enabled, CC pins go
5045 * away before vbus decays to disconnect threshold. Allow
5046 * disconnect to be driven by vbus disconnect when auto vbus
5047 * discharge is enabled.
5048 */
5049 if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
5050 tcpm_set_state(port, unattached_state(port), 0);
5051 else if (!port->pd_capable &&
5052 (cc1 != old_cc1 || cc2 != old_cc2))
5053 tcpm_set_current_limit(port,
5054 tcpm_get_current_limit(port),
5055 5000);
5056 break;
5057
5058 case AUDIO_ACC_ATTACHED:
5059 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
5060 tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
5061 break;
5062 case AUDIO_ACC_DEBOUNCE:
5063 if (tcpm_port_is_audio(port))
5064 tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
5065 break;
5066
5067 case DEBUG_ACC_ATTACHED:
5068 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
5069 tcpm_set_state(port, ACC_UNATTACHED, 0);
5070 break;
5071
5072 case SNK_TRY:
5073 /* Do nothing, waiting for timeout */
5074 break;
5075
5076 case SNK_DISCOVERY:
5077 /* CC line is unstable, wait for debounce */
5078 if (tcpm_port_is_disconnected(port))
5079 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
5080 break;
5081 case SNK_DISCOVERY_DEBOUNCE:
5082 break;
5083
5084 case SRC_TRYWAIT:
5085 /* Hand over to state machine if needed */
5086 if (!port->vbus_present && tcpm_port_is_source(port))
5087 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
5088 break;
5089 case SRC_TRYWAIT_DEBOUNCE:
5090 if (port->vbus_present || !tcpm_port_is_source(port))
5091 tcpm_set_state(port, SRC_TRYWAIT, 0);
5092 break;
5093 case SNK_TRY_WAIT_DEBOUNCE:
5094 if (!tcpm_port_is_sink(port)) {
5095 port->max_wait = 0;
5096 tcpm_set_state(port, SRC_TRYWAIT, 0);
5097 }
5098 break;
5099 case SRC_TRY_WAIT:
5100 if (tcpm_port_is_source(port))
5101 tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
5102 break;
5103 case SRC_TRY_DEBOUNCE:
5104 tcpm_set_state(port, SRC_TRY_WAIT, 0);
5105 break;
5106 case SNK_TRYWAIT_DEBOUNCE:
5107 if (tcpm_port_is_sink(port))
5108 tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
5109 break;
5110 case SNK_TRYWAIT_VBUS:
5111 if (!tcpm_port_is_sink(port))
5112 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
5113 break;
5114 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
5115 if (!tcpm_port_is_sink(port))
5116 tcpm_set_state(port, SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE);
5117 else
5118 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 0);
5119 break;
5120 case SNK_TRYWAIT:
5121 /* Do nothing, waiting for tCCDebounce */
5122 break;
5123 case PR_SWAP_SNK_SRC_SINK_OFF:
5124 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
5125 case PR_SWAP_SRC_SNK_SOURCE_OFF:
5126 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
5127 case PR_SWAP_SNK_SRC_SOURCE_ON:
5128 /*
5129 * CC state change is expected in PR_SWAP
5130 * Ignore it.
5131 */
5132 break;
5133 case FR_SWAP_SEND:
5134 case FR_SWAP_SEND_TIMEOUT:
5135 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5136 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5137 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5138 /* Do nothing, CC change expected */
5139 break;
5140
5141 case PORT_RESET:
5142 case PORT_RESET_WAIT_OFF:
5143 /*
5144 * State set back to default mode once the timer completes.
5145 * Ignore CC changes here.
5146 */
5147 break;
5148 default:
5149 /*
5150 * While acting as sink and auto vbus discharge is enabled, allow
5151 * disconnect to be driven by vbus disconnect.
5152 */
5153 if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
5154 port->auto_vbus_discharge_enabled))
5155 tcpm_set_state(port, unattached_state(port), 0);
5156 break;
5157 }
5158 }
5159
5160 static void _tcpm_pd_vbus_on(struct tcpm_port *port)
5161 {
5162 tcpm_log_force(port, "VBUS on");
5163 port->vbus_present = true;
5164 /*
5165 * vbus_present being true, i.e. the voltage at VBUS being greater than VSAFE5V,
5166 * implicitly means that vbus is not at VSAFE0V, hence clear the vbus_vsafe0v flag here.
5167 */
5168 port->vbus_vsafe0v = false;
5169
5170 switch (port->state) {
5171 case SNK_TRANSITION_SINK_VBUS:
5172 port->explicit_contract = true;
5173 tcpm_set_state(port, SNK_READY, 0);
5174 break;
5175 case SNK_DISCOVERY:
5176 tcpm_set_state(port, SNK_DISCOVERY, 0);
5177 break;
5178
5179 case SNK_DEBOUNCED:
5180 tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
5181 : SNK_ATTACHED,
5182 0);
5183 break;
5184 case SNK_HARD_RESET_WAIT_VBUS:
5185 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
5186 break;
5187 case SRC_ATTACHED:
5188 tcpm_set_state(port, SRC_STARTUP, 0);
5189 break;
5190 case SRC_HARD_RESET_VBUS_ON:
5191 tcpm_set_state(port, SRC_STARTUP, 0);
5192 break;
5193
5194 case SNK_TRY:
5195 /* Do nothing, waiting for timeout */
5196 break;
5197 case SRC_TRYWAIT:
5198 /* Do nothing, waiting for Rd to be detected */
5199 break;
5200 case SRC_TRYWAIT_DEBOUNCE:
5201 tcpm_set_state(port, SRC_TRYWAIT, 0);
5202 break;
5203 case SNK_TRY_WAIT_DEBOUNCE:
5204 /* Do nothing, waiting for PD_DEBOUNCE to be done */
5205 break;
5206 case SNK_TRYWAIT:
5207 /* Do nothing, waiting for tCCDebounce */
5208 break;
5209 case SNK_TRYWAIT_VBUS:
5210 if (tcpm_port_is_sink(port))
5211 tcpm_set_state(port, SNK_ATTACHED, 0);
5212 break;
5213 case SNK_TRYWAIT_DEBOUNCE:
5214 /* Do nothing, waiting for Rp */
5215 break;
5216 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
5217 if (port->vbus_present && tcpm_port_is_sink(port))
5218 tcpm_set_state(port, SNK_ATTACHED, 0);
5219 break;
5220 case SRC_TRY_WAIT:
5221 case SRC_TRY_DEBOUNCE:
5222 /* Do nothing, waiting for sink detection */
5223 break;
5224 case FR_SWAP_SEND:
5225 case FR_SWAP_SEND_TIMEOUT:
5226 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5227 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5228 if (port->tcpc->frs_sourcing_vbus)
5229 port->tcpc->frs_sourcing_vbus(port->tcpc);
5230 break;
5231 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5232 if (port->tcpc->frs_sourcing_vbus)
5233 port->tcpc->frs_sourcing_vbus(port->tcpc);
5234 tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
5235 break;
5236
5237 case PORT_RESET:
5238 case PORT_RESET_WAIT_OFF:
5239 /*
5240 * State set back to default mode once the timer completes.
5241 * Ignore vbus changes here.
5242 */
5243 break;
5244
5245 default:
5246 break;
5247 }
5248 }
5249
5250 static void _tcpm_pd_vbus_off(struct tcpm_port *port)
5251 {
5252 tcpm_log_force(port, "VBUS off");
5253 port->vbus_present = false;
5254 port->vbus_never_low = false;
5255 switch (port->state) {
5256 case SNK_HARD_RESET_SINK_OFF:
5257 tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
5258 break;
5259 case HARD_RESET_SEND:
5260 break;
5261 case SNK_TRY:
5262 /* Do nothing, waiting for timeout */
5263 break;
5264 case SRC_TRYWAIT:
5265 /* Hand over to state machine if needed */
5266 if (tcpm_port_is_source(port))
5267 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
5268 break;
5269 case SNK_TRY_WAIT_DEBOUNCE:
5270 /* Do nothing, waiting for PD_DEBOUNCE to be done */
5271 break;
5272 case SNK_TRYWAIT:
5273 case SNK_TRYWAIT_VBUS:
5274 case SNK_TRYWAIT_DEBOUNCE:
5275 break;
5276 case SNK_ATTACH_WAIT:
5277 case SNK_DEBOUNCED:
5278 /* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
5279 break;
5280
5281 case SNK_NEGOTIATE_CAPABILITIES:
5282 break;
5283
5284 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
5285 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
5286 break;
5287
5288 case PR_SWAP_SNK_SRC_SINK_OFF:
5289 /* Do nothing, expected */
5290 break;
5291
5292 case PR_SWAP_SNK_SRC_SOURCE_ON:
5293 /*
5294 * Do nothing when vbus off notification is received.
5295 * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON
5296 * for the vbus source to ramp up.
5297 */
5298 break;
5299
5300 case PORT_RESET_WAIT_OFF:
5301 tcpm_set_state(port, tcpm_default_state(port), 0);
5302 break;
5303
5304 case SRC_TRY_WAIT:
5305 case SRC_TRY_DEBOUNCE:
5306 /* Do nothing, waiting for sink detection */
5307 break;
5308
5309 case SRC_STARTUP:
5310 case SRC_SEND_CAPABILITIES:
5311 case SRC_SEND_CAPABILITIES_TIMEOUT:
5312 case SRC_NEGOTIATE_CAPABILITIES:
5313 case SRC_TRANSITION_SUPPLY:
5314 case SRC_READY:
5315 case SRC_WAIT_NEW_CAPABILITIES:
5316 /*
5317 * Force to unattached state to re-initiate connection.
5318 * DRP port should move to Unattached.SNK instead of Unattached.SRC if
5319 * sink removed. Although sink removal here is due to source's vbus collapse,
5320 * treat it the same way for consistency.
5321 */
5322 if (port->port_type == TYPEC_PORT_SRC)
5323 tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
5324 else
5325 tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
5326 break;
5327
5328 case PORT_RESET:
5329 /*
5330 * State set back to default mode once the timer completes.
5331 * Ignore vbus changes here.
5332 */
5333 break;
5334
5335 case FR_SWAP_SEND:
5336 case FR_SWAP_SEND_TIMEOUT:
5337 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5338 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5339 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5340 /* Do nothing, vbus drop expected */
5341 break;
5342
5343 case SNK_HARD_RESET_WAIT_VBUS:
5344 /* Do nothing, it's OK to receive vbus off events */
5345 break;
5346
5347 default:
5348 if (port->pwr_role == TYPEC_SINK && port->attached)
5349 tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
5350 break;
5351 }
5352 }
5353
5354 static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
5355 {
5356 tcpm_log_force(port, "VBUS VSAFE0V");
5357 port->vbus_vsafe0v = true;
5358 switch (port->state) {
5359 case SRC_HARD_RESET_VBUS_OFF:
5360 /*
5361 * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
5362 * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
5363 */
5364 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
5365 break;
5366 case SRC_ATTACH_WAIT:
5367 if (tcpm_port_is_source(port))
5368 tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
5369 PD_T_CC_DEBOUNCE);
5370 break;
5371 case SRC_STARTUP:
5372 case SRC_SEND_CAPABILITIES:
5373 case SRC_SEND_CAPABILITIES_TIMEOUT:
5374 case SRC_NEGOTIATE_CAPABILITIES:
5375 case SRC_TRANSITION_SUPPLY:
5376 case SRC_READY:
5377 case SRC_WAIT_NEW_CAPABILITIES:
5378 if (port->auto_vbus_discharge_enabled) {
5379 if (port->port_type == TYPEC_PORT_SRC)
5380 tcpm_set_state(port, SRC_UNATTACHED, 0);
5381 else
5382 tcpm_set_state(port, SNK_UNATTACHED, 0);
5383 }
5384 break;
5385 case PR_SWAP_SNK_SRC_SINK_OFF:
5386 case PR_SWAP_SNK_SRC_SOURCE_ON:
5387 /* Do nothing, vsafe0v is expected during transition */
5388 break;
5389 case SNK_ATTACH_WAIT:
5390 case SNK_DEBOUNCED:
5391 /* Do nothing, still waiting for VSAFE5V to connect */
5392 break;
5393 case SNK_HARD_RESET_WAIT_VBUS:
5394 /* Do nothing, it's OK to receive vbus off events */
5395 break;
5396 default:
5397 if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
5398 tcpm_set_state(port, SNK_UNATTACHED, 0);
5399 break;
5400 }
5401 }
5402
5403 static void _tcpm_pd_hard_reset(struct tcpm_port *port)
5404 {
5405 tcpm_log_force(port, "Received hard reset");
5406 if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
5407 port->tcpc->set_bist_data(port->tcpc, false);
5408
5409 switch (port->state) {
5410 case ERROR_RECOVERY:
5411 case PORT_RESET:
5412 case PORT_RESET_WAIT_OFF:
5413 return;
5414 default:
5415 break;
5416 }
5417
5418 if (port->ams != NONE_AMS)
5419 port->ams = NONE_AMS;
5420 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
5421 port->ams = HARD_RESET;
5422 /*
5423 * If we keep receiving hard reset requests, executing the hard reset
5424 * must have failed. Revert to error recovery if that happens.
5425 */
5426 tcpm_set_state(port,
5427 port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
5428 HARD_RESET_START : ERROR_RECOVERY,
5429 0);
5430 }
5431
5432 static void tcpm_pd_event_handler(struct kthread_work *work)
5433 {
5434 struct tcpm_port *port = container_of(work, struct tcpm_port,
5435 event_work);
5436 u32 events;
5437
5438 mutex_lock(&port->lock);
5439
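/*
 * Drain the pending event mask under pd_event_lock: snapshot and clear it,
 * then drop the spinlock while handling so producers can queue new events
 * in the meantime.
 */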
5440 spin_lock(&port->pd_event_lock);
5441 while (port->pd_events) {
5442 events = port->pd_events;
5443 port->pd_events = 0;
5444 spin_unlock(&port->pd_event_lock);
5445 if (events & TCPM_RESET_EVENT)
5446 _tcpm_pd_hard_reset(port);
5447 if (events & TCPM_VBUS_EVENT) {
5448 bool vbus;
5449
5450 vbus = port->tcpc->get_vbus(port->tcpc);
5451 if (vbus) {
5452 _tcpm_pd_vbus_on(port);
5453 } else {
5454 _tcpm_pd_vbus_off(port);
5455 /*
5456 * When TCPC does not support detecting vsafe0v voltage level,
5457 * treat vbus absent as vsafe0v. Otherwise invoke is_vbus_vsafe0v
5458 * to see if vbus has discharged to VSAFE0V.
5459 */
5460 if (!port->tcpc->is_vbus_vsafe0v ||
5461 port->tcpc->is_vbus_vsafe0v(port->tcpc))
5462 _tcpm_pd_vbus_vsafe0v(port);
5463 }
5464 }
5465 if (events & TCPM_CC_EVENT) {
5466 enum typec_cc_status cc1, cc2;
5467
5468 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
5469 _tcpm_cc_change(port, cc1, cc2);
5470 }
5471 if (events & TCPM_FRS_EVENT) {
5472 if (port->state == SNK_READY) {
5473 int ret;
5474
5475 port->upcoming_state = FR_SWAP_SEND;
5476 ret = tcpm_ams_start(port, FAST_ROLE_SWAP);
5477 if (ret == -EAGAIN)
5478 port->upcoming_state = INVALID_STATE;
5479 } else {
5480 tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
5481 }
5482 }
5483 if (events & TCPM_SOURCING_VBUS) {
5484 tcpm_log(port, "sourcing vbus");
5485 /*
5486 * In the fast role swap case the TCPC autonomously sources vbus. Set vbus_source
5487 * true as TCPM wouldn't have called tcpm_set_vbus.
5488 *
5489 * When vbus is sourced at the command of TCPM, i.e. TCPM called
5490 * tcpm_set_vbus to source vbus, vbus_source would already be true.
5491 */
5492 port->vbus_source = true;
5493 _tcpm_pd_vbus_on(port);
5494 }
5495 if (events & TCPM_PORT_CLEAN) {
5496 tcpm_log(port, "port clean");
5497 if (port->state == CHECK_CONTAMINANT) {
5498 if (tcpm_start_toggling(port, tcpm_rp_cc(port)))
5499 tcpm_set_state(port, TOGGLING, 0);
5500 else
5501 tcpm_set_state(port, tcpm_default_state(port), 0);
5502 }
5503 }
5504 if (events & TCPM_PORT_ERROR) {
5505 tcpm_log(port, "port triggering error recovery");
5506 tcpm_set_state(port, ERROR_RECOVERY, 0);
5507 }
5508
5509 spin_lock(&port->pd_event_lock);
5510 }
5511 spin_unlock(&port->pd_event_lock);
5512 mutex_unlock(&port->lock);
5513 }
5514
5515 void tcpm_cc_change(struct tcpm_port *port)
5516 {
5517 spin_lock(&port->pd_event_lock);
5518 port->pd_events |= TCPM_CC_EVENT;
5519 spin_unlock(&port->pd_event_lock);
5520 kthread_queue_work(port->wq, &port->event_work);
5521 }
5522 EXPORT_SYMBOL_GPL(tcpm_cc_change);
5523
5524 void tcpm_vbus_change(struct tcpm_port *port)
5525 {
5526 spin_lock(&port->pd_event_lock);
5527 port->pd_events |= TCPM_VBUS_EVENT;
5528 spin_unlock(&port->pd_event_lock);
5529 kthread_queue_work(port->wq, &port->event_work);
5530 }
5531 EXPORT_SYMBOL_GPL(tcpm_vbus_change);
5532
5533 void tcpm_pd_hard_reset(struct tcpm_port *port)
5534 {
5535 spin_lock(&port->pd_event_lock);
5536 port->pd_events = TCPM_RESET_EVENT;
5537 spin_unlock(&port->pd_event_lock);
5538 kthread_queue_work(port->wq, &port->event_work);
5539 }
5540 EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
5541
5542 void tcpm_sink_frs(struct tcpm_port *port)
5543 {
5544 spin_lock(&port->pd_event_lock);
5545 port->pd_events |= TCPM_FRS_EVENT;
5546 spin_unlock(&port->pd_event_lock);
5547 kthread_queue_work(port->wq, &port->event_work);
5548 }
5549 EXPORT_SYMBOL_GPL(tcpm_sink_frs);
5550
5551 void tcpm_sourcing_vbus(struct tcpm_port *port)
5552 {
5553 spin_lock(&port->pd_event_lock);
5554 port->pd_events |= TCPM_SOURCING_VBUS;
5555 spin_unlock(&port->pd_event_lock);
5556 kthread_queue_work(port->wq, &port->event_work);
5557 }
5558 EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
5559
5560 void tcpm_port_clean(struct tcpm_port *port)
5561 {
5562 spin_lock(&port->pd_event_lock);
5563 port->pd_events |= TCPM_PORT_CLEAN;
5564 spin_unlock(&port->pd_event_lock);
5565 kthread_queue_work(port->wq, &port->event_work);
5566 }
5567 EXPORT_SYMBOL_GPL(tcpm_port_clean);
5568
5569 bool tcpm_port_is_toggling(struct tcpm_port *port)
5570 {
5571 return port->port_type == TYPEC_PORT_DRP && port->state == TOGGLING;
5572 }
5573 EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
5574
5575 void tcpm_port_error_recovery(struct tcpm_port *port)
5576 {
5577 spin_lock(&port->pd_event_lock);
5578 port->pd_events |= TCPM_PORT_ERROR;
5579 spin_unlock(&port->pd_event_lock);
5580 kthread_queue_work(port->wq, &port->event_work);
5581 }
5582 EXPORT_SYMBOL_GPL(tcpm_port_error_recovery);
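/*
 * Illustrative sketch (not part of this driver): a low-level TCPC driver is
 * expected to forward its interrupt status to TCPM through the helpers
 * above. Assuming a hypothetical "my_tcpc" chip driver with made-up
 * MY_ALERT_* status bits and a my_tcpc_read_alert() register accessor, its
 * interrupt handler could look roughly like:
 *
 *	static irqreturn_t my_tcpc_irq(int irq, void *dev_id)
 *	{
 *		struct my_tcpc *chip = dev_id;
 *		u16 alert = my_tcpc_read_alert(chip);
 *
 *		if (alert & MY_ALERT_CC_STATUS)
 *			tcpm_cc_change(chip->tcpm_port);
 *		if (alert & MY_ALERT_VBUS)
 *			tcpm_vbus_change(chip->tcpm_port);
 *		if (alert & MY_ALERT_RX_HARD_RESET)
 *			tcpm_pd_hard_reset(chip->tcpm_port);
 *
 *		return IRQ_HANDLED;
 *	}
 *
 * The helpers only record the event in pd_events under pd_event_lock and
 * queue event_work on the port's kthread worker; TCPC drivers typically
 * call them from their (threaded) interrupt handlers.
 */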
5583
5584 static void tcpm_enable_frs_work(struct kthread_work *work)
5585 {
5586 struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
5587 int ret;
5588
5589 mutex_lock(&port->lock);
5590 /* Not FRS capable */
5591 if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
5592 port->pwr_opmode != TYPEC_PWR_MODE_PD ||
5593 !port->tcpc->enable_frs ||
5594 /* Sink caps queried */
5595 port->sink_cap_done || port->negotiated_rev < PD_REV30)
5596 goto unlock;
5597
5598 /* Send when the state machine is idle */
5599 if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover)
5600 goto resched;
5601
5602 port->upcoming_state = GET_SINK_CAP;
5603 ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES);
5604 if (ret == -EAGAIN) {
5605 port->upcoming_state = INVALID_STATE;
5606 } else {
5607 port->sink_cap_done = true;
5608 goto unlock;
5609 }
5610 resched:
5611 mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
5612 unlock:
5613 mutex_unlock(&port->lock);
5614 }
5615
5616 static void tcpm_send_discover_work(struct kthread_work *work)
5617 {
5618 struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
5619
5620 mutex_lock(&port->lock);
5621 /* No need to send DISCOVER_IDENTITY anymore */
5622 if (!port->send_discover)
5623 goto unlock;
5624
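/* With PD 2.0 only the DFP may send Discover Identity, so a UFP gives up here */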
5625 if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
5626 port->send_discover = false;
5627 goto unlock;
5628 }
5629
5630 /* Retry if the port is not idle */
5631 if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
5632 mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
5633 goto unlock;
5634 }
5635
5636 tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
5637
5638 unlock:
5639 mutex_unlock(&port->lock);
5640 }
5641
5642 static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
5643 {
5644 struct tcpm_port *port = typec_get_drvdata(p);
5645 int ret;
5646
5647 mutex_lock(&port->swap_lock);
5648 mutex_lock(&port->lock);
5649
5650 if (port->typec_caps.data != TYPEC_PORT_DRD) {
5651 ret = -EINVAL;
5652 goto port_unlock;
5653 }
5654 if (port->state != SRC_READY && port->state != SNK_READY) {
5655 ret = -EAGAIN;
5656 goto port_unlock;
5657 }
5658
5659 if (port->data_role == data) {
5660 ret = 0;
5661 goto port_unlock;
5662 }
5663
5664 /*
5665 * XXX
5666 * 6.3.9: If an alternate mode is active, a request to swap
5667 * alternate modes shall trigger a port reset.
5668 * Reject data role swap request in this case.
5669 */
5670
5671 if (!port->pd_capable) {
5672 /*
5673 * If the partner is not PD capable, reset the port to
5674 * trigger a role change. This can only work if a preferred
5675 * role is configured, and if it matches the requested role.
5676 */
5677 if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
5678 port->try_role == port->pwr_role) {
5679 ret = -EINVAL;
5680 goto port_unlock;
5681 }
5682 port->non_pd_role_swap = true;
5683 tcpm_set_state(port, PORT_RESET, 0);
5684 } else {
5685 port->upcoming_state = DR_SWAP_SEND;
5686 ret = tcpm_ams_start(port, DATA_ROLE_SWAP);
5687 if (ret == -EAGAIN) {
5688 port->upcoming_state = INVALID_STATE;
5689 goto port_unlock;
5690 }
5691 }
5692
5693 port->swap_status = 0;
5694 port->swap_pending = true;
5695 reinit_completion(&port->swap_complete);
5696 mutex_unlock(&port->lock);
5697
5698 if (!wait_for_completion_timeout(&port->swap_complete,
5699 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
5700 ret = -ETIMEDOUT;
5701 else
5702 ret = port->swap_status;
5703
5704 port->non_pd_role_swap = false;
5705 goto swap_unlock;
5706
5707 port_unlock:
5708 mutex_unlock(&port->lock);
5709 swap_unlock:
5710 mutex_unlock(&port->swap_lock);
5711 return ret;
5712 }
5713
5714 static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
5715 {
5716 struct tcpm_port *port = typec_get_drvdata(p);
5717 int ret;
5718
5719 mutex_lock(&port->swap_lock);
5720 mutex_lock(&port->lock);
5721
5722 if (port->port_type != TYPEC_PORT_DRP) {
5723 ret = -EINVAL;
5724 goto port_unlock;
5725 }
5726 if (port->state != SRC_READY && port->state != SNK_READY) {
5727 ret = -EAGAIN;
5728 goto port_unlock;
5729 }
5730
5731 if (role == port->pwr_role) {
5732 ret = 0;
5733 goto port_unlock;
5734 }
5735
5736 port->upcoming_state = PR_SWAP_SEND;
5737 ret = tcpm_ams_start(port, POWER_ROLE_SWAP);
5738 if (ret == -EAGAIN) {
5739 port->upcoming_state = INVALID_STATE;
5740 goto port_unlock;
5741 }
5742
5743 port->swap_status = 0;
5744 port->swap_pending = true;
5745 reinit_completion(&port->swap_complete);
5746 mutex_unlock(&port->lock);
5747
5748 if (!wait_for_completion_timeout(&port->swap_complete,
5749 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
5750 ret = -ETIMEDOUT;
5751 else
5752 ret = port->swap_status;
5753
5754 goto swap_unlock;
5755
5756 port_unlock:
5757 mutex_unlock(&port->lock);
5758 swap_unlock:
5759 mutex_unlock(&port->swap_lock);
5760 return ret;
5761 }
5762
5763 static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
5764 {
5765 struct tcpm_port *port = typec_get_drvdata(p);
5766 int ret;
5767
5768 mutex_lock(&port->swap_lock);
5769 mutex_lock(&port->lock);
5770
5771 if (port->state != SRC_READY && port->state != SNK_READY) {
5772 ret = -EAGAIN;
5773 goto port_unlock;
5774 }
5775
5776 if (role == port->vconn_role) {
5777 ret = 0;
5778 goto port_unlock;
5779 }
5780
5781 port->upcoming_state = VCONN_SWAP_SEND;
5782 ret = tcpm_ams_start(port, VCONN_SWAP);
5783 if (ret == -EAGAIN) {
5784 port->upcoming_state = INVALID_STATE;
5785 goto port_unlock;
5786 }
5787
5788 port->swap_status = 0;
5789 port->swap_pending = true;
5790 reinit_completion(&port->swap_complete);
5791 mutex_unlock(&port->lock);
5792
5793 if (!wait_for_completion_timeout(&port->swap_complete,
5794 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
5795 ret = -ETIMEDOUT;
5796 else
5797 ret = port->swap_status;
5798
5799 goto swap_unlock;
5800
5801 port_unlock:
5802 mutex_unlock(&port->lock);
5803 swap_unlock:
5804 mutex_unlock(&port->swap_lock);
5805 return ret;
5806 }
5807
5808 static int tcpm_try_role(struct typec_port *p, int role)
5809 {
5810 struct tcpm_port *port = typec_get_drvdata(p);
5811 struct tcpc_dev *tcpc = port->tcpc;
5812 int ret = 0;
5813
5814 mutex_lock(&port->lock);
5815 if (tcpc->try_role)
5816 ret = tcpc->try_role(tcpc, role);
5817 if (!ret)
5818 port->try_role = role;
5819 port->try_src_count = 0;
5820 port->try_snk_count = 0;
5821 mutex_unlock(&port->lock);
5822
5823 return ret;
5824 }
5825
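/*
 * Request a new PPS operating current. Only valid while a PPS contract is
 * active and the port is in SNK_READY; the value is rounded down to the RDO
 * current step and renegotiated via SNK_NEGOTIATE_PPS_CAPABILITIES.
 */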
5826 static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
5827 {
5828 unsigned int target_mw;
5829 int ret;
5830
5831 mutex_lock(&port->swap_lock);
5832 mutex_lock(&port->lock);
5833
5834 if (!port->pps_data.active) {
5835 ret = -EOPNOTSUPP;
5836 goto port_unlock;
5837 }
5838
5839 if (port->state != SNK_READY) {
5840 ret = -EAGAIN;
5841 goto port_unlock;
5842 }
5843
5844 if (req_op_curr > port->pps_data.max_curr) {
5845 ret = -EINVAL;
5846 goto port_unlock;
5847 }
5848
5849 target_mw = (req_op_curr * port->supply_voltage) / 1000;
5850 if (target_mw < port->operating_snk_mw) {
5851 ret = -EINVAL;
5852 goto port_unlock;
5853 }
5854
5855 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
5856 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
5857 if (ret == -EAGAIN) {
5858 port->upcoming_state = INVALID_STATE;
5859 goto port_unlock;
5860 }
5861
5862 /* Round down operating current to align with PPS valid steps */
5863 req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
5864
5865 reinit_completion(&port->pps_complete);
5866 port->pps_data.req_op_curr = req_op_curr;
5867 port->pps_status = 0;
5868 port->pps_pending = true;
5869 mutex_unlock(&port->lock);
5870
5871 if (!wait_for_completion_timeout(&port->pps_complete,
5872 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
5873 ret = -ETIMEDOUT;
5874 else
5875 ret = port->pps_status;
5876
5877 goto swap_unlock;
5878
5879 port_unlock:
5880 mutex_unlock(&port->lock);
5881 swap_unlock:
5882 mutex_unlock(&port->swap_lock);
5883
5884 return ret;
5885 }
5886
5887 static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
5888 {
5889 unsigned int target_mw;
5890 int ret;
5891
5892 mutex_lock(&port->swap_lock);
5893 mutex_lock(&port->lock);
5894
5895 if (!port->pps_data.active) {
5896 ret = -EOPNOTSUPP;
5897 goto port_unlock;
5898 }
5899
5900 if (port->state != SNK_READY) {
5901 ret = -EAGAIN;
5902 goto port_unlock;
5903 }
5904
5905 target_mw = (port->current_limit * req_out_volt) / 1000;
5906 if (target_mw < port->operating_snk_mw) {
5907 ret = -EINVAL;
5908 goto port_unlock;
5909 }
5910
5911 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
5912 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
5913 if (ret == -EAGAIN) {
5914 port->upcoming_state = INVALID_STATE;
5915 goto port_unlock;
5916 }
5917
5918 /* Round down output voltage to align with PPS valid steps */
5919 req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
5920
5921 reinit_completion(&port->pps_complete);
5922 port->pps_data.req_out_volt = req_out_volt;
5923 port->pps_status = 0;
5924 port->pps_pending = true;
5925 mutex_unlock(&port->lock);
5926
5927 if (!wait_for_completion_timeout(&port->pps_complete,
5928 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
5929 ret = -ETIMEDOUT;
5930 else
5931 ret = port->pps_status;
5932
5933 goto swap_unlock;
5934
5935 port_unlock:
5936 mutex_unlock(&port->lock);
5937 swap_unlock:
5938 mutex_unlock(&port->swap_lock);
5939
5940 return ret;
5941 }
5942
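/*
 * Switch between a PPS (programmable) contract and a standard fixed PDO
 * contract; exposed to user space through the power supply "online" property.
 */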
5943 static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
5944 {
5945 int ret = 0;
5946
5947 mutex_lock(&port->swap_lock);
5948 mutex_lock(&port->lock);
5949
5950 if (!port->pps_data.supported) {
5951 ret = -EOPNOTSUPP;
5952 goto port_unlock;
5953 }
5954
5955 /* Trying to deactivate PPS when it is already inactive, so just bail */

5956 if (!port->pps_data.active && !activate)
5957 goto port_unlock;
5958
5959 if (port->state != SNK_READY) {
5960 ret = -EAGAIN;
5961 goto port_unlock;
5962 }
5963
5964 if (activate)
5965 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
5966 else
5967 port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
5968 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
5969 if (ret == -EAGAIN) {
5970 port->upcoming_state = INVALID_STATE;
5971 goto port_unlock;
5972 }
5973
5974 reinit_completion(&port->pps_complete);
5975 port->pps_status = 0;
5976 port->pps_pending = true;
5977
5978 /* Trigger PPS request or move back to standard PDO contract */
5979 if (activate) {
5980 port->pps_data.req_out_volt = port->supply_voltage;
5981 port->pps_data.req_op_curr = port->current_limit;
5982 }
5983 mutex_unlock(&port->lock);
5984
5985 if (!wait_for_completion_timeout(&port->pps_complete,
5986 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
5987 ret = -ETIMEDOUT;
5988 else
5989 ret = port->pps_status;
5990
5991 goto swap_unlock;
5992
5993 port_unlock:
5994 mutex_unlock(&port->lock);
5995 swap_unlock:
5996 mutex_unlock(&port->swap_lock);
5997
5998 return ret;
5999 }
6000
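/*
 * (Re)initialize the port: reset the TCPC and the port state, sample the
 * initial VBUS and CC status, then force a port reset so that both ends start
 * from a clean disconnect.
 */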
6001 static void tcpm_init(struct tcpm_port *port)
6002 {
6003 enum typec_cc_status cc1, cc2;
6004
6005 port->tcpc->init(port->tcpc);
6006
6007 tcpm_reset_port(port);
6008
6009 /*
6010 * XXX
6011 * Should possibly wait for VBUS to settle if it was enabled locally
6012 * since tcpm_reset_port() will disable VBUS.
6013 */
6014 port->vbus_present = port->tcpc->get_vbus(port->tcpc);
6015 if (port->vbus_present)
6016 port->vbus_never_low = true;
6017
6018 /*
6019 * 1. When vbus_present is true, voltage on VBUS is already at VSAFE5V.
6020 * So implicitly vbus_vsafe0v = false.
6021 *
6022 * 2. When vbus_present is false and the TCPC does NOT support querying
6023 * the vsafe0v status, it is best to assume VBUS is at VSAFE0V, i.e.
6024 * vbus_vsafe0v is true.
6025 *
6026 * 3. When vbus_present is false and the TCPC does support querying
6027 * vsafe0v, query the TCPC for the vsafe0v status.
6028 */
6029 if (port->vbus_present)
6030 port->vbus_vsafe0v = false;
6031 else if (!port->tcpc->is_vbus_vsafe0v)
6032 port->vbus_vsafe0v = true;
6033 else
6034 port->vbus_vsafe0v = port->tcpc->is_vbus_vsafe0v(port->tcpc);
6035
6036 tcpm_set_state(port, tcpm_default_state(port), 0);
6037
6038 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
6039 _tcpm_cc_change(port, cc1, cc2);
6040
6041 /*
6042 * Some adapters need a clean slate at startup, and won't recover
6043 * otherwise. So do not try to be fancy; simply force a clean disconnect.
6044 */
6045 tcpm_set_state(port, PORT_RESET, 0);
6046 }
6047
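/*
 * typec class "port_type_set" callback: change the port type (DRP, source or
 * sink). The port is reset unless the current roles already match the newly
 * requested fixed type.
 */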
6048 static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
6049 {
6050 struct tcpm_port *port = typec_get_drvdata(p);
6051
6052 mutex_lock(&port->lock);
6053 if (type == port->port_type)
6054 goto port_unlock;
6055
6056 port->port_type = type;
6057
6058 if (!port->connected) {
6059 tcpm_set_state(port, PORT_RESET, 0);
6060 } else if (type == TYPEC_PORT_SNK) {
6061 if (!(port->pwr_role == TYPEC_SINK &&
6062 port->data_role == TYPEC_DEVICE))
6063 tcpm_set_state(port, PORT_RESET, 0);
6064 } else if (type == TYPEC_PORT_SRC) {
6065 if (!(port->pwr_role == TYPEC_SOURCE &&
6066 port->data_role == TYPEC_HOST))
6067 tcpm_set_state(port, PORT_RESET, 0);
6068 }
6069
6070 port_unlock:
6071 mutex_unlock(&port->lock);
6072 return 0;
6073 }
6074
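/* Look up the pd_data entry that backs a given usb_power_delivery object. */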
6075 static struct pd_data *tcpm_find_pd_data(struct tcpm_port *port, struct usb_power_delivery *pd)
6076 {
6077 int i;
6078
6079 for (i = 0; i < port->pd_count && port->pd_list[i]; i++) {
6080 if (port->pd_list[i]->pd == pd)
6081 return port->pd_list[i];
6082 }
6083
6084 return ERR_PTR(-ENODATA);
6085 }
6086
6087 static struct usb_power_delivery **tcpm_pd_get(struct typec_port *p)
6088 {
6089 struct tcpm_port *port = typec_get_drvdata(p);
6090
6091 return port->pds;
6092 }
6093
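/*
 * typec class "pd_set" callback: switch the port to a different registered PD
 * capability set and, if a contract is already in place, trigger a new power
 * negotiation with the updated PDOs.
 */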
6094 static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
6095 {
6096 struct tcpm_port *port = typec_get_drvdata(p);
6097 struct pd_data *data;
6098 int i, ret = 0;
6099
6100 mutex_lock(&port->lock);
6101
6102 if (port->selected_pd == pd)
6103 goto unlock;
6104
6105 data = tcpm_find_pd_data(port, pd);
6106 if (IS_ERR(data)) {
6107 ret = PTR_ERR(data);
6108 goto unlock;
6109 }
6110
6111 if (data->sink_desc.pdo[0]) {
6112 for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
6113 port->snk_pdo[i] = data->sink_desc.pdo[i];
6114 port->nr_snk_pdo = i;
6115 port->operating_snk_mw = data->operating_snk_mw;
6116 }
6117
6118 if (data->source_desc.pdo[0]) {
6119 for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
6120 port->src_pdo[i] = data->source_desc.pdo[i];
6121 port->nr_src_pdo = i;
6122 }
6123
6124 switch (port->state) {
6125 case SRC_UNATTACHED:
6126 case SRC_ATTACH_WAIT:
6127 case SRC_TRYWAIT:
6128 tcpm_set_cc(port, tcpm_rp_cc(port));
6129 break;
6130 case SRC_SEND_CAPABILITIES:
6131 case SRC_SEND_CAPABILITIES_TIMEOUT:
6132 case SRC_NEGOTIATE_CAPABILITIES:
6133 case SRC_READY:
6134 case SRC_WAIT_NEW_CAPABILITIES:
6135 port->caps_count = 0;
6136 port->upcoming_state = SRC_SEND_CAPABILITIES;
6137 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6138 if (ret == -EAGAIN) {
6139 port->upcoming_state = INVALID_STATE;
6140 goto unlock;
6141 }
6142 break;
6143 case SNK_NEGOTIATE_CAPABILITIES:
6144 case SNK_NEGOTIATE_PPS_CAPABILITIES:
6145 case SNK_READY:
6146 case SNK_TRANSITION_SINK:
6147 case SNK_TRANSITION_SINK_VBUS:
6148 if (port->pps_data.active)
6149 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6150 else if (port->pd_capable)
6151 port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
6152 else
6153 break;
6154
6155 port->update_sink_caps = true;
6156
6157 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6158 if (ret == -EAGAIN) {
6159 port->upcoming_state = INVALID_STATE;
6160 goto unlock;
6161 }
6162 break;
6163 default:
6164 break;
6165 }
6166
6167 port->port_source_caps = data->source_cap;
6168 port->port_sink_caps = data->sink_cap;
6169 port->selected_pd = pd;
6170 unlock:
6171 mutex_unlock(&port->lock);
6172 return ret;
6173 }
6174
6175 static const struct typec_operations tcpm_ops = {
6176 .try_role = tcpm_try_role,
6177 .dr_set = tcpm_dr_set,
6178 .pr_set = tcpm_pr_set,
6179 .vconn_set = tcpm_vconn_set,
6180 .port_type_set = tcpm_port_type_set,
6181 .pd_get = tcpm_pd_get,
6182 .pd_set = tcpm_pd_set
6183 };
6184
6185 void tcpm_tcpc_reset(struct tcpm_port *port)
6186 {
6187 mutex_lock(&port->lock);
6188 /* XXX: Maintain PD connection if possible? */
6189 tcpm_init(port);
6190 mutex_unlock(&port->lock);
6191 }
6192 EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
6193
6194 static void tcpm_port_unregister_pd(struct tcpm_port *port)
6195 {
6196 int i;
6197
6198 port->port_sink_caps = NULL;
6199 port->port_source_caps = NULL;
6200 for (i = 0; i < port->pd_count; i++) {
6201 usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
6202 kfree(port->pd_list[i]->sink_cap);
6203 usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
6204 kfree(port->pd_list[i]->source_cap);
6205 devm_kfree(port->dev, port->pd_list[i]);
6206 port->pd_list[i] = NULL;
6207 usb_power_delivery_unregister(port->pds[i]);
6208 port->pds[i] = NULL;
6209 }
6210 }
6211
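/*
 * Register usb_power_delivery objects and their source/sink capabilities for
 * every PD capability set parsed from firmware; entry 0 becomes the initially
 * selected set.
 */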
6212 static int tcpm_port_register_pd(struct tcpm_port *port)
6213 {
6214 struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
6215 struct usb_power_delivery_capabilities *cap;
6216 int ret, i;
6217
6218 if (!port->nr_src_pdo && !port->nr_snk_pdo)
6219 return 0;
6220
6221 for (i = 0; i < port->pd_count; i++) {
6222 port->pds[i] = usb_power_delivery_register(port->dev, &desc);
6223 if (IS_ERR(port->pds[i])) {
6224 ret = PTR_ERR(port->pds[i]);
6225 goto err_unregister;
6226 }
6227 port->pd_list[i]->pd = port->pds[i];
6228
6229 if (port->pd_list[i]->source_desc.pdo[0]) {
6230 cap = usb_power_delivery_register_capabilities(port->pds[i],
6231 &port->pd_list[i]->source_desc);
6232 if (IS_ERR(cap)) {
6233 ret = PTR_ERR(cap);
6234 goto err_unregister;
6235 }
6236 port->pd_list[i]->source_cap = cap;
6237 }
6238
6239 if (port->pd_list[i]->sink_desc.pdo[0]) {
6240 cap = usb_power_delivery_register_capabilities(port->pds[i],
6241 &port->pd_list[i]->sink_desc);
6242 if (IS_ERR(cap)) {
6243 ret = PTR_ERR(cap);
6244 goto err_unregister;
6245 }
6246 port->pd_list[i]->sink_cap = cap;
6247 }
6248 }
6249
6250 port->port_source_caps = port->pd_list[0]->source_cap;
6251 port->port_sink_caps = port->pd_list[0]->sink_cap;
6252 port->selected_pd = port->pds[0];
6253 return 0;
6254
6255 err_unregister:
6256 tcpm_port_unregister_pd(port);
6257
6258 return ret;
6259 }
6260
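/*
 * Parse the port properties from the fwnode: accessory modes, port type,
 * sink/source PDOs and, optionally, a "capabilities" child node carrying
 * multiple PD capability sets.
 */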
6261 static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
6262 {
6263 struct fwnode_handle *capabilities, *child, *caps = NULL;
6264 unsigned int nr_src_pdo, nr_snk_pdo;
6265 const char *opmode_str;
6266 u32 *src_pdo, *snk_pdo;
6267 u32 uw, frs_current;
6268 int ret = 0, i;
6269 int mode;
6270
6271 if (!fwnode)
6272 return -EINVAL;
6273
6274 /*
6275 * This fwnode has a "compatible" property, but is never populated as a
6276 * struct device. Instead we simply parse it to read the properties.
6277 * This breaks fw_devlink=on. To maintain backward compatibility
6278 * with existing DT files, we work around this by deleting any
6279 * fwnode_links to/from this fwnode.
6280 */
6281 fw_devlink_purge_absent_suppliers(fwnode);
6282
6283 ret = typec_get_fw_cap(&port->typec_caps, fwnode);
6284 if (ret < 0)
6285 return ret;
6286
6287 mode = 0;
6288
6289 if (fwnode_property_read_bool(fwnode, "accessory-mode-audio"))
6290 port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_AUDIO;
6291
6292 if (fwnode_property_read_bool(fwnode, "accessory-mode-debug"))
6293 port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_DEBUG;
6294
6295 port->port_type = port->typec_caps.type;
6296 port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");
6297 port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
6298 port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
6299
6300 if (!port->pd_supported) {
6301 ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
6302 if (ret)
6303 return ret;
6304 ret = typec_find_pwr_opmode(opmode_str);
6305 if (ret < 0)
6306 return ret;
6307 port->src_rp = tcpm_pwr_opmode_to_rp(ret);
6308 return 0;
6309 }
6310
6311 /* The following code is applicable to PD-capable ports, i.e. pd_supported is true. */
6312
6313 /* FRS can only be supported by DRP ports */
6314 if (port->port_type == TYPEC_PORT_DRP) {
6315 ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
6316 &frs_current);
6317 if (!ret && frs_current <= FRS_5V_3A)
6318 port->new_source_frs_current = frs_current;
6319
6320 if (ret)
6321 ret = 0;
6322 }
6323
6324 /* For backward compatibility, the "capabilities" node is optional. */
6325 capabilities = fwnode_get_named_child_node(fwnode, "capabilities");
6326 if (!capabilities) {
6327 port->pd_count = 1;
6328 } else {
6329 fwnode_for_each_child_node(capabilities, child)
6330 port->pd_count++;
6331
6332 if (!port->pd_count) {
6333 ret = -ENODATA;
6334 goto put_capabilities;
6335 }
6336 }
6337
6338 port->pds = devm_kcalloc(port->dev, port->pd_count, sizeof(struct usb_power_delivery *),
6339 GFP_KERNEL);
6340 if (!port->pds) {
6341 ret = -ENOMEM;
6342 goto put_capabilities;
6343 }
6344
6345 port->pd_list = devm_kcalloc(port->dev, port->pd_count, sizeof(struct pd_data *),
6346 GFP_KERNEL);
6347 if (!port->pd_list) {
6348 ret = -ENOMEM;
6349 goto put_capabilities;
6350 }
6351
6352 for (i = 0; i < port->pd_count; i++) {
6353 port->pd_list[i] = devm_kzalloc(port->dev, sizeof(struct pd_data), GFP_KERNEL);
6354 if (!port->pd_list[i]) {
6355 ret = -ENOMEM;
6356 goto put_capabilities;
6357 }
6358
6359 src_pdo = port->pd_list[i]->source_desc.pdo;
6360 port->pd_list[i]->source_desc.role = TYPEC_SOURCE;
6361 snk_pdo = port->pd_list[i]->sink_desc.pdo;
6362 port->pd_list[i]->sink_desc.role = TYPEC_SINK;
6363
6364 /* If "capabilities" is NULL, fall back to single pd cap population. */
6365 if (!capabilities)
6366 caps = fwnode;
6367 else
6368 caps = fwnode_get_next_child_node(capabilities, caps);
6369
6370 if (port->port_type != TYPEC_PORT_SNK) {
6371 ret = fwnode_property_count_u32(caps, "source-pdos");
6372 if (ret == 0) {
6373 ret = -EINVAL;
6374 goto put_caps;
6375 }
6376 if (ret < 0)
6377 goto put_caps;
6378
6379 nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
6380 ret = fwnode_property_read_u32_array(caps, "source-pdos", src_pdo,
6381 nr_src_pdo);
6382 if (ret)
6383 goto put_caps;
6384
6385 ret = tcpm_validate_caps(port, src_pdo, nr_src_pdo);
6386 if (ret)
6387 goto put_caps;
6388
6389 if (i == 0) {
6390 port->nr_src_pdo = nr_src_pdo;
6391 memcpy_and_pad(port->src_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
6392 port->pd_list[0]->source_desc.pdo,
6393 sizeof(u32) * nr_src_pdo,
6394 0);
6395 }
6396 }
6397
6398 if (port->port_type != TYPEC_PORT_SRC) {
6399 ret = fwnode_property_count_u32(caps, "sink-pdos");
6400 if (ret == 0) {
6401 ret = -EINVAL;
6402 goto put_caps;
6403 }
6404
6405 if (ret < 0)
6406 goto put_caps;
6407
6408 nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
6409 ret = fwnode_property_read_u32_array(caps, "sink-pdos", snk_pdo,
6410 nr_snk_pdo);
6411 if (ret)
6412 goto put_caps;
6413
6414 ret = tcpm_validate_caps(port, snk_pdo, nr_snk_pdo);
6415 if (ret)
6416 goto put_caps;
6417
6418 if (fwnode_property_read_u32(caps, "op-sink-microwatt", &uw) < 0) {
6419 ret = -EINVAL;
6420 goto put_caps;
6421 }
6422
6423 port->pd_list[i]->operating_snk_mw = uw / 1000;
6424
6425 if (i == 0) {
6426 port->nr_snk_pdo = nr_snk_pdo;
6427 memcpy_and_pad(port->snk_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
6428 port->pd_list[0]->sink_desc.pdo,
6429 sizeof(u32) * nr_snk_pdo,
6430 0);
6431 port->operating_snk_mw = port->pd_list[0]->operating_snk_mw;
6432 }
6433 }
6434 }
6435
6436 put_caps:
6437 if (caps != fwnode)
6438 fwnode_handle_put(caps);
6439 put_capabilities:
6440 fwnode_handle_put(capabilities);
6441 return ret;
6442 }
6443
6444 static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fwnode)
6445 {
6446 int ret;
6447
6448 /* sink-vdos is optional */
6449 ret = fwnode_property_count_u32(fwnode, "sink-vdos");
6450 if (ret < 0)
6451 return 0;
6452
6453 port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
6454 if (port->nr_snk_vdo) {
6455 ret = fwnode_property_read_u32_array(fwnode, "sink-vdos",
6456 port->snk_vdo,
6457 port->nr_snk_vdo);
6458 if (ret < 0)
6459 return ret;
6460 }
6461
6462 /* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
6463 if (port->nr_snk_vdo) {
6464 ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
6465 if (ret < 0)
6466 return ret;
6467 else if (ret == 0)
6468 return -ENODATA;
6469
6470 port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
6471 ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
6472 port->snk_vdo_v1,
6473 port->nr_snk_vdo_v1);
6474 if (ret < 0)
6475 return ret;
6476 }
6477
6478 return 0;
6479 }
6480
6481 /* Power Supply access to expose source power information */
6482 enum tcpm_psy_online_states {
6483 TCPM_PSY_OFFLINE = 0,
6484 TCPM_PSY_FIXED_ONLINE,
6485 TCPM_PSY_PROG_ONLINE,
6486 };
6487
6488 static enum power_supply_property tcpm_psy_props[] = {
6489 POWER_SUPPLY_PROP_USB_TYPE,
6490 POWER_SUPPLY_PROP_ONLINE,
6491 POWER_SUPPLY_PROP_VOLTAGE_MIN,
6492 POWER_SUPPLY_PROP_VOLTAGE_MAX,
6493 POWER_SUPPLY_PROP_VOLTAGE_NOW,
6494 POWER_SUPPLY_PROP_CURRENT_MAX,
6495 POWER_SUPPLY_PROP_CURRENT_NOW,
6496 };
6497
6498 static int tcpm_psy_get_online(struct tcpm_port *port,
6499 union power_supply_propval *val)
6500 {
6501 if (port->vbus_charge) {
6502 if (port->pps_data.active)
6503 val->intval = TCPM_PSY_PROG_ONLINE;
6504 else
6505 val->intval = TCPM_PSY_FIXED_ONLINE;
6506 } else {
6507 val->intval = TCPM_PSY_OFFLINE;
6508 }
6509
6510 return 0;
6511 }
6512
6513 static int tcpm_psy_get_voltage_min(struct tcpm_port *port,
6514 union power_supply_propval *val)
6515 {
6516 if (port->pps_data.active)
6517 val->intval = port->pps_data.min_volt * 1000;
6518 else
6519 val->intval = port->supply_voltage * 1000;
6520
6521 return 0;
6522 }
6523
6524 static int tcpm_psy_get_voltage_max(struct tcpm_port *port,
6525 union power_supply_propval *val)
6526 {
6527 if (port->pps_data.active)
6528 val->intval = port->pps_data.max_volt * 1000;
6529 else
6530 val->intval = port->supply_voltage * 1000;
6531
6532 return 0;
6533 }
6534
6535 static int tcpm_psy_get_voltage_now(struct tcpm_port *port,
6536 union power_supply_propval *val)
6537 {
6538 val->intval = port->supply_voltage * 1000;
6539
6540 return 0;
6541 }
6542
6543 static int tcpm_psy_get_current_max(struct tcpm_port *port,
6544 union power_supply_propval *val)
6545 {
6546 if (port->pps_data.active)
6547 val->intval = port->pps_data.max_curr * 1000;
6548 else
6549 val->intval = port->current_limit * 1000;
6550
6551 return 0;
6552 }
6553
6554 static int tcpm_psy_get_current_now(struct tcpm_port *port,
6555 union power_supply_propval *val)
6556 {
6557 val->intval = port->current_limit * 1000;
6558
6559 return 0;
6560 }
6561
6562 static int tcpm_psy_get_input_power_limit(struct tcpm_port *port,
6563 union power_supply_propval *val)
6564 {
6565 unsigned int src_mv, src_ma, max_src_uw = 0;
6566 unsigned int i, tmp;
6567
6568 for (i = 0; i < port->nr_source_caps; i++) {
6569 u32 pdo = port->source_caps[i];
6570
6571 if (pdo_type(pdo) == PDO_TYPE_FIXED) {
6572 src_mv = pdo_fixed_voltage(pdo);
6573 src_ma = pdo_max_current(pdo);
6574 tmp = src_mv * src_ma;
6575 max_src_uw = tmp > max_src_uw ? tmp : max_src_uw;
6576 }
6577 }
6578
6579 val->intval = max_src_uw;
6580 return 0;
6581 }
6582
6583 static int tcpm_psy_get_prop(struct power_supply *psy,
6584 enum power_supply_property psp,
6585 union power_supply_propval *val)
6586 {
6587 struct tcpm_port *port = power_supply_get_drvdata(psy);
6588 int ret = 0;
6589
6590 switch (psp) {
6591 case POWER_SUPPLY_PROP_USB_TYPE:
6592 val->intval = port->usb_type;
6593 break;
6594 case POWER_SUPPLY_PROP_ONLINE:
6595 ret = tcpm_psy_get_online(port, val);
6596 break;
6597 case POWER_SUPPLY_PROP_VOLTAGE_MIN:
6598 ret = tcpm_psy_get_voltage_min(port, val);
6599 break;
6600 case POWER_SUPPLY_PROP_VOLTAGE_MAX:
6601 ret = tcpm_psy_get_voltage_max(port, val);
6602 break;
6603 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
6604 ret = tcpm_psy_get_voltage_now(port, val);
6605 break;
6606 case POWER_SUPPLY_PROP_CURRENT_MAX:
6607 ret = tcpm_psy_get_current_max(port, val);
6608 break;
6609 case POWER_SUPPLY_PROP_CURRENT_NOW:
6610 ret = tcpm_psy_get_current_now(port, val);
6611 break;
6612 case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
6613 ret = tcpm_psy_get_input_power_limit(port, val);
6614 break;
6615 default:
6616 ret = -EINVAL;
6617 break;
6618 }
6619
6620 return ret;
6621 }
6622
6623 static int tcpm_psy_set_online(struct tcpm_port *port,
6624 const union power_supply_propval *val)
6625 {
6626 int ret;
6627
6628 switch (val->intval) {
6629 case TCPM_PSY_FIXED_ONLINE:
6630 ret = tcpm_pps_activate(port, false);
6631 break;
6632 case TCPM_PSY_PROG_ONLINE:
6633 ret = tcpm_pps_activate(port, true);
6634 break;
6635 default:
6636 ret = -EINVAL;
6637 break;
6638 }
6639
6640 return ret;
6641 }
6642
6643 static int tcpm_psy_set_prop(struct power_supply *psy,
6644 enum power_supply_property psp,
6645 const union power_supply_propval *val)
6646 {
6647 struct tcpm_port *port = power_supply_get_drvdata(psy);
6648 int ret;
6649
6650 /*
6651 * All the properties below are related to USB PD. The check needs to become
6652 * property-specific once a non-PD-related property is added.
6653 */
6654 if (!port->pd_supported)
6655 return -EOPNOTSUPP;
6656
6657 switch (psp) {
6658 case POWER_SUPPLY_PROP_ONLINE:
6659 ret = tcpm_psy_set_online(port, val);
6660 break;
6661 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
6662 ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
6663 break;
6664 case POWER_SUPPLY_PROP_CURRENT_NOW:
6665 if (val->intval > port->pps_data.max_curr * 1000)
6666 ret = -EINVAL;
6667 else
6668 ret = tcpm_pps_set_op_curr(port, val->intval / 1000);
6669 break;
6670 default:
6671 ret = -EINVAL;
6672 break;
6673 }
6674 power_supply_changed(port->psy);
6675 return ret;
6676 }
6677
6678 static int tcpm_psy_prop_writeable(struct power_supply *psy,
6679 enum power_supply_property psp)
6680 {
6681 switch (psp) {
6682 case POWER_SUPPLY_PROP_ONLINE:
6683 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
6684 case POWER_SUPPLY_PROP_CURRENT_NOW:
6685 return 1;
6686 default:
6687 return 0;
6688 }
6689 }
6690
6691 static enum power_supply_usb_type tcpm_psy_usb_types[] = {
6692 POWER_SUPPLY_USB_TYPE_C,
6693 POWER_SUPPLY_USB_TYPE_PD,
6694 POWER_SUPPLY_USB_TYPE_PD_PPS,
6695 };
6696
6697 static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";
6698
6699 static int devm_tcpm_psy_register(struct tcpm_port *port)
6700 {
6701 struct power_supply_config psy_cfg = {};
6702 const char *port_dev_name = dev_name(port->dev);
6703 size_t psy_name_len = strlen(tcpm_psy_name_prefix) +
6704 strlen(port_dev_name) + 1;
6705 char *psy_name;
6706
6707 psy_cfg.drv_data = port;
6708 psy_cfg.fwnode = dev_fwnode(port->dev);
6709 psy_name = devm_kzalloc(port->dev, psy_name_len, GFP_KERNEL);
6710 if (!psy_name)
6711 return -ENOMEM;
6712
6713 snprintf(psy_name, psy_name_len, "%s%s", tcpm_psy_name_prefix,
6714 port_dev_name);
6715 port->psy_desc.name = psy_name;
6716 port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
6717 port->psy_desc.usb_types = tcpm_psy_usb_types;
6718 port->psy_desc.num_usb_types = ARRAY_SIZE(tcpm_psy_usb_types);
6719 port->psy_desc.properties = tcpm_psy_props;
6720 port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props);
6721 port->psy_desc.get_property = tcpm_psy_get_prop;
6722 port->psy_desc.set_property = tcpm_psy_set_prop;
6723 port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable;
6724
6725 port->usb_type = POWER_SUPPLY_USB_TYPE_C;
6726
6727 port->psy = devm_power_supply_register(port->dev, &port->psy_desc,
6728 &psy_cfg);
6729
6730 return PTR_ERR_OR_ZERO(port->psy);
6731 }
6732
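/*
 * The hrtimer handlers below only queue the corresponding kthread work; the
 * actual state machines always run in the port's worker thread.
 */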
6733 static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
6734 {
6735 struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
6736
6737 if (port->registered)
6738 kthread_queue_work(port->wq, &port->state_machine);
6739 return HRTIMER_NORESTART;
6740 }
6741
6742 static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
6743 {
6744 struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
6745
6746 if (port->registered)
6747 kthread_queue_work(port->wq, &port->vdm_state_machine);
6748 return HRTIMER_NORESTART;
6749 }
6750
6751 static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
6752 {
6753 struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
6754
6755 if (port->registered)
6756 kthread_queue_work(port->wq, &port->enable_frs);
6757 return HRTIMER_NORESTART;
6758 }
6759
6760 static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
6761 {
6762 struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
6763
6764 if (port->registered)
6765 kthread_queue_work(port->wq, &port->send_discover_work);
6766 return HRTIMER_NORESTART;
6767 }
6768
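/*
 * tcpm_register_port - register a Type-C port with the port manager
 *
 * Validates the mandatory tcpc_dev callbacks, creates the port worker and
 * timers, parses the firmware-provided capabilities, registers the power
 * supply, PD objects and typec port, and finally kicks off the state machine.
 *
 * A TCPC driver typically calls this from probe once its tcpc_dev callbacks
 * are filled in, along the lines of the following sketch (field and variable
 * names are illustrative only):
 *
 *	chip->tcpc.fwnode = device_get_named_child_node(dev, "connector");
 *	chip->port = tcpm_register_port(dev, &chip->tcpc);
 *	if (IS_ERR(chip->port))
 *		return PTR_ERR(chip->port);
 */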
6769 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
6770 {
6771 struct tcpm_port *port;
6772 int err;
6773
6774 if (!dev || !tcpc ||
6775 !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
6776 !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
6777 !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
6778 return ERR_PTR(-EINVAL);
6779
6780 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
6781 if (!port)
6782 return ERR_PTR(-ENOMEM);
6783
6784 port->dev = dev;
6785 port->tcpc = tcpc;
6786
6787 mutex_init(&port->lock);
6788 mutex_init(&port->swap_lock);
6789
6790 port->wq = kthread_create_worker(0, dev_name(dev));
6791 if (IS_ERR(port->wq))
6792 return ERR_CAST(port->wq);
6793 sched_set_fifo(port->wq->task);
6794
6795 kthread_init_work(&port->state_machine, tcpm_state_machine_work);
6796 kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
6797 kthread_init_work(&port->event_work, tcpm_pd_event_handler);
6798 kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
6799 kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
6800 hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6801 port->state_machine_timer.function = state_machine_timer_handler;
6802 hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6803 port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
6804 hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6805 port->enable_frs_timer.function = enable_frs_timer_handler;
6806 hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6807 port->send_discover_timer.function = send_discover_timer_handler;
6808
6809 spin_lock_init(&port->pd_event_lock);
6810
6811 init_completion(&port->tx_complete);
6812 init_completion(&port->swap_complete);
6813 init_completion(&port->pps_complete);
6814 tcpm_debugfs_init(port);
6815
6816 err = tcpm_fw_get_caps(port, tcpc->fwnode);
6817 if (err < 0)
6818 goto out_destroy_wq;
6819 err = tcpm_fw_get_snk_vdos(port, tcpc->fwnode);
6820 if (err < 0)
6821 goto out_destroy_wq;
6822
6823 port->try_role = port->typec_caps.prefer_role;
6824
6825 port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
6826 port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
6827 port->typec_caps.svdm_version = SVDM_VER_2_0;
6828 port->typec_caps.driver_data = port;
6829 port->typec_caps.ops = &tcpm_ops;
6830 port->typec_caps.orientation_aware = 1;
6831
6832 port->partner_desc.identity = &port->partner_ident;
6833
6834 port->role_sw = usb_role_switch_get(port->dev);
6835 if (!port->role_sw)
6836 port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
6837 if (IS_ERR(port->role_sw)) {
6838 err = PTR_ERR(port->role_sw);
6839 goto out_destroy_wq;
6840 }
6841
6842 err = devm_tcpm_psy_register(port);
6843 if (err)
6844 goto out_role_sw_put;
6845 power_supply_changed(port->psy);
6846
6847 err = tcpm_port_register_pd(port);
6848 if (err)
6849 goto out_role_sw_put;
6850
6851 if (port->pds)
6852 port->typec_caps.pd = port->pds[0];
6853
6854 port->typec_port = typec_register_port(port->dev, &port->typec_caps);
6855 if (IS_ERR(port->typec_port)) {
6856 err = PTR_ERR(port->typec_port);
6857 goto out_unregister_pd;
6858 }
6859
6860 typec_port_register_altmodes(port->typec_port,
6861 &tcpm_altmode_ops, port,
6862 port->port_altmode, ALTMODE_DISCOVERY_MAX);
6863 port->registered = true;
6864
6865 mutex_lock(&port->lock);
6866 tcpm_init(port);
6867 mutex_unlock(&port->lock);
6868
6869 tcpm_log(port, "%s: registered", dev_name(dev));
6870 return port;
6871
6872 out_unregister_pd:
6873 tcpm_port_unregister_pd(port);
6874 out_role_sw_put:
6875 usb_role_switch_put(port->role_sw);
6876 out_destroy_wq:
6877 tcpm_debugfs_exit(port);
6878 kthread_destroy_worker(port->wq);
6879 return ERR_PTR(err);
6880 }
6881 EXPORT_SYMBOL_GPL(tcpm_register_port);
6882
6883 void tcpm_unregister_port(struct tcpm_port *port)
6884 {
6885 int i;
6886
6887 port->registered = false;
6888 kthread_destroy_worker(port->wq);
6889
6890 hrtimer_cancel(&port->send_discover_timer);
6891 hrtimer_cancel(&port->enable_frs_timer);
6892 hrtimer_cancel(&port->vdm_state_machine_timer);
6893 hrtimer_cancel(&port->state_machine_timer);
6894
6895 tcpm_reset_port(port);
6896
6897 tcpm_port_unregister_pd(port);
6898
6899 for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
6900 typec_unregister_altmode(port->port_altmode[i]);
6901 typec_unregister_port(port->typec_port);
6902 usb_role_switch_put(port->role_sw);
6903 tcpm_debugfs_exit(port);
6904 }
6905 EXPORT_SYMBOL_GPL(tcpm_unregister_port);
6906
6907 MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
6908 MODULE_DESCRIPTION("USB Type-C Port Manager");
6909 MODULE_LICENSE("GPL");