// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2015-2017 Google, Inc
 *
 * USB Power Delivery protocol stack.
 */

#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/usb/pd.h>
#include <linux/usb/pd_bdo.h>
#include <linux/usb/pd_vdo.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>
#include <linux/workqueue.h>
#define FOREACH_STATE(S)			\
	S(INVALID_STATE),			\
	S(DRP_TOGGLING),			\
	S(SRC_UNATTACHED),			\
	S(SRC_ATTACH_WAIT),			\
	S(SRC_ATTACHED),			\
	S(SRC_STARTUP),				\
	S(SRC_SEND_CAPABILITIES),		\
	S(SRC_NEGOTIATE_CAPABILITIES),		\
	S(SRC_TRANSITION_SUPPLY),		\
	S(SRC_READY),				\
	S(SRC_WAIT_NEW_CAPABILITIES),		\
						\
	S(SNK_UNATTACHED),			\
	S(SNK_ATTACH_WAIT),			\
	S(SNK_DEBOUNCED),			\
	S(SNK_ATTACHED),			\
	S(SNK_STARTUP),				\
	S(SNK_DISCOVERY),			\
	S(SNK_DISCOVERY_DEBOUNCE),		\
	S(SNK_DISCOVERY_DEBOUNCE_DONE),		\
	S(SNK_WAIT_CAPABILITIES),		\
	S(SNK_NEGOTIATE_CAPABILITIES),		\
	S(SNK_TRANSITION_SINK),			\
	S(SNK_TRANSITION_SINK_VBUS),		\
	S(SNK_READY),				\
						\
	S(ACC_UNATTACHED),			\
	S(DEBUG_ACC_ATTACHED),			\
	S(AUDIO_ACC_ATTACHED),			\
	S(AUDIO_ACC_DEBOUNCE),			\
						\
	S(HARD_RESET_SEND),			\
	S(HARD_RESET_START),			\
	S(SRC_HARD_RESET_VBUS_OFF),		\
	S(SRC_HARD_RESET_VBUS_ON),		\
	S(SNK_HARD_RESET_SINK_OFF),		\
	S(SNK_HARD_RESET_WAIT_VBUS),		\
	S(SNK_HARD_RESET_SINK_ON),		\
						\
	S(SOFT_RESET),				\
	S(SOFT_RESET_SEND),			\
						\
	S(DR_SWAP_ACCEPT),			\
	S(DR_SWAP_SEND),			\
	S(DR_SWAP_SEND_TIMEOUT),		\
	S(DR_SWAP_CANCEL),			\
	S(DR_SWAP_CHANGE_DR),			\
						\
	S(PR_SWAP_ACCEPT),			\
	S(PR_SWAP_SEND),			\
	S(PR_SWAP_SEND_TIMEOUT),		\
	S(PR_SWAP_CANCEL),			\
	S(PR_SWAP_START),			\
	S(PR_SWAP_SRC_SNK_TRANSITION_OFF),	\
	S(PR_SWAP_SRC_SNK_SOURCE_OFF),		\
	S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
	S(PR_SWAP_SRC_SNK_SINK_ON),		\
	S(PR_SWAP_SNK_SRC_SINK_OFF),		\
	S(PR_SWAP_SNK_SRC_SOURCE_ON),		\
	S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP), \
						\
	S(VCONN_SWAP_ACCEPT),			\
	S(VCONN_SWAP_SEND),			\
	S(VCONN_SWAP_SEND_TIMEOUT),		\
	S(VCONN_SWAP_CANCEL),			\
	S(VCONN_SWAP_START),			\
	S(VCONN_SWAP_WAIT_FOR_VCONN),		\
	S(VCONN_SWAP_TURN_ON_VCONN),		\
	S(VCONN_SWAP_TURN_OFF_VCONN),		\
						\
	S(SNK_TRY),				\
	S(SNK_TRY_WAIT),			\
	S(SNK_TRY_WAIT_DEBOUNCE),		\
	S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS),	\
	S(SRC_TRYWAIT),				\
	S(SRC_TRYWAIT_DEBOUNCE),		\
	S(SRC_TRYWAIT_UNATTACHED),		\
						\
	S(SRC_TRY),				\
	S(SRC_TRY_WAIT),			\
	S(SRC_TRY_DEBOUNCE),			\
	S(SNK_TRYWAIT),				\
	S(SNK_TRYWAIT_DEBOUNCE),		\
	S(SNK_TRYWAIT_VBUS),			\
						\
	S(BIST_RX),				\
						\
	S(ERROR_RECOVERY),			\
	S(PORT_RESET),				\
	S(PORT_RESET_WAIT_OFF)

#define GENERATE_ENUM(e)	e
#define GENERATE_STRING(s)	#s

enum tcpm_state {
	FOREACH_STATE(GENERATE_ENUM)
};

static const char * const tcpm_states[] = {
	FOREACH_STATE(GENERATE_STRING)
};
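/*
 * Note: FOREACH_STATE() is an X-macro. Expanding it once with GENERATE_ENUM
 * and once with GENERATE_STRING keeps the tcpm_state enum and the
 * tcpm_states[] name table (used for logging) in sync automatically.
 */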
enum vdm_states {
	VDM_STATE_ERR_BUSY = -3,
	VDM_STATE_ERR_SEND = -2,
	VDM_STATE_ERR_TMOUT = -1,
	VDM_STATE_DONE = 0,
	/* Anything >0 represents an active state */
	VDM_STATE_READY = 1,
	VDM_STATE_BUSY = 2,
	VDM_STATE_WAIT_RSP_BUSY = 3,
};

enum pd_msg_request {
	PD_MSG_NONE = 0,
	PD_MSG_CTRL_REJECT,
	PD_MSG_CTRL_WAIT,
	PD_MSG_DATA_SINK_CAP,
	PD_MSG_DATA_SOURCE_CAP,
};

/* Events from low level driver */

#define TCPM_CC_EVENT		BIT(0)
#define TCPM_VBUS_EVENT		BIT(1)
#define TCPM_RESET_EVENT	BIT(2)

#define LOG_BUFFER_ENTRIES	1024
#define LOG_BUFFER_ENTRY_SIZE	128

/* Alternate mode support */

#define SVID_DISCOVERY_MAX	16

struct pd_mode_data {
	int svid_index;		/* current SVID index */
	int nsvids;
	u16 svids[SVID_DISCOVERY_MAX];
	int altmodes;		/* number of alternate modes */
	struct typec_altmode_desc altmode_desc[SVID_DISCOVERY_MAX];
};
struct tcpm_port {
	struct device *dev;

	struct mutex lock;		/* tcpm state machine lock */
	struct workqueue_struct *wq;

	struct typec_capability typec_caps;
	struct typec_port *typec_port;

	struct tcpc_dev	*tcpc;

	enum typec_role vconn_role;
	enum typec_role pwr_role;
	enum typec_data_role data_role;
	enum typec_pwr_opmode pwr_opmode;

	struct usb_pd_identity partner_ident;
	struct typec_partner_desc partner_desc;
	struct typec_partner *partner;

	enum typec_cc_status cc_req;

	enum typec_cc_status cc1;
	enum typec_cc_status cc2;
	enum typec_cc_polarity polarity;

	bool attached;
	bool connected;
	enum typec_port_type port_type;
	bool vbus_present;
	bool vbus_never_low;
	bool vbus_source;
	bool vbus_charge;

	bool send_discover;
	bool op_vsafe5v;

	int try_role;
	int try_snk_count;
	int try_src_count;

	enum pd_msg_request queued_message;

	enum tcpm_state enter_state;
	enum tcpm_state prev_state;
	enum tcpm_state state;
	enum tcpm_state delayed_state;
	unsigned long delayed_runtime;
	unsigned long delay_ms;

	spinlock_t pd_event_lock;
	u32 pd_events;

	struct work_struct event_work;
	struct delayed_work state_machine;
	struct delayed_work vdm_state_machine;
	bool state_machine_running;

	struct completion tx_complete;
	enum tcpm_transmit_status tx_status;

	struct mutex swap_lock;		/* swap command lock */
	bool swap_pending;
	bool non_pd_role_swap;
	struct completion swap_complete;
	int swap_status;

	unsigned int message_id;
	unsigned int caps_count;
	unsigned int hard_reset_count;
	bool pd_capable;
	bool explicit_contract;
	unsigned int rx_msgid;

	/* Partner capabilities/requests */
	u32 sink_request;
	u32 source_caps[PDO_MAX_OBJECTS];
	unsigned int nr_source_caps;
	u32 sink_caps[PDO_MAX_OBJECTS];
	unsigned int nr_sink_caps;

	/* Local capabilities */
	u32 src_pdo[PDO_MAX_OBJECTS];
	unsigned int nr_src_pdo;
	u32 snk_pdo[PDO_MAX_OBJECTS];
	unsigned int nr_snk_pdo;
	u32 snk_vdo[VDO_MAX_OBJECTS];
	unsigned int nr_snk_vdo;

	unsigned int max_snk_mv;
	unsigned int max_snk_ma;
	unsigned int max_snk_mw;
	unsigned int operating_snk_mw;

	/* Requested current / voltage */
	u32 current_limit;
	u32 supply_voltage;

	u32 bist_request;

	/* PD state for Vendor Defined Messages */
	enum vdm_states vdm_state;
	u32 vdm_retries;
	/* next Vendor Defined Message to send */
	u32 vdo_data[VDO_MAX_SIZE];
	u8 vdo_count;
	/* VDO to retry if UFP responder replied busy */
	u32 vdo_retry;

	/* Alternate mode data */
	struct pd_mode_data mode_data;
	struct typec_altmode *partner_altmode[SVID_DISCOVERY_MAX];
	struct typec_altmode *port_altmode[SVID_DISCOVERY_MAX];

	/* Deadline in jiffies to exit src_try_wait state */
	unsigned long max_wait;

#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
	struct mutex logbuffer_lock;	/* log buffer access lock */
	int logbuffer_head;
	int logbuffer_tail;
	u8 *logbuffer[LOG_BUFFER_ENTRIES];
#endif
};

struct pd_rx_event {
	struct work_struct work;
	struct tcpm_port *port;
	struct pd_message msg;
};
#define tcpm_cc_is_sink(cc) \
	((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
	 (cc) == TYPEC_CC_RP_3_0)

#define tcpm_port_is_sink(port) \
	((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \
	 (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1)))

#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
#define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)

#define tcpm_port_is_source(port) \
	((tcpm_cc_is_source((port)->cc1) && \
	 !tcpm_cc_is_source((port)->cc2)) || \
	 (tcpm_cc_is_source((port)->cc2) && \
	  !tcpm_cc_is_source((port)->cc1)))

#define tcpm_port_is_debug(port) \
	(tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))

#define tcpm_port_is_audio(port) \
	(tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))

#define tcpm_port_is_audio_detached(port) \
	((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
	 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))

#define tcpm_try_snk(port) \
	((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
	(port)->port_type == TYPEC_PORT_DRP)

#define tcpm_try_src(port) \
	((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
	(port)->port_type == TYPEC_PORT_DRP)
static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
{
	if (port->port_type == TYPEC_PORT_DRP) {
		if (port->try_role == TYPEC_SINK)
			return SNK_UNATTACHED;
		else if (port->try_role == TYPEC_SOURCE)
			return SRC_UNATTACHED;
		else if (port->tcpc->config->default_role == TYPEC_SINK)
			return SNK_UNATTACHED;
		/* Fall through to return SRC_UNATTACHED */
	} else if (port->port_type == TYPEC_PORT_UFP) {
		return SNK_UNATTACHED;
	}
	return SRC_UNATTACHED;
}

static struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap)
{
	return container_of(cap, struct tcpm_port, typec_caps);
}

static bool tcpm_port_is_disconnected(struct tcpm_port *port)
{
	return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
		port->cc2 == TYPEC_CC_OPEN) ||
	       (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
				    port->cc1 == TYPEC_CC_OPEN) ||
				   (port->polarity == TYPEC_POLARITY_CC2 &&
				    port->cc2 == TYPEC_CC_OPEN)));
}
/*
 * Logging
 */

#ifdef CONFIG_DEBUG_FS

static bool tcpm_log_full(struct tcpm_port *port)
{
	return port->logbuffer_tail ==
		(port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
}

static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
{
	char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
	u64 ts_nsec = local_clock();
	unsigned long rem_nsec;

	if (!port->logbuffer[port->logbuffer_head]) {
		port->logbuffer[port->logbuffer_head] =
				kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
		if (!port->logbuffer[port->logbuffer_head])
			return;
	}

	vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);

	mutex_lock(&port->logbuffer_lock);

	if (tcpm_log_full(port)) {
		port->logbuffer_head = max(port->logbuffer_head - 1, 0);
		strcpy(tmpbuffer, "overflow");
	}

	if (port->logbuffer_head < 0 ||
	    port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
		dev_warn(port->dev,
			 "Bad log buffer index %d\n", port->logbuffer_head);
		goto abort;
	}

	if (!port->logbuffer[port->logbuffer_head]) {
		dev_warn(port->dev,
			 "Log buffer index %d is NULL\n", port->logbuffer_head);
		goto abort;
	}

	rem_nsec = do_div(ts_nsec, 1000000000);
	scnprintf(port->logbuffer[port->logbuffer_head],
		  LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
		  (unsigned long)ts_nsec, rem_nsec / 1000,
		  tmpbuffer);
	port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;

abort:
	mutex_unlock(&port->logbuffer_lock);
}
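/*
 * The log is kept in a fixed-size ring of LOG_BUFFER_ENTRIES entries of
 * LOG_BUFFER_ENTRY_SIZE bytes each, allocated lazily on first use; when the
 * head index catches up with the tail the newest entry is replaced with
 * "overflow" instead of growing the buffer.
 */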
static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
{
	va_list args;

	/* Do not log while disconnected and unattached */
	if (tcpm_port_is_disconnected(port) &&
	    (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
	     port->state == DRP_TOGGLING))
		return;

	va_start(args, fmt);
	_tcpm_log(port, fmt, args);
	va_end(args);
}

static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	_tcpm_log(port, fmt, args);
	va_end(args);
}

static void tcpm_log_source_caps(struct tcpm_port *port)
{
	int i;

	for (i = 0; i < port->nr_source_caps; i++) {
		u32 pdo = port->source_caps[i];
		enum pd_pdo_type type = pdo_type(pdo);
		char msg[64];

		switch (type) {
		case PDO_TYPE_FIXED:
			scnprintf(msg, sizeof(msg),
				  "%u mV, %u mA [%s%s%s%s%s%s]",
				  pdo_fixed_voltage(pdo),
				  pdo_max_current(pdo),
				  (pdo & PDO_FIXED_DUAL_ROLE) ? "R" : "",
				  (pdo & PDO_FIXED_SUSPEND) ? "S" : "",
				  (pdo & PDO_FIXED_HIGHER_CAP) ? "H" : "",
				  (pdo & PDO_FIXED_USB_COMM) ? "U" : "",
				  (pdo & PDO_FIXED_DATA_SWAP) ? "D" : "",
				  (pdo & PDO_FIXED_EXTPOWER) ? "E" : "");
			break;
		case PDO_TYPE_VAR:
			scnprintf(msg, sizeof(msg),
				  "%u-%u mV, %u mA",
				  pdo_min_voltage(pdo),
				  pdo_max_voltage(pdo),
				  pdo_max_current(pdo));
			break;
		case PDO_TYPE_BATT:
			scnprintf(msg, sizeof(msg),
				  "%u-%u mV, %u mW",
				  pdo_min_voltage(pdo),
				  pdo_max_voltage(pdo),
				  pdo_max_power(pdo));
			break;
		default:
			strcpy(msg, "undefined");
			break;
		}
		tcpm_log(port, " PDO %d: type %d, %s",
			 i, type, msg);
	}
}
static int tcpm_seq_show(struct seq_file *s, void *v)
{
	struct tcpm_port *port = (struct tcpm_port *)s->private;
	int tail;

	mutex_lock(&port->logbuffer_lock);
	tail = port->logbuffer_tail;
	while (tail != port->logbuffer_head) {
		seq_printf(s, "%s\n", port->logbuffer[tail]);
		tail = (tail + 1) % LOG_BUFFER_ENTRIES;
	}
	if (!seq_has_overflowed(s))
		port->logbuffer_tail = tail;
	mutex_unlock(&port->logbuffer_lock);

	return 0;
}

static int tcpm_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, tcpm_seq_show, inode->i_private);
}

static const struct file_operations tcpm_debug_operations = {
	.open		= tcpm_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *rootdir;

static int tcpm_debugfs_init(struct tcpm_port *port)
{
	mutex_init(&port->logbuffer_lock);
	/* /sys/kernel/debug/tcpm/usbcX */
	if (!rootdir) {
		rootdir = debugfs_create_dir("tcpm", NULL);
		if (!rootdir)
			return -ENOMEM;
	}

	port->dentry = debugfs_create_file(dev_name(port->dev),
					   S_IFREG | 0444, rootdir,
					   port, &tcpm_debug_operations);

	return 0;
}

static void tcpm_debugfs_exit(struct tcpm_port *port)
{
	debugfs_remove(port->dentry);
}

#else

static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
static void tcpm_log_source_caps(struct tcpm_port *port) { }
static int tcpm_debugfs_init(const struct tcpm_port *port) { return 0; }
static void tcpm_debugfs_exit(const struct tcpm_port *port) { }

#endif
static int tcpm_pd_transmit(struct tcpm_port *port,
			    enum tcpm_transmit_type type,
			    const struct pd_message *msg)
{
	unsigned long timeout;
	int ret;

	if (msg)
		tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
	else
		tcpm_log(port, "PD TX, type: %#x", type);

	reinit_completion(&port->tx_complete);
	ret = port->tcpc->pd_transmit(port->tcpc, type, msg);
	if (ret < 0)
		return ret;

	mutex_unlock(&port->lock);
	timeout = wait_for_completion_timeout(&port->tx_complete,
				msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
	mutex_lock(&port->lock);
	if (!timeout)
		return -ETIMEDOUT;

	switch (port->tx_status) {
	case TCPC_TX_SUCCESS:
		port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
		return 0;
	case TCPC_TX_DISCARDED:
		return -EAGAIN;
	case TCPC_TX_FAILED:
	default:
		return -EIO;
	}
}

void tcpm_pd_transmit_complete(struct tcpm_port *port,
			       enum tcpm_transmit_status status)
{
	tcpm_log(port, "PD TX complete, status: %u", status);
	port->tx_status = status;
	complete(&port->tx_complete);
}
EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
static int tcpm_mux_set(struct tcpm_port *port, enum tcpc_mux_mode mode,
			enum tcpc_usb_switch config)
{
	int ret = 0;

	tcpm_log(port, "Requesting mux mode %d, config %d, polarity %d",
		 mode, config, port->polarity);

	if (port->tcpc->mux)
		ret = port->tcpc->mux->set(port->tcpc->mux, mode, config,
					   port->polarity);

	return ret;
}

static int tcpm_set_polarity(struct tcpm_port *port,
			     enum typec_cc_polarity polarity)
{
	int ret;

	tcpm_log(port, "polarity %d", polarity);

	ret = port->tcpc->set_polarity(port->tcpc, polarity);
	if (ret < 0)
		return ret;

	port->polarity = polarity;

	return 0;
}

static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
{
	int ret;

	tcpm_log(port, "vconn:=%d", enable);

	ret = port->tcpc->set_vconn(port->tcpc, enable);
	if (!ret) {
		port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
		typec_set_vconn_role(port->typec_port, port->vconn_role);
	}

	return ret;
}
static u32 tcpm_get_current_limit(struct tcpm_port *port)
{
	enum typec_cc_status cc;
	u32 limit;

	cc = port->polarity ? port->cc2 : port->cc1;
	switch (cc) {
	case TYPEC_CC_RP_1_5:
		limit = 1500;
		break;
	case TYPEC_CC_RP_3_0:
		limit = 3000;
		break;
	case TYPEC_CC_RP_DEF:
	default:
		if (port->tcpc->get_current_limit)
			limit = port->tcpc->get_current_limit(port->tcpc);
		else
			limit = 0;
		break;
	}

	return limit;
}

static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
{
	int ret = -EOPNOTSUPP;

	tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);

	if (port->tcpc->set_current_limit)
		ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);

	return ret;
}
/*
 * Determine RP value to set based on maximum current supported
 * by a port if configured as source.
 * Returns CC value to report to link partner.
 */
static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
{
	const u32 *src_pdo = port->src_pdo;
	int nr_pdo = port->nr_src_pdo;
	int i;

	/*
	 * Search for first entry with matching voltage.
	 * It should report the maximum supported current.
	 */
	for (i = 0; i < nr_pdo; i++) {
		const u32 pdo = src_pdo[i];

		if (pdo_type(pdo) == PDO_TYPE_FIXED &&
		    pdo_fixed_voltage(pdo) == 5000) {
			unsigned int curr = pdo_max_current(pdo);

			if (curr >= 3000)
				return TYPEC_CC_RP_3_0;
			else if (curr >= 1500)
				return TYPEC_CC_RP_1_5;
			return TYPEC_CC_RP_DEF;
		}
	}

	return TYPEC_CC_RP_DEF;
}
static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
{
	return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
				     port->data_role);
}

static int tcpm_set_roles(struct tcpm_port *port, bool attached,
			  enum typec_role role, enum typec_data_role data)
{
	int ret;

	if (data == TYPEC_HOST)
		ret = tcpm_mux_set(port, TYPEC_MUX_USB,
				   TCPC_USB_SWITCH_CONNECT);
	else
		ret = tcpm_mux_set(port, TYPEC_MUX_NONE,
				   TCPC_USB_SWITCH_DISCONNECT);
	if (ret < 0)
		return ret;

	ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
	if (ret < 0)
		return ret;

	port->pwr_role = role;
	port->data_role = data;
	typec_set_data_role(port->typec_port, data);
	typec_set_pwr_role(port->typec_port, role);

	return 0;
}

static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
{
	int ret;

	ret = port->tcpc->set_roles(port->tcpc, true, role,
				    port->data_role);
	if (ret < 0)
		return ret;

	port->pwr_role = role;
	typec_set_pwr_role(port->typec_port, role);

	return 0;
}
static int tcpm_pd_send_source_caps(struct tcpm_port *port)
{
	struct pd_message msg;
	int i;

	memset(&msg, 0, sizeof(msg));
	if (!port->nr_src_pdo) {
		/* No source capabilities defined, sink only */
		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
					  port->pwr_role,
					  port->data_role,
					  port->message_id, 0);
	} else {
		msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
					  port->pwr_role,
					  port->data_role,
					  port->message_id,
					  port->nr_src_pdo);
	}
	for (i = 0; i < port->nr_src_pdo; i++)
		msg.payload[i] = cpu_to_le32(port->src_pdo[i]);

	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}

static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
{
	struct pd_message msg;
	int i;

	memset(&msg, 0, sizeof(msg));
	if (!port->nr_snk_pdo) {
		/* No sink capabilities defined, source only */
		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
					  port->pwr_role,
					  port->data_role,
					  port->message_id, 0);
	} else {
		msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
					  port->pwr_role,
					  port->data_role,
					  port->message_id,
					  port->nr_snk_pdo);
	}
	for (i = 0; i < port->nr_snk_pdo; i++)
		msg.payload[i] = cpu_to_le32(port->snk_pdo[i]);

	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
			   unsigned int delay_ms)
{
	if (delay_ms) {
		tcpm_log(port, "pending state change %s -> %s @ %u ms",
			 tcpm_states[port->state], tcpm_states[state],
			 delay_ms);
		port->delayed_state = state;
		mod_delayed_work(port->wq, &port->state_machine,
				 msecs_to_jiffies(delay_ms));
		port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms);
		port->delay_ms = delay_ms;
	} else {
		tcpm_log(port, "state change %s -> %s",
			 tcpm_states[port->state], tcpm_states[state]);
		port->delayed_state = INVALID_STATE;
		port->prev_state = port->state;
		port->state = state;
		/*
		 * Don't re-queue the state machine work item if we're currently
		 * in the state machine and we're immediately changing states.
		 * tcpm_state_machine_work() will continue running the state
		 * machine.
		 */
		if (!port->state_machine_running)
			mod_delayed_work(port->wq, &port->state_machine, 0);
	}
}
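/*
 * A zero delay_ms requests an immediate transition: port->state is updated
 * in place and the state machine work item is only (re)queued when the
 * caller is not already running inside the state machine worker. A non-zero
 * delay leaves port->state untouched and arms the delayed work instead,
 * recording the deadline in delayed_runtime.
 */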
static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
				unsigned int delay_ms)
{
	if (port->enter_state == port->state)
		tcpm_set_state(port, state, delay_ms);
	else
		tcpm_log(port,
			 "skipped %sstate change %s -> %s [%u ms], context state %s",
			 delay_ms ? "delayed " : "",
			 tcpm_states[port->state], tcpm_states[state],
			 delay_ms, tcpm_states[port->enter_state]);
}

static void tcpm_queue_message(struct tcpm_port *port,
			       enum pd_msg_request message)
{
	port->queued_message = message;
	mod_delayed_work(port->wq, &port->state_machine, 0);
}
/*
 * VDM/VDO handling functions
 */
static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
			   const u32 *data, int cnt)
{
	port->vdo_count = cnt + 1;
	port->vdo_data[0] = header;
	memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
	/* Set ready, vdm state machine will actually send */
	port->vdm_retries = 0;
	port->vdm_state = VDM_STATE_READY;
}
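/*
 * tcpm_queue_vdm() only stages the VDM header and VDOs in port->vdo_data[];
 * nothing goes on the wire until the VDM state machine, scheduled by the
 * callers, picks the message up in VDM_STATE_READY.
 */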
static void svdm_consume_identity(struct tcpm_port *port, const __le32 *payload,
				  int cnt)
{
	u32 vdo = le32_to_cpu(payload[VDO_INDEX_IDH]);
	u32 product = le32_to_cpu(payload[VDO_INDEX_PRODUCT]);

	memset(&port->mode_data, 0, sizeof(port->mode_data));

	port->partner_ident.id_header = vdo;
	port->partner_ident.cert_stat = le32_to_cpu(payload[VDO_INDEX_CSTAT]);
	port->partner_ident.product = product;

	typec_partner_set_identity(port->partner);

	tcpm_log(port, "Identity: %04x:%04x.%04x",
		 PD_IDH_VID(vdo),
		 PD_PRODUCT_PID(product), product & 0xffff);
}

static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
			       int cnt)
{
	struct pd_mode_data *pmdata = &port->mode_data;
	int i;

	for (i = 1; i < cnt; i++) {
		u32 p = le32_to_cpu(payload[i]);
		u16 svid;

		svid = (p >> 16) & 0xffff;
		if (!svid)
			return false;

		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
			goto abort;

		pmdata->svids[pmdata->nsvids++] = svid;
		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);

		svid = p & 0xffff;
		if (!svid)
			return false;

		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
			goto abort;

		pmdata->svids[pmdata->nsvids++] = svid;
		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
	}
	return true;
abort:
	tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
	return false;
}
static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
			       int cnt)
{
	struct pd_mode_data *pmdata = &port->mode_data;
	struct typec_altmode_desc *paltmode;
	struct typec_mode_desc *pmode;
	int i;

	if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
		/* Already logged in svdm_consume_svids() */
		return;
	}

	paltmode = &pmdata->altmode_desc[pmdata->altmodes];
	memset(paltmode, 0, sizeof(*paltmode));

	paltmode->svid = pmdata->svids[pmdata->svid_index];

	tcpm_log(port, " Alternate mode %d: SVID 0x%04x",
		 pmdata->altmodes, paltmode->svid);

	for (i = 1; i < cnt && paltmode->n_modes < ALTMODE_MAX_MODES; i++) {
		pmode = &paltmode->modes[paltmode->n_modes];
		memset(pmode, 0, sizeof(*pmode));
		pmode->vdo = le32_to_cpu(payload[i]);
		pmode->index = i - 1;
		paltmode->n_modes++;
		tcpm_log(port, " VDO %d: 0x%08x",
			 pmode->index, pmode->vdo);
	}
	port->partner_altmode[pmdata->altmodes] =
		typec_partner_register_altmode(port->partner, paltmode);
	if (!port->partner_altmode[pmdata->altmodes]) {
		tcpm_log(port,
			 "Failed to register alternate modes for SVID 0x%04x",
			 paltmode->svid);
		return;
	}
	pmdata->altmodes++;
}

#define supports_modal(port)	PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
			u32 *response)
{
	u32 p0 = le32_to_cpu(payload[0]);
	int cmd_type = PD_VDO_CMDT(p0);
	int cmd = PD_VDO_CMD(p0);
	struct pd_mode_data *modep;
	int rlen = 0;
	u16 svid;
	int i;

	tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
		 p0, cmd_type, cmd, cnt);

	modep = &port->mode_data;

	switch (cmd_type) {
	case CMDT_INIT:
		switch (cmd) {
		case CMD_DISCOVER_IDENT:
			/* 6.4.4.3.1: Only respond as UFP (device) */
			if (port->data_role == TYPEC_DEVICE &&
			    port->nr_snk_vdo) {
				for (i = 0; i < port->nr_snk_vdo; i++)
					response[i + 1] = port->snk_vdo[i];
				rlen = port->nr_snk_vdo + 1;
			}
			break;
		case CMD_DISCOVER_SVID:
			break;
		case CMD_DISCOVER_MODES:
			break;
		case CMD_ENTER_MODE:
			break;
		default:
			break;
		}
		if (rlen >= 1) {
			response[0] = p0 | VDO_CMDT(CMDT_RSP_ACK);
		} else if (rlen == 0) {
			response[0] = p0 | VDO_CMDT(CMDT_RSP_NAK);
			rlen = 1;
		} else {
			response[0] = p0 | VDO_CMDT(CMDT_RSP_BUSY);
			rlen = 1;
		}
		break;
	case CMDT_RSP_ACK:
		/* silently drop message if we are not connected */
		if (!port->partner)
			break;

		switch (cmd) {
		case CMD_DISCOVER_IDENT:
			svdm_consume_identity(port, payload, cnt);
			response[0] = VDO(USB_SID_PD, 1, CMD_DISCOVER_SVID);
			rlen = 1;
			break;
		case CMD_DISCOVER_SVID:
			if (svdm_consume_svids(port, payload, cnt)) {
				response[0] = VDO(USB_SID_PD, 1,
						  CMD_DISCOVER_SVID);
				rlen = 1;
			} else if (modep->nsvids && supports_modal(port)) {
				response[0] = VDO(modep->svids[0], 1,
						  CMD_DISCOVER_MODES);
				rlen = 1;
			}
			break;
		case CMD_DISCOVER_MODES:
			svdm_consume_modes(port, payload, cnt);
			modep->svid_index++;
			if (modep->svid_index < modep->nsvids) {
				svid = modep->svids[modep->svid_index];
				response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
				rlen = 1;
			} else {
				/* enter alternate mode if/when implemented */
			}
			break;
		case CMD_ENTER_MODE:
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return rlen;
}
static void tcpm_handle_vdm_request(struct tcpm_port *port,
				    const __le32 *payload, int cnt)
{
	int rlen = 0;
	u32 response[8] = { };
	u32 p0 = le32_to_cpu(payload[0]);

	if (port->vdm_state == VDM_STATE_BUSY) {
		/* If UFP responded busy retry after timeout */
		if (PD_VDO_CMDT(p0) == CMDT_RSP_BUSY) {
			port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
			port->vdo_retry = (p0 & ~VDO_CMDT_MASK) |
				CMDT_INIT;
			mod_delayed_work(port->wq, &port->vdm_state_machine,
					 msecs_to_jiffies(PD_T_VDM_BUSY));
			return;
		}
		port->vdm_state = VDM_STATE_DONE;
	}

	if (PD_VDO_SVDM(p0))
		rlen = tcpm_pd_svdm(port, payload, cnt, response);

	if (rlen > 0) {
		tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
		mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
	}
}

static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
			  const u32 *data, int count)
{
	u32 header;

	if (WARN_ON(count > VDO_MAX_SIZE - 1))
		count = VDO_MAX_SIZE - 1;

	/* set VDM header with VID & CMD */
	header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
			1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), cmd);
	tcpm_queue_vdm(port, header, data, count);

	mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
}
static unsigned int vdm_ready_timeout(u32 vdm_hdr)
{
	unsigned int timeout;
	int cmd = PD_VDO_CMD(vdm_hdr);

	/* its not a structured VDM command */
	if (!PD_VDO_SVDM(vdm_hdr))
		return PD_T_VDM_UNSTRUCTURED;

	switch (PD_VDO_CMDT(vdm_hdr)) {
	case CMDT_INIT:
		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
			timeout = PD_T_VDM_WAIT_MODE_E;
		else
			timeout = PD_T_VDM_SNDR_RSP;
		break;
	default:
		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
			timeout = PD_T_VDM_E_MODE;
		else
			timeout = PD_T_VDM_RCVR_RSP;
		break;
	}

	return timeout;
}
static void vdm_run_state_machine(struct tcpm_port *port)
{
	struct pd_message msg;
	int i, res = 0;

	switch (port->vdm_state) {
	case VDM_STATE_READY:
		/* Only transmit VDM if attached */
		if (!port->attached) {
			port->vdm_state = VDM_STATE_ERR_BUSY;
			break;
		}

		/*
		 * if there's traffic or we're not in PDO ready state don't send
		 * a VDM.
		 */
		if (port->state != SRC_READY && port->state != SNK_READY)
			break;

		/* Prepare and send VDM */
		memset(&msg, 0, sizeof(msg));
		msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
					  port->pwr_role,
					  port->data_role,
					  port->message_id, port->vdo_count);
		for (i = 0; i < port->vdo_count; i++)
			msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
		res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
		if (res < 0) {
			port->vdm_state = VDM_STATE_ERR_SEND;
		} else {
			unsigned long timeout;

			port->vdm_retries = 0;
			port->vdm_state = VDM_STATE_BUSY;
			timeout = vdm_ready_timeout(port->vdo_data[0]);
			mod_delayed_work(port->wq, &port->vdm_state_machine,
					 timeout);
		}
		break;
	case VDM_STATE_WAIT_RSP_BUSY:
		port->vdo_data[0] = port->vdo_retry;
		port->vdo_count = 1;
		port->vdm_state = VDM_STATE_READY;
		break;
	case VDM_STATE_BUSY:
		port->vdm_state = VDM_STATE_ERR_TMOUT;
		break;
	case VDM_STATE_ERR_SEND:
		/*
		 * A partner which does not support USB PD will not reply,
		 * so this is not a fatal error. At the same time, some
		 * devices may not return GoodCRC under some circumstances,
		 * so we need to retry.
		 */
		if (port->vdm_retries < 3) {
			tcpm_log(port, "VDM Tx error, retry");
			port->vdm_retries++;
			port->vdm_state = VDM_STATE_READY;
		}
		break;
	default:
		break;
	}
}

static void vdm_state_machine_work(struct work_struct *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port,
					      vdm_state_machine.work);
	enum vdm_states prev_state;

	mutex_lock(&port->lock);

	/*
	 * Continue running as long as the port is not busy and there was
	 * a state change.
	 */
	do {
		prev_state = port->vdm_state;
		vdm_run_state_machine(port);
	} while (port->vdm_state != prev_state &&
		 port->vdm_state != VDM_STATE_BUSY);

	mutex_unlock(&port->lock);
}
enum pdo_err {
	PDO_NO_ERR,
	PDO_ERR_NO_VSAFE5V,
	PDO_ERR_VSAFE5V_NOT_FIRST,
	PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
	PDO_ERR_FIXED_NOT_SORTED,
	PDO_ERR_VARIABLE_BATT_NOT_SORTED,
	PDO_ERR_DUPE_PDO,
};

static const char * const pdo_err_msg[] = {
	[PDO_ERR_NO_VSAFE5V] =
	" err: source/sink caps should atleast have vSafe5V",
	[PDO_ERR_VSAFE5V_NOT_FIRST] =
	" err: vSafe5V Fixed Supply Object Shall always be the first object",
	[PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
	" err: PDOs should be in the following order: Fixed; Battery; Variable",
	[PDO_ERR_FIXED_NOT_SORTED] =
	" err: Fixed supply pdos should be in increasing order of their fixed voltage",
	[PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
	" err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
	[PDO_ERR_DUPE_PDO] =
	" err: Variable/Batt supply pdos cannot have same min/max voltage",
};
static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
				  unsigned int nr_pdo)
{
	unsigned int i;

	/* Should at least contain vSafe5v */
	if (nr_pdo < 1)
		return PDO_ERR_NO_VSAFE5V;

	/* The vSafe5V Fixed Supply Object Shall always be the first object */
	if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
	    pdo_fixed_voltage(pdo[0]) != VSAFE5V)
		return PDO_ERR_VSAFE5V_NOT_FIRST;

	for (i = 1; i < nr_pdo; i++) {
		if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
			return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
		} else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
			enum pd_pdo_type type = pdo_type(pdo[i]);

			switch (type) {
			/*
			 * The remaining Fixed Supply Objects, if
			 * present, shall be sent in voltage order;
			 * lowest to highest.
			 */
			case PDO_TYPE_FIXED:
				if (pdo_fixed_voltage(pdo[i]) <=
				    pdo_fixed_voltage(pdo[i - 1]))
					return PDO_ERR_FIXED_NOT_SORTED;
				break;
			/*
			 * The Battery Supply Objects and Variable
			 * supply, if present shall be sent in Minimum
			 * Voltage order; lowest to highest.
			 */
			case PDO_TYPE_VAR:
			case PDO_TYPE_BATT:
				if (pdo_min_voltage(pdo[i]) <
				    pdo_min_voltage(pdo[i - 1]))
					return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
				else if ((pdo_min_voltage(pdo[i]) ==
					  pdo_min_voltage(pdo[i - 1])) &&
					 (pdo_max_voltage(pdo[i]) ==
					  pdo_max_voltage(pdo[i - 1])))
					return PDO_ERR_DUPE_PDO;
				break;
			default:
				tcpm_log_force(port, " Unknown pdo type");
				break;
			}
		}
	}

	return PDO_NO_ERR;
}

static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
			      unsigned int nr_pdo)
{
	enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);

	if (err_index != PDO_NO_ERR) {
		tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
		return -EINVAL;
	}

	return 0;
}
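/*
 * The checks above mirror the PDO ordering rules from the USB PD
 * specification: the first object must be the vSafe5V fixed supply, fixed
 * supplies come before battery and variable supplies, and within each group
 * the objects must be sorted by (minimum) voltage without duplicates.
 */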
/*
 * PD (data, control) command handling functions
 */
static void tcpm_pd_data_request(struct tcpm_port *port,
				 const struct pd_message *msg)
{
	enum pd_data_msg_type type = pd_header_type_le(msg->header);
	unsigned int cnt = pd_header_cnt_le(msg->header);
	unsigned int i;

	switch (type) {
	case PD_DATA_SOURCE_CAP:
		if (port->pwr_role != TYPEC_SINK)
			break;

		for (i = 0; i < cnt; i++)
			port->source_caps[i] = le32_to_cpu(msg->payload[i]);

		port->nr_source_caps = cnt;

		tcpm_log_source_caps(port);

		tcpm_validate_caps(port, port->source_caps,
				   port->nr_source_caps);

		/*
		 * This message may be received even if VBUS is not
		 * present. This is quite unexpected; see USB PD
		 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
		 * However, at the same time, we must be ready to
		 * receive this message and respond to it 15ms after
		 * receiving PS_RDY during power swap operations, no matter
		 * if VBUS is available or not (USB PD specification,
		 * section 6.5.9.2).
		 * So we need to accept the message either way,
		 * but be prepared to keep waiting for VBUS after it was
		 * handled.
		 */
		tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
		break;
	case PD_DATA_REQUEST:
		if (port->pwr_role != TYPEC_SOURCE ||
		    cnt != 1) {
			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
			break;
		}
		port->sink_request = le32_to_cpu(msg->payload[0]);
		tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
		break;
	case PD_DATA_SINK_CAP:
		/* We don't do anything with this at the moment... */
		for (i = 0; i < cnt; i++)
			port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
		port->nr_sink_caps = cnt;
		break;
	case PD_DATA_VENDOR_DEF:
		tcpm_handle_vdm_request(port, msg->payload, cnt);
		break;
	case PD_DATA_BIST:
		if (port->state == SRC_READY || port->state == SNK_READY) {
			port->bist_request = le32_to_cpu(msg->payload[0]);
			tcpm_set_state(port, BIST_RX, 0);
		}
		break;
	default:
		tcpm_log(port, "Unhandled data message type %#x", type);
		break;
	}
}
static void tcpm_pd_ctrl_request(struct tcpm_port *port,
				 const struct pd_message *msg)
{
	enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
	enum tcpm_state next_state;

	switch (type) {
	case PD_CTRL_GOOD_CRC:
	case PD_CTRL_PING:
		break;
	case PD_CTRL_GET_SOURCE_CAP:
		switch (port->state) {
		case SRC_READY:
		case SNK_READY:
			tcpm_queue_message(port, PD_MSG_DATA_SOURCE_CAP);
			break;
		default:
			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
			break;
		}
		break;
	case PD_CTRL_GET_SINK_CAP:
		switch (port->state) {
		case SRC_READY:
		case SNK_READY:
			tcpm_queue_message(port, PD_MSG_DATA_SINK_CAP);
			break;
		default:
			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
			break;
		}
		break;
	case PD_CTRL_GOTO_MIN:
		break;
	case PD_CTRL_PS_RDY:
		switch (port->state) {
		case SNK_TRANSITION_SINK:
			if (port->vbus_present) {
				tcpm_set_current_limit(port,
						       port->current_limit,
						       port->supply_voltage);
				port->explicit_contract = true;
				tcpm_set_state(port, SNK_READY, 0);
			} else {
				/*
				 * Seen after power swap. Keep waiting for VBUS
				 * in a transitional state.
				 */
				tcpm_set_state(port,
					       SNK_TRANSITION_SINK_VBUS, 0);
			}
			break;
		case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
			tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
			break;
		case PR_SWAP_SNK_SRC_SINK_OFF:
			tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
			break;
		case VCONN_SWAP_WAIT_FOR_VCONN:
			tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
			break;
		default:
			break;
		}
		break;
	case PD_CTRL_REJECT:
	case PD_CTRL_WAIT:
		switch (port->state) {
		case SNK_NEGOTIATE_CAPABILITIES:
			/* USB PD specification, Figure 8-43 */
			if (port->explicit_contract)
				next_state = SNK_READY;
			else
				next_state = SNK_WAIT_CAPABILITIES;
			tcpm_set_state(port, next_state, 0);
			break;
		case DR_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, DR_SWAP_CANCEL, 0);
			break;
		case PR_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, PR_SWAP_CANCEL, 0);
			break;
		case VCONN_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
			break;
		default:
			break;
		}
		break;
	case PD_CTRL_ACCEPT:
		switch (port->state) {
		case SNK_NEGOTIATE_CAPABILITIES:
			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
			break;
		case SOFT_RESET_SEND:
			port->message_id = 0;
			port->rx_msgid = -1;
			if (port->pwr_role == TYPEC_SOURCE)
				next_state = SRC_SEND_CAPABILITIES;
			else
				next_state = SNK_WAIT_CAPABILITIES;
			tcpm_set_state(port, next_state, 0);
			break;
		case DR_SWAP_SEND:
			tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
			break;
		case PR_SWAP_SEND:
			tcpm_set_state(port, PR_SWAP_START, 0);
			break;
		case VCONN_SWAP_SEND:
			tcpm_set_state(port, VCONN_SWAP_START, 0);
			break;
		default:
			break;
		}
		break;
	case PD_CTRL_SOFT_RESET:
		tcpm_set_state(port, SOFT_RESET, 0);
		break;
	case PD_CTRL_DR_SWAP:
		if (port->port_type != TYPEC_PORT_DRP) {
			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
			break;
		}
		/*
		 * 6.3.9: If an alternate mode is active, a request to swap
		 * alternate modes shall trigger a port reset.
		 */
		switch (port->state) {
		case SRC_READY:
		case SNK_READY:
			tcpm_set_state(port, DR_SWAP_ACCEPT, 0);
			break;
		default:
			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
			break;
		}
		break;
	case PD_CTRL_PR_SWAP:
		if (port->port_type != TYPEC_PORT_DRP) {
			tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
			break;
		}
		switch (port->state) {
		case SRC_READY:
		case SNK_READY:
			tcpm_set_state(port, PR_SWAP_ACCEPT, 0);
			break;
		default:
			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
			break;
		}
		break;
	case PD_CTRL_VCONN_SWAP:
		switch (port->state) {
		case SRC_READY:
		case SNK_READY:
			tcpm_set_state(port, VCONN_SWAP_ACCEPT, 0);
			break;
		default:
			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
			break;
		}
		break;
	default:
		tcpm_log(port, "Unhandled ctrl message type %#x", type);
		break;
	}
}
static void tcpm_pd_rx_handler(struct work_struct *work)
{
	struct pd_rx_event *event = container_of(work,
						 struct pd_rx_event, work);
	const struct pd_message *msg = &event->msg;
	unsigned int cnt = pd_header_cnt_le(msg->header);
	struct tcpm_port *port = event->port;

	mutex_lock(&port->lock);

	tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
		 port->attached);

	if (port->attached) {
		enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
		unsigned int msgid = pd_header_msgid_le(msg->header);

		/*
		 * USB PD standard, 6.6.1.2:
		 * "... if MessageID value in a received Message is the
		 * same as the stored value, the receiver shall return a
		 * GoodCRC Message with that MessageID value and drop
		 * the Message (this is a retry of an already received
		 * Message). Note: this shall not apply to the Soft_Reset
		 * Message which always has a MessageID value of zero."
		 */
		if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
			goto done;
		port->rx_msgid = msgid;

		/*
		 * If both ends believe to be DFP/host, we have a data role
		 * mismatch.
		 */
		if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
		    (port->data_role == TYPEC_HOST)) {
			tcpm_log(port,
				 "Data role mismatch, initiating error recovery");
			tcpm_set_state(port, ERROR_RECOVERY, 0);
		} else {
			if (cnt)
				tcpm_pd_data_request(port, msg);
			else
				tcpm_pd_ctrl_request(port, msg);
		}
	}

done:
	mutex_unlock(&port->lock);
	kfree(event);
}

void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
{
	struct pd_rx_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;

	INIT_WORK(&event->work, tcpm_pd_rx_handler);
	event->port = port;
	memcpy(&event->msg, msg, sizeof(*msg));
	queue_work(port->wq, &event->work);
}
EXPORT_SYMBOL_GPL(tcpm_pd_receive);
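/*
 * tcpm_pd_receive() is the low level driver's entry point for received PD
 * messages. The event is allocated with GFP_ATOMIC so the function can be
 * called from atomic (e.g. interrupt) context; the message is then processed
 * by tcpm_pd_rx_handler() on the port workqueue, where the port lock can be
 * taken safely.
 */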
static int tcpm_pd_send_control(struct tcpm_port *port,
				enum pd_ctrl_msg_type type)
{
	struct pd_message msg;

	memset(&msg, 0, sizeof(msg));
	msg.header = PD_HEADER_LE(type, port->pwr_role,
				  port->data_role,
				  port->message_id, 0);

	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}

/*
 * Send queued message without affecting state.
 * Return true if state machine should go back to sleep,
 * false otherwise.
 */
static bool tcpm_send_queued_message(struct tcpm_port *port)
{
	enum pd_msg_request queued_message;

	do {
		queued_message = port->queued_message;
		port->queued_message = PD_MSG_NONE;

		switch (queued_message) {
		case PD_MSG_CTRL_WAIT:
			tcpm_pd_send_control(port, PD_CTRL_WAIT);
			break;
		case PD_MSG_CTRL_REJECT:
			tcpm_pd_send_control(port, PD_CTRL_REJECT);
			break;
		case PD_MSG_DATA_SINK_CAP:
			tcpm_pd_send_sink_caps(port);
			break;
		case PD_MSG_DATA_SOURCE_CAP:
			tcpm_pd_send_source_caps(port);
			break;
		default:
			break;
		}
	} while (port->queued_message != PD_MSG_NONE);

	if (port->delayed_state != INVALID_STATE) {
		if (time_is_after_jiffies(port->delayed_runtime)) {
			mod_delayed_work(port->wq, &port->state_machine,
					 port->delayed_runtime - jiffies);
			return true;
		}
		port->delayed_state = INVALID_STATE;
	}

	return false;
}
static int tcpm_pd_check_request(struct tcpm_port *port)
{
	u32 pdo, rdo = port->sink_request;
	unsigned int max, op, pdo_max, index;
	enum pd_pdo_type type;

	index = rdo_index(rdo);
	if (!index || index > port->nr_src_pdo)
		return -EINVAL;

	pdo = port->src_pdo[index - 1];
	type = pdo_type(pdo);
	switch (type) {
	case PDO_TYPE_FIXED:
	case PDO_TYPE_VAR:
		max = rdo_max_current(rdo);
		op = rdo_op_current(rdo);
		pdo_max = pdo_max_current(pdo);

		if (op > pdo_max)
			return -EINVAL;
		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
			return -EINVAL;

		if (type == PDO_TYPE_FIXED)
			tcpm_log(port,
				 "Requested %u mV, %u mA for %u / %u mA",
				 pdo_fixed_voltage(pdo), pdo_max, op, max);
		else
			tcpm_log(port,
				 "Requested %u -> %u mV, %u mA for %u / %u mA",
				 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
				 pdo_max, op, max);
		break;
	case PDO_TYPE_BATT:
		max = rdo_max_power(rdo);
		op = rdo_op_power(rdo);
		pdo_max = pdo_max_power(pdo);

		if (op > pdo_max)
			return -EINVAL;
		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
			return -EINVAL;

		tcpm_log(port,
			 "Requested %u -> %u mV, %u mW for %u / %u mW",
			 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
			 pdo_max, op, max);
		break;
	default:
		return -EINVAL;
	}

	port->op_vsafe5v = index == 1;

	return 0;
}
static int tcpm_pd_select_pdo(struct tcpm_port *port)
{
	unsigned int i, max_mw = 0, max_mv = 0;
	int ret = -EINVAL;

	/*
	 * Select the source PDO providing the most power while staying within
	 * the board's voltage limits. Prefer PDO providing exp
	 */
	for (i = 0; i < port->nr_source_caps; i++) {
		u32 pdo = port->source_caps[i];
		enum pd_pdo_type type = pdo_type(pdo);
		unsigned int mv, ma, mw;

		if (type == PDO_TYPE_FIXED)
			mv = pdo_fixed_voltage(pdo);
		else
			mv = pdo_min_voltage(pdo);

		if (type == PDO_TYPE_BATT) {
			mw = pdo_max_power(pdo);
		} else {
			ma = min(pdo_max_current(pdo),
				 port->max_snk_ma);
			mw = ma * mv / 1000;
		}

		/* Perfer higher voltages if available */
		if ((mw > max_mw || (mw == max_mw && mv > max_mv)) &&
		    mv <= port->max_snk_mv) {
			ret = i;
			max_mw = mw;
			max_mv = mv;
		}
	}

	return ret;
}
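/*
 * Selection policy: among all advertised source PDOs whose voltage fits
 * under max_snk_mv, pick the one offering the most power (current capped by
 * max_snk_ma), preferring the higher voltage when the offered power is equal.
 */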
static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
{
	unsigned int mv, ma, mw, flags;
	unsigned int max_ma, max_mw;
	enum pd_pdo_type type;
	int index;
	u32 pdo;

	index = tcpm_pd_select_pdo(port);
	if (index < 0)
		return -EINVAL;
	pdo = port->source_caps[index];
	type = pdo_type(pdo);

	if (type == PDO_TYPE_FIXED)
		mv = pdo_fixed_voltage(pdo);
	else
		mv = pdo_min_voltage(pdo);

	/* Select maximum available current within the board's power limit */
	if (type == PDO_TYPE_BATT) {
		mw = pdo_max_power(pdo);
		ma = 1000 * min(mw, port->max_snk_mw) / mv;
	} else {
		ma = min(pdo_max_current(pdo),
			 1000 * port->max_snk_mw / mv);
	}
	ma = min(ma, port->max_snk_ma);

	flags = RDO_USB_COMM | RDO_NO_SUSPEND;

	/* Set mismatch bit if offered power is less than operating power */
	mw = ma * mv / 1000;
	max_ma = ma;
	max_mw = mw;
	if (mw < port->operating_snk_mw) {
		flags |= RDO_CAP_MISMATCH;
		max_mw = port->operating_snk_mw;
		max_ma = max_mw * 1000 / mv;
	}

	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
		 port->polarity);

	if (type == PDO_TYPE_BATT) {
		*rdo = RDO_BATT(index + 1, mw, max_mw, flags);

		tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
			 index, mv, mw,
			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
	} else {
		*rdo = RDO_FIXED(index + 1, ma, max_ma, flags);

		tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
			 index, mv, ma,
			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
	}

	port->current_limit = ma;
	port->supply_voltage = mv;

	return 0;
}
static int tcpm_pd_send_request(struct tcpm_port *port)
{
	struct pd_message msg;
	int ret;
	u32 rdo;

	ret = tcpm_pd_build_request(port, &rdo);
	if (ret < 0)
		return ret;

	memset(&msg, 0, sizeof(msg));
	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
				  port->pwr_role,
				  port->data_role,
				  port->message_id, 1);
	msg.payload[0] = cpu_to_le32(rdo);

	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}

static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
{
	int ret;

	if (enable && port->vbus_charge)
		return -EINVAL;

	tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);

	ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
	if (ret < 0)
		return ret;

	port->vbus_source = enable;
	return 0;
}

static int tcpm_set_charge(struct tcpm_port *port, bool charge)
{
	int ret;

	if (charge && port->vbus_source)
		return -EINVAL;

	if (charge != port->vbus_charge) {
		tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
		ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
					   charge);
		if (ret < 0)
			return ret;
	}
	port->vbus_charge = charge;
	return 0;
}
static bool tcpm_start_drp_toggling(struct tcpm_port *port)
{
	int ret;

	if (port->tcpc->start_drp_toggling &&
	    port->port_type == TYPEC_PORT_DRP) {
		tcpm_log_force(port, "Start DRP toggling");
		ret = port->tcpc->start_drp_toggling(port->tcpc,
						     tcpm_rp_cc(port));
		if (!ret)
			return true;
	}

	return false;
}

static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
{
	tcpm_log(port, "cc:=%d", cc);
	port->cc_req = cc;
	port->tcpc->set_cc(port->tcpc, cc);
}

static int tcpm_init_vbus(struct tcpm_port *port)
{
	int ret;

	ret = port->tcpc->set_vbus(port->tcpc, false, false);
	port->vbus_source = false;
	port->vbus_charge = false;
	return ret;
}

static int tcpm_init_vconn(struct tcpm_port *port)
{
	int ret;

	ret = port->tcpc->set_vconn(port->tcpc, false);
	port->vconn_role = TYPEC_SINK;
	return ret;
}
static void tcpm_typec_connect(struct tcpm_port *port)
{
	if (!port->connected) {
		/* Make sure we don't report stale identity information */
		memset(&port->partner_ident, 0, sizeof(port->partner_ident));
		port->partner_desc.usb_pd = port->pd_capable;
		if (tcpm_port_is_debug(port))
			port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
		else if (tcpm_port_is_audio(port))
			port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
		else
			port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
		port->partner = typec_register_partner(port->typec_port,
						       &port->partner_desc);
		port->connected = true;
	}
}
static int tcpm_src_attach(struct tcpm_port *port)
{
	enum typec_cc_polarity polarity =
				port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
							 : TYPEC_POLARITY_CC1;
	int ret;

	if (port->attached)
		return 0;

	ret = tcpm_set_polarity(port, polarity);
	if (ret < 0)
		return ret;

	ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
	if (ret < 0)
		return ret;

	ret = port->tcpc->set_pd_rx(port->tcpc, true);
	if (ret < 0)
		goto out_disable_mux;

	/*
	 * USB Type-C specification, version 1.2,
	 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
	 * Enable VCONN only if the non-RD port is set to RA.
	 */
	if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
	    (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
		ret = tcpm_set_vconn(port, true);
		if (ret < 0)
			goto out_disable_pd;
	}

	ret = tcpm_set_vbus(port, true);
	if (ret < 0)
		goto out_disable_vconn;

	port->pd_capable = false;

	port->partner = NULL;

	port->attached = true;
	port->send_discover = true;

	return 0;

out_disable_vconn:
	tcpm_set_vconn(port, false);
out_disable_pd:
	port->tcpc->set_pd_rx(port->tcpc, false);
out_disable_mux:
	tcpm_mux_set(port, TYPEC_MUX_NONE, TCPC_USB_SWITCH_DISCONNECT);
	return ret;
}
static void tcpm_typec_disconnect(struct tcpm_port *port)
{
	if (port->connected) {
		typec_unregister_partner(port->partner);
		port->partner = NULL;
		port->connected = false;
	}
}

static void tcpm_unregister_altmodes(struct tcpm_port *port)
{
	struct pd_mode_data *modep = &port->mode_data;
	int i;

	for (i = 0; i < modep->altmodes; i++) {
		typec_unregister_altmode(port->partner_altmode[i]);
		port->partner_altmode[i] = NULL;
	}

	memset(modep, 0, sizeof(*modep));
}
static void tcpm_reset_port(struct tcpm_port *port)
{
	tcpm_unregister_altmodes(port);
	tcpm_typec_disconnect(port);
	port->attached = false;
	port->pd_capable = false;

	/*
	 * First Rx ID should be 0; set this to a sentinel of -1 so that
	 * we can check tcpm_pd_rx_handler() if we had seen it before.
	 */
	port->rx_msgid = -1;

	port->tcpc->set_pd_rx(port->tcpc, false);
	tcpm_init_vbus(port);	/* also disables charging */
	tcpm_init_vconn(port);
	tcpm_set_current_limit(port, 0, 0);
	tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
	tcpm_set_attached_state(port, false);
	port->try_src_count = 0;
	port->try_snk_count = 0;
}
static void tcpm_detach(struct tcpm_port *port)
{
	if (!port->attached)
		return;

	if (tcpm_port_is_disconnected(port))
		port->hard_reset_count = 0;

	tcpm_reset_port(port);
}

static void tcpm_src_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}

static int tcpm_snk_attach(struct tcpm_port *port)
{
	int ret;

	if (port->attached)
		return 0;

	ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
				TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
	if (ret < 0)
		return ret;

	ret = tcpm_set_roles(port, true, TYPEC_SINK, TYPEC_DEVICE);
	if (ret < 0)
		return ret;

	port->pd_capable = false;

	port->partner = NULL;

	port->attached = true;
	port->send_discover = true;

	return 0;
}

static void tcpm_snk_detach(struct tcpm_port *port)
{
	tcpm_detach(port);

	/* XXX: (Dis)connect SuperSpeed mux? */
}

static int tcpm_acc_attach(struct tcpm_port *port)
{
	int ret;

	if (port->attached)
		return 0;

	ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
	if (ret < 0)
		return ret;

	port->partner = NULL;

	tcpm_typec_connect(port);

	port->attached = true;

	return 0;
}

static void tcpm_acc_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
{
	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
		return HARD_RESET_SEND;
	if (port->pd_capable)
		return ERROR_RECOVERY;
	if (port->pwr_role == TYPEC_SOURCE)
		return SRC_UNATTACHED;
	if (port->state == SNK_WAIT_CAPABILITIES)
		return SNK_READY;
	return SNK_UNATTACHED;
}

static inline enum tcpm_state ready_state(struct tcpm_port *port)
{
	if (port->pwr_role == TYPEC_SOURCE)
		return SRC_READY;
	else
		return SNK_READY;
}

static inline enum tcpm_state unattached_state(struct tcpm_port *port)
{
	if (port->port_type == TYPEC_PORT_DRP) {
		if (port->pwr_role == TYPEC_SOURCE)
			return SRC_UNATTACHED;
		else
			return SNK_UNATTACHED;
	} else if (port->port_type == TYPEC_PORT_DFP) {
		return SRC_UNATTACHED;
	}

	return SNK_UNATTACHED;
}
static void tcpm_check_send_discover(struct tcpm_port *port)
{
	if (port->data_role == TYPEC_HOST && port->send_discover &&
	    port->pd_capable) {
		tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
		port->send_discover = false;
	}
}

static void tcpm_swap_complete(struct tcpm_port *port, int result)
{
	if (port->swap_pending) {
		port->swap_status = result;
		port->swap_pending = false;
		port->non_pd_role_swap = false;
		complete(&port->swap_complete);
	}
}

static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
{
	switch (cc) {
	case TYPEC_CC_RP_1_5:
		return TYPEC_PWR_MODE_1_5A;
	case TYPEC_CC_RP_3_0:
		return TYPEC_PWR_MODE_3_0A;
	case TYPEC_CC_RP_DEF:
	default:
		return TYPEC_PWR_MODE_USB;
	}
}
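/*
 * run_state_machine() below implements one step of the Type-C/PD port state
 * machine: it dispatches on port->state, performs the actions for the state
 * just entered, and schedules the next transition (possibly delayed) via
 * tcpm_set_state()/tcpm_set_state_cond().
 */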
2232 static void run_state_machine(struct tcpm_port
*port
)
2235 enum typec_pwr_opmode opmode
;
2238 port
->enter_state
= port
->state
;
2239 switch (port
->state
) {
2243 case SRC_UNATTACHED
:
2244 if (!port
->non_pd_role_swap
)
2245 tcpm_swap_complete(port
, -ENOTCONN
);
2246 tcpm_src_detach(port
);
2247 if (tcpm_start_drp_toggling(port
)) {
2248 tcpm_set_state(port
, DRP_TOGGLING
, 0);
2251 tcpm_set_cc(port
, tcpm_rp_cc(port
));
2252 if (port
->port_type
== TYPEC_PORT_DRP
)
2253 tcpm_set_state(port
, SNK_UNATTACHED
, PD_T_DRP_SNK
);
2255 case SRC_ATTACH_WAIT
:
2256 if (tcpm_port_is_debug(port
))
2257 tcpm_set_state(port
, DEBUG_ACC_ATTACHED
,
2259 else if (tcpm_port_is_audio(port
))
2260 tcpm_set_state(port
, AUDIO_ACC_ATTACHED
,
2262 else if (tcpm_port_is_source(port
))
2263 tcpm_set_state(port
,
2264 tcpm_try_snk(port
) ? SNK_TRY
2270 port
->try_snk_count
++;
2273 * - Do not drive vconn or vbus
2274 * - Terminate CC pins (both) to Rd
2276 * - Wait for tDRPTry (PD_T_DRP_TRY).
2277 * Until then, ignore any state changes.
2279 tcpm_set_cc(port
, TYPEC_CC_RD
);
2280 tcpm_set_state(port
, SNK_TRY_WAIT
, PD_T_DRP_TRY
);
2283 if (tcpm_port_is_sink(port
)) {
2284 tcpm_set_state(port
, SNK_TRY_WAIT_DEBOUNCE
, 0);
2286 tcpm_set_state(port
, SRC_TRYWAIT
, 0);
2290 case SNK_TRY_WAIT_DEBOUNCE
:
2291 tcpm_set_state(port
, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS
,
2294 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS
:
2295 if (port
->vbus_present
&& tcpm_port_is_sink(port
)) {
2296 tcpm_set_state(port
, SNK_ATTACHED
, 0);
2298 tcpm_set_state(port
, SRC_TRYWAIT
, 0);
2303 tcpm_set_cc(port
, tcpm_rp_cc(port
));
2304 if (port
->max_wait
== 0) {
2305 port
->max_wait
= jiffies
+
2306 msecs_to_jiffies(PD_T_DRP_TRY
);
2307 tcpm_set_state(port
, SRC_TRYWAIT_UNATTACHED
,
2310 if (time_is_after_jiffies(port
->max_wait
))
2311 tcpm_set_state(port
, SRC_TRYWAIT_UNATTACHED
,
2312 jiffies_to_msecs(port
->max_wait
-
2315 tcpm_set_state(port
, SNK_UNATTACHED
, 0);
2318 case SRC_TRYWAIT_DEBOUNCE
:
2319 tcpm_set_state(port
, SRC_ATTACHED
, PD_T_CC_DEBOUNCE
);
2321 case SRC_TRYWAIT_UNATTACHED
:
2322 tcpm_set_state(port
, SNK_UNATTACHED
, 0);
2326 ret
= tcpm_src_attach(port
);
2327 tcpm_set_state(port
, SRC_UNATTACHED
,
2328 ret
< 0 ? 0 : PD_T_PS_SOURCE_ON
);
2331 opmode
= tcpm_get_pwr_opmode(tcpm_rp_cc(port
));
2332 typec_set_pwr_opmode(port
->typec_port
, opmode
);
2333 port
->pwr_opmode
= TYPEC_PWR_MODE_USB
;
2334 port
->caps_count
= 0;
2335 port
->message_id
= 0;
2336 port
->rx_msgid
= -1;
2337 port
->explicit_contract
= false;
2338 tcpm_set_state(port
, SRC_SEND_CAPABILITIES
, 0);
2340 case SRC_SEND_CAPABILITIES
:
2342 if (port
->caps_count
> PD_N_CAPS_COUNT
) {
2343 tcpm_set_state(port
, SRC_READY
, 0);
2346 ret
= tcpm_pd_send_source_caps(port
);
2348 tcpm_set_state(port
, SRC_SEND_CAPABILITIES
,
2349 PD_T_SEND_SOURCE_CAP
);
2352 * Per standard, we should clear the reset counter here.
2353 * However, that can result in state machine hang-ups.
2354 * Reset it only in READY state to improve stability.
2356 /* port->hard_reset_count = 0; */
2357 port
->caps_count
= 0;
2358 port
->pd_capable
= true;
2359 tcpm_set_state_cond(port
, hard_reset_state(port
),
2360 PD_T_SEND_SOURCE_CAP
);
2363 case SRC_NEGOTIATE_CAPABILITIES
:
2364 ret
= tcpm_pd_check_request(port
);
2366 tcpm_pd_send_control(port
, PD_CTRL_REJECT
);
2367 if (!port
->explicit_contract
) {
2368 tcpm_set_state(port
,
2369 SRC_WAIT_NEW_CAPABILITIES
, 0);
2371 tcpm_set_state(port
, SRC_READY
, 0);
2374 tcpm_pd_send_control(port
, PD_CTRL_ACCEPT
);
2375 tcpm_set_state(port
, SRC_TRANSITION_SUPPLY
,
2376 PD_T_SRC_TRANSITION
);
2379 case SRC_TRANSITION_SUPPLY
:
2380 /* XXX: regulator_set_voltage(vbus, ...) */
2381 tcpm_pd_send_control(port
, PD_CTRL_PS_RDY
);
2382 port
->explicit_contract
= true;
2383 typec_set_pwr_opmode(port
->typec_port
, TYPEC_PWR_MODE_PD
);
2384 port
->pwr_opmode
= TYPEC_PWR_MODE_PD
;
2385 tcpm_set_state_cond(port
, SRC_READY
, 0);
2389 port
->hard_reset_count
= 0;
2391 port
->try_src_count
= 0;
2393 tcpm_swap_complete(port
, 0);
2394 tcpm_typec_connect(port
);
2395 tcpm_check_send_discover(port
);
2398 * Sending ping messages is not necessary if
2399 * - the source operates at vSafe5V
2401 * - The system is not operating in PD mode
2403 * - Both partners are connected using a Type-C connector
2405 * There is no actual need to send PD messages since the local
2406 * port type-c and the spec does not clearly say whether PD is
2407 * possible when type-c is connected to Type-A/B
2410 case SRC_WAIT_NEW_CAPABILITIES
:
2411 /* Nothing to do... */
2415 case SNK_UNATTACHED
:
2416 if (!port
->non_pd_role_swap
)
2417 tcpm_swap_complete(port
, -ENOTCONN
);
2418 tcpm_snk_detach(port
);
2419 if (tcpm_start_drp_toggling(port
)) {
2420 tcpm_set_state(port
, DRP_TOGGLING
, 0);
2423 tcpm_set_cc(port
, TYPEC_CC_RD
);
2424 if (port
->port_type
== TYPEC_PORT_DRP
)
2425 tcpm_set_state(port
, SRC_UNATTACHED
, PD_T_DRP_SRC
);
2427 case SNK_ATTACH_WAIT
:
2428 if ((port
->cc1
== TYPEC_CC_OPEN
&&
2429 port
->cc2
!= TYPEC_CC_OPEN
) ||
2430 (port
->cc1
!= TYPEC_CC_OPEN
&&
2431 port
->cc2
== TYPEC_CC_OPEN
))
2432 tcpm_set_state(port
, SNK_DEBOUNCED
,
2434 else if (tcpm_port_is_disconnected(port
))
2435 tcpm_set_state(port
, SNK_UNATTACHED
,
2439 if (tcpm_port_is_disconnected(port
))
2440 tcpm_set_state(port
, SNK_UNATTACHED
,
2442 else if (port
->vbus_present
)
2443 tcpm_set_state(port
,
2444 tcpm_try_src(port
) ? SRC_TRY
2448 /* Wait for VBUS, but not forever */
2449 tcpm_set_state(port
, PORT_RESET
, PD_T_PS_SOURCE_ON
);
2453 port
->try_src_count
++;
2454 tcpm_set_cc(port
, tcpm_rp_cc(port
));
2456 tcpm_set_state(port
, SRC_TRY_WAIT
, 0);
2459 if (port
->max_wait
== 0) {
2460 port
->max_wait
= jiffies
+
2461 msecs_to_jiffies(PD_T_DRP_TRY
);
2462 msecs
= PD_T_DRP_TRY
;
2464 if (time_is_after_jiffies(port
->max_wait
))
2465 msecs
= jiffies_to_msecs(port
->max_wait
-
2470 tcpm_set_state(port
, SNK_TRYWAIT
, msecs
);
2472 case SRC_TRY_DEBOUNCE
:
2473 tcpm_set_state(port
, SRC_ATTACHED
, PD_T_PD_DEBOUNCE
);
2476 tcpm_set_cc(port
, TYPEC_CC_RD
);
2477 tcpm_set_state(port
, SNK_TRYWAIT_VBUS
, PD_T_CC_DEBOUNCE
);
2479 case SNK_TRYWAIT_VBUS
:
2481 * TCPM stays in this state indefinitely until VBUS
2482 * is detected as long as Rp is not detected for
2483 * more than a time period of tPDDebounce.
2485 if (port
->vbus_present
&& tcpm_port_is_sink(port
)) {
2486 tcpm_set_state(port
, SNK_ATTACHED
, 0);
2489 if (!tcpm_port_is_sink(port
))
2490 tcpm_set_state(port
, SNK_TRYWAIT_DEBOUNCE
, 0);
2492 case SNK_TRYWAIT_DEBOUNCE
:
2493 tcpm_set_state(port
, SNK_UNATTACHED
, PD_T_PD_DEBOUNCE
);
2496 ret
= tcpm_snk_attach(port
);
2498 tcpm_set_state(port
, SNK_UNATTACHED
, 0);
2500 tcpm_set_state(port
, SNK_STARTUP
, 0);
2503 opmode
= tcpm_get_pwr_opmode(port
->polarity
?
2504 port
->cc2
: port
->cc1
);
2505 typec_set_pwr_opmode(port
->typec_port
, opmode
);
2506 port
->pwr_opmode
= TYPEC_PWR_MODE_USB
;
2507 port
->message_id
= 0;
2508 port
->rx_msgid
= -1;
2509 port
->explicit_contract
= false;
2510 tcpm_set_state(port
, SNK_DISCOVERY
, 0);
2513 if (port
->vbus_present
) {
2514 tcpm_set_current_limit(port
,
2515 tcpm_get_current_limit(port
),
2517 tcpm_set_charge(port
, true);
2518 tcpm_set_state(port
, SNK_WAIT_CAPABILITIES
, 0);
2522 * For DRP, timeouts differ. Also, handling is supposed to be
2523 * different and much more complex (dead battery detection;
2524 * see USB power delivery specification, section 8.3.3.6.1.5.1).
2526 tcpm_set_state(port
, hard_reset_state(port
),
2527 port
->port_type
== TYPEC_PORT_DRP
?
2528 PD_T_DB_DETECT
: PD_T_NO_RESPONSE
);
	case SNK_DISCOVERY_DEBOUNCE:
		tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
			       PD_T_CC_DEBOUNCE);
		break;
	case SNK_DISCOVERY_DEBOUNCE_DONE:
		if (!tcpm_port_is_disconnected(port) &&
		    tcpm_port_is_sink(port) &&
		    time_is_after_jiffies(port->delayed_runtime)) {
			tcpm_set_state(port, SNK_DISCOVERY,
				       port->delayed_runtime - jiffies);
			break;
		}
		tcpm_set_state(port, unattached_state(port), 0);
		break;
	case SNK_WAIT_CAPABILITIES:
		ret = port->tcpc->set_pd_rx(port->tcpc, true);
		if (ret < 0) {
			tcpm_set_state(port, SNK_READY, 0);
			break;
		}
		/*
		 * If VBUS has never been low, and we time out waiting
		 * for source cap, try a soft reset first, in case we
		 * were already in a stable contract before this boot.
		 * Do this only once.
		 */
		if (port->vbus_never_low) {
			port->vbus_never_low = false;
			tcpm_set_state(port, SOFT_RESET_SEND,
				       PD_T_SINK_WAIT_CAP);
		} else {
			tcpm_set_state(port, hard_reset_state(port),
				       PD_T_SINK_WAIT_CAP);
		}
		break;
	case SNK_NEGOTIATE_CAPABILITIES:
		port->pd_capable = true;
		port->hard_reset_count = 0;
		ret = tcpm_pd_send_request(port);
		if (ret < 0) {
			/* Let the Source send capabilities again. */
			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
		} else {
			tcpm_set_state_cond(port, hard_reset_state(port),
					    PD_T_SENDER_RESPONSE);
		}
		break;
	case SNK_TRANSITION_SINK:
	case SNK_TRANSITION_SINK_VBUS:
		tcpm_set_state(port, hard_reset_state(port),
			       PD_T_PS_TRANSITION);
		break;
	case SNK_READY:
		port->try_snk_count = 0;
		if (port->explicit_contract) {
			typec_set_pwr_opmode(port->typec_port,
					     TYPEC_PWR_MODE_PD);
			port->pwr_opmode = TYPEC_PWR_MODE_PD;
		}

		tcpm_swap_complete(port, 0);
		tcpm_typec_connect(port);
		tcpm_check_send_discover(port);
		break;
	/* Accessory states */
	case ACC_UNATTACHED:
		tcpm_acc_detach(port);
		tcpm_set_state(port, SRC_UNATTACHED, 0);
		break;
	case DEBUG_ACC_ATTACHED:
	case AUDIO_ACC_ATTACHED:
		ret = tcpm_acc_attach(port);
		if (ret < 0)
			tcpm_set_state(port, ACC_UNATTACHED, 0);
		break;
	case AUDIO_ACC_DEBOUNCE:
		tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
		break;
	/* Hard_Reset states */
	case HARD_RESET_SEND:
		tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
		tcpm_set_state(port, HARD_RESET_START, 0);
		break;
	case HARD_RESET_START:
		port->hard_reset_count++;
		port->tcpc->set_pd_rx(port->tcpc, false);
		tcpm_unregister_altmodes(port);
		port->send_discover = true;
		if (port->pwr_role == TYPEC_SOURCE)
			tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
				       PD_T_PS_HARD_RESET);
		else
			tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
		break;
	case SRC_HARD_RESET_VBUS_OFF:
		tcpm_set_vconn(port, true);
		tcpm_set_vbus(port, false);
		tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
		break;
	case SRC_HARD_RESET_VBUS_ON:
		tcpm_set_vbus(port, true);
		port->tcpc->set_pd_rx(port->tcpc, true);
		tcpm_set_attached_state(port, true);
		tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
		break;
	case SNK_HARD_RESET_SINK_OFF:
		tcpm_set_vconn(port, false);
		tcpm_set_charge(port, false);
		tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
		/*
		 * VBUS may or may not toggle, depending on the adapter.
		 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
		 * directly after timeout.
		 */
		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
		break;
	case SNK_HARD_RESET_WAIT_VBUS:
		/* Assume we're disconnected if VBUS doesn't come back. */
		tcpm_set_state(port, SNK_UNATTACHED,
			       PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
		break;
	case SNK_HARD_RESET_SINK_ON:
		/* Note: There is no guarantee that VBUS is on in this state */
		/*
		 * The specification suggests that dual mode ports in sink
		 * mode should transition to state PE_SRC_Transition_to_default.
		 * See USB power delivery specification chapter 8.3.3.6.1.3.
		 * This would mean to:
		 * - turn off VCONN, reset power supply
		 * - request hardware reset
		 * - Transition to state PE_Src_Startup
		 * SNK only ports shall transition to state Snk_Startup
		 * (see chapter 8.3.3.3.8).
		 * Similarly, dual-mode ports in source mode should transition
		 * to PE_SNK_Transition_to_default.
		 */
		tcpm_set_attached_state(port, true);
		tcpm_set_state(port, SNK_STARTUP, 0);
		break;
	/* Soft_Reset states */
	case SOFT_RESET:
		port->message_id = 0;
		port->rx_msgid = -1;
		tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
		if (port->pwr_role == TYPEC_SOURCE)
			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
		else
			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
		break;
	case SOFT_RESET_SEND:
		port->message_id = 0;
		port->rx_msgid = -1;
		if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
			tcpm_set_state_cond(port, hard_reset_state(port), 0);
		else
			tcpm_set_state_cond(port, hard_reset_state(port),
					    PD_T_SENDER_RESPONSE);
		break;
	/* DR_Swap states */
	case DR_SWAP_SEND:
		tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
		tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
				    PD_T_SENDER_RESPONSE);
		break;
	case DR_SWAP_ACCEPT:
		tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
		tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
		break;
	case DR_SWAP_SEND_TIMEOUT:
		tcpm_swap_complete(port, -ETIMEDOUT);
		tcpm_set_state(port, ready_state(port), 0);
		break;
	case DR_SWAP_CHANGE_DR:
		if (port->data_role == TYPEC_HOST) {
			tcpm_unregister_altmodes(port);
			tcpm_set_roles(port, true, port->pwr_role,
				       TYPEC_DEVICE);
		} else {
			tcpm_set_roles(port, true, port->pwr_role,
				       TYPEC_HOST);
		}
		port->send_discover = true;
		tcpm_set_state(port, ready_state(port), 0);
		break;
	/* PR_Swap states */
	case PR_SWAP_ACCEPT:
		tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
		tcpm_set_state(port, PR_SWAP_START, 0);
		break;
	case PR_SWAP_SEND:
		tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
		tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
				    PD_T_SENDER_RESPONSE);
		break;
	case PR_SWAP_SEND_TIMEOUT:
		tcpm_swap_complete(port, -ETIMEDOUT);
		tcpm_set_state(port, ready_state(port), 0);
		break;
	case PR_SWAP_START:
		if (port->pwr_role == TYPEC_SOURCE)
			tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
				       PD_T_SRC_TRANSITION);
		else
			tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
		break;
	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
		tcpm_set_vbus(port, false);
		port->explicit_contract = false;
		/* allow time for Vbus discharge, must be < tSrcSwapStdby */
		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
			       PD_T_SRCSWAPSTDBY);
		break;
	case PR_SWAP_SRC_SNK_SOURCE_OFF:
		tcpm_set_cc(port, TYPEC_CC_RD);
		/* allow CC debounce */
		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
			       PD_T_CC_DEBOUNCE);
		break;
	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
		/*
		 * USB-PD standard, 6.2.1.4, Port Power Role:
		 * "During the Power Role Swap Sequence, for the initial Source
		 * Port, the Port Power Role field shall be set to Sink in the
		 * PS_RDY Message indicating that the initial Source’s power
		 * supply is turned off"
		 */
		tcpm_set_pwr_role(port, TYPEC_SINK);
		if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
			tcpm_set_state(port, ERROR_RECOVERY, 0);
			break;
		}
		tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
		break;
	case PR_SWAP_SRC_SNK_SINK_ON:
		tcpm_set_state(port, SNK_STARTUP, 0);
		break;
	case PR_SWAP_SNK_SRC_SINK_OFF:
		tcpm_set_charge(port, false);
		tcpm_set_state(port, hard_reset_state(port),
			       PD_T_PS_SOURCE_OFF);
		break;
	case PR_SWAP_SNK_SRC_SOURCE_ON:
		tcpm_set_cc(port, tcpm_rp_cc(port));
		tcpm_set_vbus(port, true);
		/*
		 * Allow time for VBUS ramp-up; it must be < tNewSrc.
		 * This window also overlaps with CC debounce, so wait
		 * for the maximum of the two, which is PD_T_NEWSRC.
		 */
		tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
			       PD_T_NEWSRC);
		break;
	case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
		/*
		 * USB PD standard, 6.2.1.4:
		 * "Subsequent Messages initiated by the Policy Engine,
		 * such as the PS_RDY Message sent to indicate that Vbus
		 * is ready, will have the Port Power Role field set to
		 * Source."
		 */
		tcpm_set_pwr_role(port, TYPEC_SOURCE);
		tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
		tcpm_set_state(port, SRC_STARTUP, 0);
		break;
	case VCONN_SWAP_ACCEPT:
		tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
		tcpm_set_state(port, VCONN_SWAP_START, 0);
		break;
	case VCONN_SWAP_SEND:
		tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
		tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
			       PD_T_SENDER_RESPONSE);
		break;
	case VCONN_SWAP_SEND_TIMEOUT:
		tcpm_swap_complete(port, -ETIMEDOUT);
		tcpm_set_state(port, ready_state(port), 0);
		break;
	case VCONN_SWAP_START:
		if (port->vconn_role == TYPEC_SOURCE)
			tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
		else
			tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
		break;
	case VCONN_SWAP_WAIT_FOR_VCONN:
		tcpm_set_state(port, hard_reset_state(port),
			       PD_T_VCONN_SOURCE_ON);
		break;
	case VCONN_SWAP_TURN_ON_VCONN:
		tcpm_set_vconn(port, true);
		tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
		tcpm_set_state(port, ready_state(port), 0);
		break;
	case VCONN_SWAP_TURN_OFF_VCONN:
		tcpm_set_vconn(port, false);
		tcpm_set_state(port, ready_state(port), 0);
		break;
	case DR_SWAP_CANCEL:
	case PR_SWAP_CANCEL:
	case VCONN_SWAP_CANCEL:
		tcpm_swap_complete(port, port->swap_status);
		if (port->pwr_role == TYPEC_SOURCE)
			tcpm_set_state(port, SRC_READY, 0);
		else
			tcpm_set_state(port, SNK_READY, 0);
		break;

	/* BIST */
	case BIST_RX:
		switch (BDO_MODE_MASK(port->bist_request)) {
		case BDO_MODE_CARRIER2:
			tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
			break;
		default:
			break;
		}
		/* Always switch to unattached state */
		tcpm_set_state(port, unattached_state(port), 0);
		break;
	case ERROR_RECOVERY:
		tcpm_swap_complete(port, -EPROTO);
		tcpm_set_state(port, PORT_RESET, 0);
		break;
	case PORT_RESET:
		tcpm_reset_port(port);
		tcpm_set_cc(port, TYPEC_CC_OPEN);
		tcpm_set_state(port, PORT_RESET_WAIT_OFF,
			       PD_T_ERROR_RECOVERY);
		break;
	case PORT_RESET_WAIT_OFF:
		tcpm_set_state(port,
			       tcpm_default_state(port),
			       port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
		break;
	default:
		WARN(1, "Unexpected port state %d\n", port->state);
		break;
	}
}
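
/*
 * Hard_Reset flow as implemented in the cases above:
 *
 *   source: HARD_RESET_SEND -> HARD_RESET_START -> SRC_HARD_RESET_VBUS_OFF ->
 *           SRC_HARD_RESET_VBUS_ON, then SRC_STARTUP once VBUS is reported
 *           back on (SRC_UNATTACHED if it is not);
 *   sink:   HARD_RESET_SEND -> HARD_RESET_START -> SNK_HARD_RESET_SINK_OFF ->
 *           SNK_HARD_RESET_SINK_ON (via SNK_HARD_RESET_WAIT_VBUS if VBUS
 *           drops first) -> SNK_STARTUP.
 */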
static void tcpm_state_machine_work(struct work_struct *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port,
					      state_machine.work);
	enum tcpm_state prev_state;

	mutex_lock(&port->lock);
	port->state_machine_running = true;

	if (port->queued_message && tcpm_send_queued_message(port))
		goto done;

	/* If we were queued due to a delayed state change, update it now */
	if (port->delayed_state) {
		tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
			 tcpm_states[port->state],
			 tcpm_states[port->delayed_state], port->delay_ms);
		port->prev_state = port->state;
		port->state = port->delayed_state;
		port->delayed_state = INVALID_STATE;
	}

	/*
	 * Continue running as long as we have (non-delayed) state changes
	 * to make.
	 */
	do {
		prev_state = port->state;
		run_state_machine(port);
		if (port->queued_message)
			tcpm_send_queued_message(port);
	} while (port->state != prev_state && !port->delayed_state);

done:
	port->state_machine_running = false;
	mutex_unlock(&port->lock);
}
static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
			    enum typec_cc_status cc2)
{
	enum typec_cc_status old_cc1, old_cc2;
	enum tcpm_state new_state;

	old_cc1 = port->cc1;
	old_cc2 = port->cc2;
	port->cc1 = cc1;
	port->cc2 = cc2;

	tcpm_log_force(port,
		       "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
		       old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
		       port->polarity,
		       tcpm_port_is_disconnected(port) ? "disconnected"
						       : "connected");

	switch (port->state) {
	case DRP_TOGGLING:
		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
		    tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
		else if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
		break;
	case SRC_UNATTACHED:
	case ACC_UNATTACHED:
		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
		    tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
		break;
	case SRC_ATTACH_WAIT:
		if (tcpm_port_is_disconnected(port) ||
		    tcpm_port_is_audio_detached(port))
			tcpm_set_state(port, SRC_UNATTACHED, 0);
		else if (cc1 != old_cc1 || cc2 != old_cc2)
			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
		break;
	case SRC_ATTACHED:
	case SRC_SEND_CAPABILITIES:
	case SRC_READY:
		if (tcpm_port_is_disconnected(port) ||
		    !tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_UNATTACHED, 0);
		break;
	case SNK_UNATTACHED:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
		break;
	case SNK_ATTACH_WAIT:
		if ((port->cc1 == TYPEC_CC_OPEN &&
		     port->cc2 != TYPEC_CC_OPEN) ||
		    (port->cc1 != TYPEC_CC_OPEN &&
		     port->cc2 == TYPEC_CC_OPEN))
			new_state = SNK_DEBOUNCED;
		else if (tcpm_port_is_disconnected(port))
			new_state = SNK_UNATTACHED;
		else
			break;
		if (new_state != port->delayed_state)
			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
		break;
	case SNK_DEBOUNCED:
		if (tcpm_port_is_disconnected(port))
			new_state = SNK_UNATTACHED;
		else if (port->vbus_present)
			new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
		else
			new_state = SNK_UNATTACHED;
		if (new_state != port->delayed_state)
			tcpm_set_state(port, SNK_DEBOUNCED, 0);
		break;
	case SNK_READY:
		if (tcpm_port_is_disconnected(port))
			tcpm_set_state(port, unattached_state(port), 0);
		else if (!port->pd_capable &&
			 (cc1 != old_cc1 || cc2 != old_cc2))
			tcpm_set_current_limit(port,
					       tcpm_get_current_limit(port),
					       5000);
		break;
	case AUDIO_ACC_ATTACHED:
		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
			tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
		break;
	case AUDIO_ACC_DEBOUNCE:
		if (tcpm_port_is_audio(port))
			tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
		break;
	case DEBUG_ACC_ATTACHED:
		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
			tcpm_set_state(port, ACC_UNATTACHED, 0);
		break;
	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;
	case SNK_DISCOVERY:
		/* CC line is unstable, wait for debounce */
		if (tcpm_port_is_disconnected(port))
			tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
		break;
	case SNK_DISCOVERY_DEBOUNCE:
		break;
	case SRC_TRYWAIT:
		/* Hand over to state machine if needed */
		if (!port->vbus_present && tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
		break;
	case SRC_TRYWAIT_DEBOUNCE:
		if (port->vbus_present || !tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		if (!tcpm_port_is_sink(port)) {
			port->max_wait = 0;
			tcpm_set_state(port, SRC_TRYWAIT, 0);
		}
		break;
	case SRC_TRY_WAIT:
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
		break;
	case SRC_TRY_DEBOUNCE:
		tcpm_set_state(port, SRC_TRY_WAIT, 0);
		break;
	case SNK_TRYWAIT_DEBOUNCE:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
		break;
	case SNK_TRYWAIT_VBUS:
		if (!tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
		break;
	case SNK_TRYWAIT:
		/* Do nothing, waiting for tCCDebounce */
		break;
	case PR_SWAP_SNK_SRC_SINK_OFF:
	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
	case PR_SWAP_SRC_SNK_SOURCE_OFF:
	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
	case PR_SWAP_SNK_SRC_SOURCE_ON:
		/*
		 * CC state change is expected in PR_SWAP
		 */
		break;

	default:
		if (tcpm_port_is_disconnected(port))
			tcpm_set_state(port, unattached_state(port), 0);
		break;
	}
}
static void _tcpm_pd_vbus_on(struct tcpm_port *port)
{
	tcpm_log_force(port, "VBUS on");
	port->vbus_present = true;
	switch (port->state) {
	case SNK_TRANSITION_SINK_VBUS:
		port->explicit_contract = true;
		tcpm_set_state(port, SNK_READY, 0);
		break;
	case SNK_DISCOVERY:
		tcpm_set_state(port, SNK_DISCOVERY, 0);
		break;
	case SNK_DEBOUNCED:
		tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
							: SNK_ATTACHED,
			       0);
		break;
	case SNK_HARD_RESET_WAIT_VBUS:
		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
		break;
	case SRC_ATTACHED:
		tcpm_set_state(port, SRC_STARTUP, 0);
		break;
	case SRC_HARD_RESET_VBUS_ON:
		tcpm_set_state(port, SRC_STARTUP, 0);
		break;
	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;
	case SRC_TRYWAIT:
		/* Do nothing, waiting for Rd to be detected */
		break;
	case SRC_TRYWAIT_DEBOUNCE:
		tcpm_set_state(port, SRC_TRYWAIT, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to be done */
		break;
	case SNK_TRYWAIT:
		/* Do nothing, waiting for tCCDebounce */
		break;
	case SNK_TRYWAIT_VBUS:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACHED, 0);
		break;
	case SNK_TRYWAIT_DEBOUNCE:
		/* Do nothing, waiting for Rp */
		break;
	case SRC_TRY_WAIT:
	case SRC_TRY_DEBOUNCE:
		/* Do nothing, waiting for sink detection */
		break;
	default:
		break;
	}
}
static void _tcpm_pd_vbus_off(struct tcpm_port *port)
{
	tcpm_log_force(port, "VBUS off");
	port->vbus_present = false;
	port->vbus_never_low = false;
	switch (port->state) {
	case SNK_HARD_RESET_SINK_OFF:
		tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
		break;
	case SRC_HARD_RESET_VBUS_OFF:
		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, 0);
		break;
	case HARD_RESET_SEND:
		break;
	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;
	case SRC_TRYWAIT:
		/* Hand over to state machine if needed */
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to be done */
		break;
	case SNK_TRYWAIT_VBUS:
	case SNK_TRYWAIT_DEBOUNCE:
		break;
	case SNK_ATTACH_WAIT:
		tcpm_set_state(port, SNK_UNATTACHED, 0);
		break;
	case SNK_NEGOTIATE_CAPABILITIES:
		break;
	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
		break;
	case PR_SWAP_SNK_SRC_SINK_OFF:
		/* Do nothing, expected */
		break;
	case PORT_RESET_WAIT_OFF:
		tcpm_set_state(port, tcpm_default_state(port), 0);
		break;
	case SRC_TRY_WAIT:
	case SRC_TRY_DEBOUNCE:
		/* Do nothing, waiting for sink detection */
		break;
	default:
		if (port->pwr_role == TYPEC_SINK &&
		    port->attached)
			tcpm_set_state(port, SNK_UNATTACHED, 0);
		break;
	}
}
static void _tcpm_pd_hard_reset(struct tcpm_port *port)
{
	tcpm_log_force(port, "Received hard reset");
	/*
	 * If we keep receiving hard reset requests, executing the hard reset
	 * must have failed. Revert to error recovery if that happens.
	 */
	tcpm_set_state(port,
		       port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
				HARD_RESET_START : ERROR_RECOVERY,
		       0);
}
static void tcpm_pd_event_handler(struct work_struct *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port,
					      event_work);
	u32 events;

	mutex_lock(&port->lock);

	spin_lock(&port->pd_event_lock);
	while (port->pd_events) {
		events = port->pd_events;
		port->pd_events = 0;
		spin_unlock(&port->pd_event_lock);
		if (events & TCPM_RESET_EVENT)
			_tcpm_pd_hard_reset(port);
		if (events & TCPM_VBUS_EVENT) {
			bool vbus;

			vbus = port->tcpc->get_vbus(port->tcpc);
			if (vbus)
				_tcpm_pd_vbus_on(port);
			else
				_tcpm_pd_vbus_off(port);
		}
		if (events & TCPM_CC_EVENT) {
			enum typec_cc_status cc1, cc2;

			if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
				_tcpm_cc_change(port, cc1, cc2);
		}
		spin_lock(&port->pd_event_lock);
	}
	spin_unlock(&port->pd_event_lock);
	mutex_unlock(&port->lock);
}
void tcpm_cc_change(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_CC_EVENT;
	spin_unlock(&port->pd_event_lock);
	queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_cc_change);

void tcpm_vbus_change(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_VBUS_EVENT;
	spin_unlock(&port->pd_event_lock);
	queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_vbus_change);

void tcpm_pd_hard_reset(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events = TCPM_RESET_EVENT;
	spin_unlock(&port->pd_event_lock);
	queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
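
/*
 * Illustrative sketch (not part of this driver): a low-level TCPC driver
 * normally calls the three notification helpers above from its (threaded)
 * interrupt handler.  The "foo" chip, its alert bits and foo_read_alert()
 * are hypothetical; only tcpm_cc_change(), tcpm_vbus_change() and
 * tcpm_pd_hard_reset() are real TCPM entry points.
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *dev_id)
 *	{
 *		struct foo_chip *chip = dev_id;
 *		u32 alert = foo_read_alert(chip);
 *
 *		if (alert & FOO_ALERT_CC_STATUS)
 *			tcpm_cc_change(chip->tcpm_port);
 *		if (alert & FOO_ALERT_VBUS)
 *			tcpm_vbus_change(chip->tcpm_port);
 *		if (alert & FOO_ALERT_HARD_RESET)
 *			tcpm_pd_hard_reset(chip->tcpm_port);
 *
 *		return IRQ_HANDLED;
 *	}
 *
 * The helpers only latch the event and queue event_work; the current CC and
 * VBUS state is then read back through the tcpc_dev callbacks in
 * tcpm_pd_event_handler() above.
 */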
static int tcpm_dr_set(const struct typec_capability *cap,
		       enum typec_data_role data)
{
	struct tcpm_port *port = typec_cap_to_tcpm(cap);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->port_type != TYPEC_PORT_DRP) {
		ret = -EINVAL;
		goto port_unlock;
	}
	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (port->data_role == data) {
		ret = 0;
		goto port_unlock;
	}

	/*
	 * 6.3.9: If an alternate mode is active, a request to swap
	 * alternate modes shall trigger a port reset.
	 * Reject data role swap request in this case.
	 */

	if (!port->pd_capable) {
		/*
		 * If the partner is not PD capable, reset the port to
		 * trigger a role change. This can only work if a preferred
		 * role is configured, and if it matches the requested role.
		 */
		if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
		    port->try_role == port->pwr_role) {
			ret = -EINVAL;
			goto port_unlock;
		}
		port->non_pd_role_swap = true;
		tcpm_set_state(port, PORT_RESET, 0);
	} else {
		tcpm_set_state(port, DR_SWAP_SEND, 0);
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->swap_complete,
					 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	port->non_pd_role_swap = false;
	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);

	return ret;
}
static int tcpm_pr_set(const struct typec_capability *cap,
		       enum typec_role role)
{
	struct tcpm_port *port = typec_cap_to_tcpm(cap);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->port_type != TYPEC_PORT_DRP) {
		ret = -EINVAL;
		goto port_unlock;
	}
	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (role == port->pwr_role) {
		ret = 0;
		goto port_unlock;
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	tcpm_set_state(port, PR_SWAP_SEND, 0);
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->swap_complete,
					 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);

	return ret;
}
static int tcpm_vconn_set(const struct typec_capability *cap,
			  enum typec_role role)
{
	struct tcpm_port *port = typec_cap_to_tcpm(cap);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (role == port->vconn_role) {
		ret = 0;
		goto port_unlock;
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	tcpm_set_state(port, VCONN_SWAP_SEND, 0);
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->swap_complete,
					 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);

	return ret;
}
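
/*
 * Note: tcpm_dr_set(), tcpm_pr_set() and tcpm_vconn_set() are not called
 * directly by low-level drivers; they are wired into the typec class through
 * port->typec_caps in tcpm_register_port() below, so swaps are normally
 * requested from user space.  As a rough sketch (the exact sysfs paths and
 * accepted strings are defined by the typec class, not by this file):
 *
 *	echo device > /sys/class/typec/port0/data_role
 *	echo sink   > /sys/class/typec/port0/power_role
 */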
static int tcpm_try_role(const struct typec_capability *cap, int role)
{
	struct tcpm_port *port = typec_cap_to_tcpm(cap);
	struct tcpc_dev *tcpc = port->tcpc;
	int ret = 0;

	mutex_lock(&port->lock);
	if (tcpc->try_role)
		ret = tcpc->try_role(tcpc, role);
	if (!ret && !tcpc->config->try_role_hw)
		port->try_role = role;
	port->try_src_count = 0;
	port->try_snk_count = 0;
	mutex_unlock(&port->lock);

	return ret;
}
static void tcpm_init(struct tcpm_port *port)
{
	enum typec_cc_status cc1, cc2;

	port->tcpc->init(port->tcpc);

	tcpm_reset_port(port);

	/*
	 * Should possibly wait for VBUS to settle if it was enabled locally
	 * since tcpm_reset_port() will disable VBUS.
	 */
	port->vbus_present = port->tcpc->get_vbus(port->tcpc);
	if (port->vbus_present)
		port->vbus_never_low = true;

	tcpm_set_state(port, tcpm_default_state(port), 0);

	if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
		_tcpm_cc_change(port, cc1, cc2);

	/*
	 * Some adapters need a clean slate at startup, and won't recover
	 * otherwise. So do not try to be fancy and force a clean disconnect.
	 */
	tcpm_set_state(port, PORT_RESET, 0);
}
static int tcpm_port_type_set(const struct typec_capability *cap,
			      enum typec_port_type type)
{
	struct tcpm_port *port = typec_cap_to_tcpm(cap);

	mutex_lock(&port->lock);
	if (type == port->port_type)
		goto port_unlock;

	port->port_type = type;

	if (!port->connected) {
		tcpm_set_state(port, PORT_RESET, 0);
	} else if (type == TYPEC_PORT_UFP) {
		if (!(port->pwr_role == TYPEC_SINK &&
		      port->data_role == TYPEC_DEVICE))
			tcpm_set_state(port, PORT_RESET, 0);
	} else if (type == TYPEC_PORT_DFP) {
		if (!(port->pwr_role == TYPEC_SOURCE &&
		      port->data_role == TYPEC_HOST))
			tcpm_set_state(port, PORT_RESET, 0);
	}

port_unlock:
	mutex_unlock(&port->lock);
	return 0;
}
void tcpm_tcpc_reset(struct tcpm_port *port)
{
	mutex_lock(&port->lock);
	/* XXX: Maintain PD connection if possible? */
	tcpm_init(port);
	mutex_unlock(&port->lock);
}
EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
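
/*
 * Illustrative sketch: a low-level driver whose TCPC loses its register
 * state (for example across system suspend or a controller firmware update)
 * can re-run the port init sequence through tcpm_tcpc_reset().  The "foo"
 * names below are hypothetical.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_chip *chip = dev_get_drvdata(dev);
 *
 *		foo_restore_registers(chip);
 *		tcpm_tcpc_reset(chip->tcpm_port);
 *		return 0;
 *	}
 */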
static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
			  unsigned int nr_pdo)
{
	unsigned int i;

	if (nr_pdo > PDO_MAX_OBJECTS)
		nr_pdo = PDO_MAX_OBJECTS;

	for (i = 0; i < nr_pdo; i++)
		dest_pdo[i] = src_pdo[i];

	return nr_pdo;
}

static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
			  unsigned int nr_vdo)
{
	unsigned int i;

	if (nr_vdo > VDO_MAX_OBJECTS)
		nr_vdo = VDO_MAX_OBJECTS;

	for (i = 0; i < nr_vdo; i++)
		dest_vdo[i] = src_vdo[i];

	return nr_vdo;
}
int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
				    unsigned int nr_pdo)
{
	if (tcpm_validate_caps(port, pdo, nr_pdo))
		return -EINVAL;

	mutex_lock(&port->lock);
	port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, pdo, nr_pdo);
	switch (port->state) {
	case SRC_UNATTACHED:
	case SRC_ATTACH_WAIT:
	case SRC_TRYWAIT:
		tcpm_set_cc(port, tcpm_rp_cc(port));
		break;
	case SRC_SEND_CAPABILITIES:
	case SRC_NEGOTIATE_CAPABILITIES:
	case SRC_READY:
	case SRC_WAIT_NEW_CAPABILITIES:
		tcpm_set_cc(port, tcpm_rp_cc(port));
		tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
		break;
	default:
		break;
	}
	mutex_unlock(&port->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(tcpm_update_source_capabilities);
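
/*
 * Illustrative sketch: a driver or platform glue can install a new set of
 * source PDOs at runtime, for instance when the available power budget
 * changes.  Depending on the current state, TCPM re-applies Rp and/or
 * re-sends Source_Capabilities, as handled in the switch above.  The PDO
 * values and the "chip" pointer are examples only; PDO_FIXED() is the
 * helper from <linux/usb/pd.h>.
 *
 *	static const u32 low_power_src_pdo[] = {
 *		PDO_FIXED(5000, 900, PDO_FIXED_USB_COMM),
 *	};
 *
 *	ret = tcpm_update_source_capabilities(chip->tcpm_port,
 *					      low_power_src_pdo,
 *					      ARRAY_SIZE(low_power_src_pdo));
 */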
int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
				  unsigned int nr_pdo,
				  unsigned int max_snk_mv,
				  unsigned int max_snk_ma,
				  unsigned int max_snk_mw,
				  unsigned int operating_snk_mw)
{
	if (tcpm_validate_caps(port, pdo, nr_pdo))
		return -EINVAL;

	mutex_lock(&port->lock);
	port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo);
	port->max_snk_mv = max_snk_mv;
	port->max_snk_ma = max_snk_ma;
	port->max_snk_mw = max_snk_mw;
	port->operating_snk_mw = operating_snk_mw;

	switch (port->state) {
	case SNK_NEGOTIATE_CAPABILITIES:
	case SNK_READY:
	case SNK_TRANSITION_SINK:
	case SNK_TRANSITION_SINK_VBUS:
		tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
		break;
	default:
		break;
	}
	mutex_unlock(&port->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
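
/*
 * Illustrative sketch: updating the sink PDOs together with the voltage,
 * current and power limits used on the sink side.  If a contract is being
 * (re)negotiated or established, the port returns to
 * SNK_NEGOTIATE_CAPABILITIES so the new limits take effect.  All values are
 * examples only; PDO_FIXED()/PDO_VAR() come from <linux/usb/pd.h>.
 *
 *	static const u32 example_snk_pdo[] = {
 *		PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM),
 *		PDO_VAR(5000, 12000, 3000),
 *	};
 *
 *	ret = tcpm_update_sink_capabilities(chip->tcpm_port, example_snk_pdo,
 *					    ARRAY_SIZE(example_snk_pdo),
 *					    12000,	// max_snk_mv
 *					    3000,	// max_snk_ma
 *					    36000,	// max_snk_mw
 *					    10000);	// operating_snk_mw
 */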
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
	struct tcpm_port *port;
	int i, err;

	if (!dev || !tcpc || !tcpc->config ||
	    !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
	    !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
	    !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
		return ERR_PTR(-EINVAL);

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	port->dev = dev;
	port->tcpc = tcpc;

	mutex_init(&port->lock);
	mutex_init(&port->swap_lock);

	port->wq = create_singlethread_workqueue(dev_name(dev));
	if (!port->wq)
		return ERR_PTR(-ENOMEM);
	INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work);
	INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work);
	INIT_WORK(&port->event_work, tcpm_pd_event_handler);

	spin_lock_init(&port->pd_event_lock);

	init_completion(&port->tx_complete);
	init_completion(&port->swap_complete);
	tcpm_debugfs_init(port);
, tcpc
->config
->src_pdo
,
3631 tcpc
->config
->nr_src_pdo
) ||
3632 tcpm_validate_caps(port
, tcpc
->config
->snk_pdo
,
3633 tcpc
->config
->nr_snk_pdo
)) {
3635 goto out_destroy_wq
;
3637 port
->nr_src_pdo
= tcpm_copy_pdos(port
->src_pdo
, tcpc
->config
->src_pdo
,
3638 tcpc
->config
->nr_src_pdo
);
3639 port
->nr_snk_pdo
= tcpm_copy_pdos(port
->snk_pdo
, tcpc
->config
->snk_pdo
,
3640 tcpc
->config
->nr_snk_pdo
);
3641 port
->nr_snk_vdo
= tcpm_copy_vdos(port
->snk_vdo
, tcpc
->config
->snk_vdo
,
3642 tcpc
->config
->nr_snk_vdo
);
3644 port
->max_snk_mv
= tcpc
->config
->max_snk_mv
;
3645 port
->max_snk_ma
= tcpc
->config
->max_snk_ma
;
3646 port
->max_snk_mw
= tcpc
->config
->max_snk_mw
;
3647 port
->operating_snk_mw
= tcpc
->config
->operating_snk_mw
;
3648 if (!tcpc
->config
->try_role_hw
)
3649 port
->try_role
= tcpc
->config
->default_role
;
3651 port
->try_role
= TYPEC_NO_PREFERRED_ROLE
;
	port->typec_caps.prefer_role = tcpc->config->default_role;
	port->typec_caps.type = tcpc->config->type;
	port->typec_caps.revision = 0x0120;	/* Type-C spec release 1.2 */
	port->typec_caps.pd_revision = 0x0200;	/* USB-PD spec release 2.0 */
	port->typec_caps.dr_set = tcpm_dr_set;
	port->typec_caps.pr_set = tcpm_pr_set;
	port->typec_caps.vconn_set = tcpm_vconn_set;
	port->typec_caps.try_role = tcpm_try_role;
	port->typec_caps.port_type_set = tcpm_port_type_set;

	port->partner_desc.identity = &port->partner_ident;
	port->port_type = tcpc->config->type;

	port->typec_port = typec_register_port(port->dev, &port->typec_caps);
	if (!port->typec_port) {
		err = -ENOMEM;
		goto out_destroy_wq;
	}

	if (tcpc->config->alt_modes) {
		const struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;

		i = 0;
		while (paltmode->svid && i < ARRAY_SIZE(port->port_altmode)) {
			port->port_altmode[i] =
				typec_port_register_altmode(port->typec_port,
							    paltmode);
			if (!port->port_altmode[i]) {
				tcpm_log(port,
					 "%s: failed to register port alternate mode 0x%x",
					 dev_name(dev), paltmode->svid);
				break;
			}
			i++;
			paltmode++;
		}
	}

	mutex_lock(&port->lock);
	tcpm_init(port);
	mutex_unlock(&port->lock);

	tcpm_log(port, "%s: registered", dev_name(dev));
	return port;

out_destroy_wq:
	destroy_workqueue(port->wq);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(tcpm_register_port);
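
/*
 * Illustrative sketch: registering a port from a low-level TCPC driver's
 * probe routine.  The "foo" callbacks, config and chip structure are
 * hypothetical.  Most of the callbacks shown are required (see the checks at
 * the top of tcpm_register_port() above); ->init is invoked unconditionally
 * from tcpm_init(), so it must be provided as well.
 *
 *	chip->tcpc.config	= &foo_tcpc_config;
 *	chip->tcpc.init		= foo_init;
 *	chip->tcpc.get_vbus	= foo_get_vbus;
 *	chip->tcpc.set_vbus	= foo_set_vbus;
 *	chip->tcpc.set_cc	= foo_set_cc;
 *	chip->tcpc.get_cc	= foo_get_cc;
 *	chip->tcpc.set_polarity	= foo_set_polarity;
 *	chip->tcpc.set_vconn	= foo_set_vconn;
 *	chip->tcpc.set_pd_rx	= foo_set_pd_rx;
 *	chip->tcpc.set_roles	= foo_set_roles;
 *	chip->tcpc.pd_transmit	= foo_pd_transmit;
 *
 *	chip->tcpm_port = tcpm_register_port(chip->dev, &chip->tcpc);
 *	if (IS_ERR(chip->tcpm_port))
 *		return PTR_ERR(chip->tcpm_port);
 */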
void tcpm_unregister_port(struct tcpm_port *port)
{
	int i;

	tcpm_reset_port(port);
	for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
		typec_unregister_altmode(port->port_altmode[i]);
	typec_unregister_port(port->typec_port);
	tcpm_debugfs_exit(port);
	destroy_workqueue(port->wq);
}
EXPORT_SYMBOL_GPL(tcpm_unregister_port);
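
/*
 * Illustrative sketch: the matching teardown simply unregisters the port;
 * tcpm_unregister_port() above resets the port and releases the typec port,
 * debugfs entries and workqueue created at registration time.  The "foo"
 * names are hypothetical.
 *
 *	static int foo_remove(struct i2c_client *client)
 *	{
 *		struct foo_chip *chip = i2c_get_clientdata(client);
 *
 *		tcpm_unregister_port(chip->tcpm_port);
 *		return 0;
 *	}
 */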
MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
MODULE_DESCRIPTION("USB Type-C Port Manager");
MODULE_LICENSE("GPL");