tcpm.c (173377B)
1// SPDX-License-Identifier: GPL-2.0+ 2/* 3 * Copyright 2015-2017 Google, Inc 4 * 5 * USB Power Delivery protocol stack. 6 */ 7 8#include <linux/completion.h> 9#include <linux/debugfs.h> 10#include <linux/device.h> 11#include <linux/hrtimer.h> 12#include <linux/jiffies.h> 13#include <linux/kernel.h> 14#include <linux/kthread.h> 15#include <linux/module.h> 16#include <linux/mutex.h> 17#include <linux/power_supply.h> 18#include <linux/proc_fs.h> 19#include <linux/property.h> 20#include <linux/sched/clock.h> 21#include <linux/seq_file.h> 22#include <linux/slab.h> 23#include <linux/spinlock.h> 24#include <linux/usb.h> 25#include <linux/usb/pd.h> 26#include <linux/usb/pd_ado.h> 27#include <linux/usb/pd_bdo.h> 28#include <linux/usb/pd_ext_sdb.h> 29#include <linux/usb/pd_vdo.h> 30#include <linux/usb/role.h> 31#include <linux/usb/tcpm.h> 32#include <linux/usb/typec_altmode.h> 33 34#include <uapi/linux/sched/types.h> 35 36#define FOREACH_STATE(S) \ 37 S(INVALID_STATE), \ 38 S(TOGGLING), \ 39 S(SRC_UNATTACHED), \ 40 S(SRC_ATTACH_WAIT), \ 41 S(SRC_ATTACHED), \ 42 S(SRC_STARTUP), \ 43 S(SRC_SEND_CAPABILITIES), \ 44 S(SRC_SEND_CAPABILITIES_TIMEOUT), \ 45 S(SRC_NEGOTIATE_CAPABILITIES), \ 46 S(SRC_TRANSITION_SUPPLY), \ 47 S(SRC_READY), \ 48 S(SRC_WAIT_NEW_CAPABILITIES), \ 49 \ 50 S(SNK_UNATTACHED), \ 51 S(SNK_ATTACH_WAIT), \ 52 S(SNK_DEBOUNCED), \ 53 S(SNK_ATTACHED), \ 54 S(SNK_STARTUP), \ 55 S(SNK_DISCOVERY), \ 56 S(SNK_DISCOVERY_DEBOUNCE), \ 57 S(SNK_DISCOVERY_DEBOUNCE_DONE), \ 58 S(SNK_WAIT_CAPABILITIES), \ 59 S(SNK_NEGOTIATE_CAPABILITIES), \ 60 S(SNK_NEGOTIATE_PPS_CAPABILITIES), \ 61 S(SNK_TRANSITION_SINK), \ 62 S(SNK_TRANSITION_SINK_VBUS), \ 63 S(SNK_READY), \ 64 \ 65 S(ACC_UNATTACHED), \ 66 S(DEBUG_ACC_ATTACHED), \ 67 S(AUDIO_ACC_ATTACHED), \ 68 S(AUDIO_ACC_DEBOUNCE), \ 69 \ 70 S(HARD_RESET_SEND), \ 71 S(HARD_RESET_START), \ 72 S(SRC_HARD_RESET_VBUS_OFF), \ 73 S(SRC_HARD_RESET_VBUS_ON), \ 74 S(SNK_HARD_RESET_SINK_OFF), \ 75 S(SNK_HARD_RESET_WAIT_VBUS), \ 76 S(SNK_HARD_RESET_SINK_ON), \ 77 \ 78 S(SOFT_RESET), \ 79 S(SRC_SOFT_RESET_WAIT_SNK_TX), \ 80 S(SNK_SOFT_RESET), \ 81 S(SOFT_RESET_SEND), \ 82 \ 83 S(DR_SWAP_ACCEPT), \ 84 S(DR_SWAP_SEND), \ 85 S(DR_SWAP_SEND_TIMEOUT), \ 86 S(DR_SWAP_CANCEL), \ 87 S(DR_SWAP_CHANGE_DR), \ 88 \ 89 S(PR_SWAP_ACCEPT), \ 90 S(PR_SWAP_SEND), \ 91 S(PR_SWAP_SEND_TIMEOUT), \ 92 S(PR_SWAP_CANCEL), \ 93 S(PR_SWAP_START), \ 94 S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \ 95 S(PR_SWAP_SRC_SNK_SOURCE_OFF), \ 96 S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \ 97 S(PR_SWAP_SRC_SNK_SINK_ON), \ 98 S(PR_SWAP_SNK_SRC_SINK_OFF), \ 99 S(PR_SWAP_SNK_SRC_SOURCE_ON), \ 100 S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP), \ 101 \ 102 S(VCONN_SWAP_ACCEPT), \ 103 S(VCONN_SWAP_SEND), \ 104 S(VCONN_SWAP_SEND_TIMEOUT), \ 105 S(VCONN_SWAP_CANCEL), \ 106 S(VCONN_SWAP_START), \ 107 S(VCONN_SWAP_WAIT_FOR_VCONN), \ 108 S(VCONN_SWAP_TURN_ON_VCONN), \ 109 S(VCONN_SWAP_TURN_OFF_VCONN), \ 110 \ 111 S(FR_SWAP_SEND), \ 112 S(FR_SWAP_SEND_TIMEOUT), \ 113 S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF), \ 114 S(FR_SWAP_SNK_SRC_NEW_SINK_READY), \ 115 S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED), \ 116 S(FR_SWAP_CANCEL), \ 117 \ 118 S(SNK_TRY), \ 119 S(SNK_TRY_WAIT), \ 120 S(SNK_TRY_WAIT_DEBOUNCE), \ 121 S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS), \ 122 S(SRC_TRYWAIT), \ 123 S(SRC_TRYWAIT_DEBOUNCE), \ 124 S(SRC_TRYWAIT_UNATTACHED), \ 125 \ 126 S(SRC_TRY), \ 127 S(SRC_TRY_WAIT), \ 128 S(SRC_TRY_DEBOUNCE), \ 129 S(SNK_TRYWAIT), \ 130 S(SNK_TRYWAIT_DEBOUNCE), \ 131 S(SNK_TRYWAIT_VBUS), \ 132 S(BIST_RX), \ 133 \ 134 S(GET_STATUS_SEND), \ 135 
S(GET_STATUS_SEND_TIMEOUT), \ 136 S(GET_PPS_STATUS_SEND), \ 137 S(GET_PPS_STATUS_SEND_TIMEOUT), \ 138 \ 139 S(GET_SINK_CAP), \ 140 S(GET_SINK_CAP_TIMEOUT), \ 141 \ 142 S(ERROR_RECOVERY), \ 143 S(PORT_RESET), \ 144 S(PORT_RESET_WAIT_OFF), \ 145 \ 146 S(AMS_START), \ 147 S(CHUNK_NOT_SUPP) 148 149#define FOREACH_AMS(S) \ 150 S(NONE_AMS), \ 151 S(POWER_NEGOTIATION), \ 152 S(GOTOMIN), \ 153 S(SOFT_RESET_AMS), \ 154 S(HARD_RESET), \ 155 S(CABLE_RESET), \ 156 S(GET_SOURCE_CAPABILITIES), \ 157 S(GET_SINK_CAPABILITIES), \ 158 S(POWER_ROLE_SWAP), \ 159 S(FAST_ROLE_SWAP), \ 160 S(DATA_ROLE_SWAP), \ 161 S(VCONN_SWAP), \ 162 S(SOURCE_ALERT), \ 163 S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\ 164 S(GETTING_SOURCE_SINK_STATUS), \ 165 S(GETTING_BATTERY_CAPABILITIES), \ 166 S(GETTING_BATTERY_STATUS), \ 167 S(GETTING_MANUFACTURER_INFORMATION), \ 168 S(SECURITY), \ 169 S(FIRMWARE_UPDATE), \ 170 S(DISCOVER_IDENTITY), \ 171 S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY), \ 172 S(DISCOVER_SVIDS), \ 173 S(DISCOVER_MODES), \ 174 S(DFP_TO_UFP_ENTER_MODE), \ 175 S(DFP_TO_UFP_EXIT_MODE), \ 176 S(DFP_TO_CABLE_PLUG_ENTER_MODE), \ 177 S(DFP_TO_CABLE_PLUG_EXIT_MODE), \ 178 S(ATTENTION), \ 179 S(BIST), \ 180 S(UNSTRUCTURED_VDMS), \ 181 S(STRUCTURED_VDMS), \ 182 S(COUNTRY_INFO), \ 183 S(COUNTRY_CODES) 184 185#define GENERATE_ENUM(e) e 186#define GENERATE_STRING(s) #s 187 188enum tcpm_state { 189 FOREACH_STATE(GENERATE_ENUM) 190}; 191 192static const char * const tcpm_states[] = { 193 FOREACH_STATE(GENERATE_STRING) 194}; 195 196enum tcpm_ams { 197 FOREACH_AMS(GENERATE_ENUM) 198}; 199 200static const char * const tcpm_ams_str[] = { 201 FOREACH_AMS(GENERATE_STRING) 202}; 203 204enum vdm_states { 205 VDM_STATE_ERR_BUSY = -3, 206 VDM_STATE_ERR_SEND = -2, 207 VDM_STATE_ERR_TMOUT = -1, 208 VDM_STATE_DONE = 0, 209 /* Anything >0 represents an active state */ 210 VDM_STATE_READY = 1, 211 VDM_STATE_BUSY = 2, 212 VDM_STATE_WAIT_RSP_BUSY = 3, 213 VDM_STATE_SEND_MESSAGE = 4, 214}; 215 216enum pd_msg_request { 217 PD_MSG_NONE = 0, 218 PD_MSG_CTRL_REJECT, 219 PD_MSG_CTRL_WAIT, 220 PD_MSG_CTRL_NOT_SUPP, 221 PD_MSG_DATA_SINK_CAP, 222 PD_MSG_DATA_SOURCE_CAP, 223}; 224 225enum adev_actions { 226 ADEV_NONE = 0, 227 ADEV_NOTIFY_USB_AND_QUEUE_VDM, 228 ADEV_QUEUE_VDM, 229 ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL, 230 ADEV_ATTENTION, 231}; 232 233/* 234 * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap. 
235 * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0, 236 * Version 1.2" 237 */ 238enum frs_typec_current { 239 FRS_NOT_SUPPORTED, 240 FRS_DEFAULT_POWER, 241 FRS_5V_1P5A, 242 FRS_5V_3A, 243}; 244 245/* Events from low level driver */ 246 247#define TCPM_CC_EVENT BIT(0) 248#define TCPM_VBUS_EVENT BIT(1) 249#define TCPM_RESET_EVENT BIT(2) 250#define TCPM_FRS_EVENT BIT(3) 251#define TCPM_SOURCING_VBUS BIT(4) 252 253#define LOG_BUFFER_ENTRIES 1024 254#define LOG_BUFFER_ENTRY_SIZE 128 255 256/* Alternate mode support */ 257 258#define SVID_DISCOVERY_MAX 16 259#define ALTMODE_DISCOVERY_MAX (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX) 260 261#define GET_SINK_CAP_RETRY_MS 100 262#define SEND_DISCOVER_RETRY_MS 100 263 264struct pd_mode_data { 265 int svid_index; /* current SVID index */ 266 int nsvids; 267 u16 svids[SVID_DISCOVERY_MAX]; 268 int altmodes; /* number of alternate modes */ 269 struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX]; 270}; 271 272/* 273 * @min_volt: Actual min voltage at the local port 274 * @req_min_volt: Requested min voltage to the port partner 275 * @max_volt: Actual max voltage at the local port 276 * @req_max_volt: Requested max voltage to the port partner 277 * @max_curr: Actual max current at the local port 278 * @req_max_curr: Requested max current of the port partner 279 * @req_out_volt: Requested output voltage to the port partner 280 * @req_op_curr: Requested operating current to the port partner 281 * @supported: Parter has at least one APDO hence supports PPS 282 * @active: PPS mode is active 283 */ 284struct pd_pps_data { 285 u32 min_volt; 286 u32 req_min_volt; 287 u32 max_volt; 288 u32 req_max_volt; 289 u32 max_curr; 290 u32 req_max_curr; 291 u32 req_out_volt; 292 u32 req_op_curr; 293 bool supported; 294 bool active; 295}; 296 297struct tcpm_port { 298 struct device *dev; 299 300 struct mutex lock; /* tcpm state machine lock */ 301 struct kthread_worker *wq; 302 303 struct typec_capability typec_caps; 304 struct typec_port *typec_port; 305 306 struct tcpc_dev *tcpc; 307 struct usb_role_switch *role_sw; 308 309 enum typec_role vconn_role; 310 enum typec_role pwr_role; 311 enum typec_data_role data_role; 312 enum typec_pwr_opmode pwr_opmode; 313 314 struct usb_pd_identity partner_ident; 315 struct typec_partner_desc partner_desc; 316 struct typec_partner *partner; 317 318 enum typec_cc_status cc_req; 319 enum typec_cc_status src_rp; /* work only if pd_supported == false */ 320 321 enum typec_cc_status cc1; 322 enum typec_cc_status cc2; 323 enum typec_cc_polarity polarity; 324 325 bool attached; 326 bool connected; 327 bool registered; 328 bool pd_supported; 329 enum typec_port_type port_type; 330 331 /* 332 * Set to true when vbus is greater than VSAFE5V min. 333 * Set to false when vbus falls below vSinkDisconnect max threshold. 334 */ 335 bool vbus_present; 336 337 /* 338 * Set to true when vbus is less than VSAFE0V max. 339 * Set to false when vbus is greater than VSAFE0V max. 340 */ 341 bool vbus_vsafe0v; 342 343 bool vbus_never_low; 344 bool vbus_source; 345 bool vbus_charge; 346 347 /* Set to true when Discover_Identity Command is expected to be sent in Ready states. 
*/ 348 bool send_discover; 349 bool op_vsafe5v; 350 351 int try_role; 352 int try_snk_count; 353 int try_src_count; 354 355 enum pd_msg_request queued_message; 356 357 enum tcpm_state enter_state; 358 enum tcpm_state prev_state; 359 enum tcpm_state state; 360 enum tcpm_state delayed_state; 361 ktime_t delayed_runtime; 362 unsigned long delay_ms; 363 364 spinlock_t pd_event_lock; 365 u32 pd_events; 366 367 struct kthread_work event_work; 368 struct hrtimer state_machine_timer; 369 struct kthread_work state_machine; 370 struct hrtimer vdm_state_machine_timer; 371 struct kthread_work vdm_state_machine; 372 struct hrtimer enable_frs_timer; 373 struct kthread_work enable_frs; 374 struct hrtimer send_discover_timer; 375 struct kthread_work send_discover_work; 376 bool state_machine_running; 377 /* Set to true when VDM State Machine has following actions. */ 378 bool vdm_sm_running; 379 380 struct completion tx_complete; 381 enum tcpm_transmit_status tx_status; 382 383 struct mutex swap_lock; /* swap command lock */ 384 bool swap_pending; 385 bool non_pd_role_swap; 386 struct completion swap_complete; 387 int swap_status; 388 389 unsigned int negotiated_rev; 390 unsigned int message_id; 391 unsigned int caps_count; 392 unsigned int hard_reset_count; 393 bool pd_capable; 394 bool explicit_contract; 395 unsigned int rx_msgid; 396 397 /* Partner capabilities/requests */ 398 u32 sink_request; 399 u32 source_caps[PDO_MAX_OBJECTS]; 400 unsigned int nr_source_caps; 401 u32 sink_caps[PDO_MAX_OBJECTS]; 402 unsigned int nr_sink_caps; 403 404 /* Local capabilities */ 405 u32 src_pdo[PDO_MAX_OBJECTS]; 406 unsigned int nr_src_pdo; 407 u32 snk_pdo[PDO_MAX_OBJECTS]; 408 unsigned int nr_snk_pdo; 409 u32 snk_vdo_v1[VDO_MAX_OBJECTS]; 410 unsigned int nr_snk_vdo_v1; 411 u32 snk_vdo[VDO_MAX_OBJECTS]; 412 unsigned int nr_snk_vdo; 413 414 unsigned int operating_snk_mw; 415 bool update_sink_caps; 416 417 /* Requested current / voltage to the port partner */ 418 u32 req_current_limit; 419 u32 req_supply_voltage; 420 /* Actual current / voltage limit of the local port */ 421 u32 current_limit; 422 u32 supply_voltage; 423 424 /* Used to export TA voltage and current */ 425 struct power_supply *psy; 426 struct power_supply_desc psy_desc; 427 enum power_supply_usb_type usb_type; 428 429 u32 bist_request; 430 431 /* PD state for Vendor Defined Messages */ 432 enum vdm_states vdm_state; 433 u32 vdm_retries; 434 /* next Vendor Defined Message to send */ 435 u32 vdo_data[VDO_MAX_SIZE]; 436 u8 vdo_count; 437 /* VDO to retry if UFP responder replied busy */ 438 u32 vdo_retry; 439 440 /* PPS */ 441 struct pd_pps_data pps_data; 442 struct completion pps_complete; 443 bool pps_pending; 444 int pps_status; 445 446 /* Alternate mode data */ 447 struct pd_mode_data mode_data; 448 struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX]; 449 struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX]; 450 451 /* Deadline in jiffies to exit src_try_wait state */ 452 unsigned long max_wait; 453 454 /* port belongs to a self powered device */ 455 bool self_powered; 456 457 /* Sink FRS */ 458 enum frs_typec_current new_source_frs_current; 459 460 /* Sink caps have been queried */ 461 bool sink_cap_done; 462 463 /* Collision Avoidance and Atomic Message Sequence */ 464 enum tcpm_state upcoming_state; 465 enum tcpm_ams ams; 466 enum tcpm_ams next_ams; 467 bool in_ams; 468 469 /* Auto vbus discharge status */ 470 bool auto_vbus_discharge_enabled; 471 472 /* 473 * When set, port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY and 474 * 
the actual currrent limit after RX of PD_CTRL_PSRDY for PD link, 475 * SNK_READY for non-pd link. 476 */ 477 bool slow_charger_loop; 478#ifdef CONFIG_DEBUG_FS 479 struct dentry *dentry; 480 struct mutex logbuffer_lock; /* log buffer access lock */ 481 int logbuffer_head; 482 int logbuffer_tail; 483 u8 *logbuffer[LOG_BUFFER_ENTRIES]; 484#endif 485}; 486 487struct pd_rx_event { 488 struct kthread_work work; 489 struct tcpm_port *port; 490 struct pd_message msg; 491}; 492 493static const char * const pd_rev[] = { 494 [PD_REV10] = "rev1", 495 [PD_REV20] = "rev2", 496 [PD_REV30] = "rev3", 497}; 498 499#define tcpm_cc_is_sink(cc) \ 500 ((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \ 501 (cc) == TYPEC_CC_RP_3_0) 502 503#define tcpm_port_is_sink(port) \ 504 ((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \ 505 (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1))) 506 507#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD) 508#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA) 509#define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN) 510 511#define tcpm_port_is_source(port) \ 512 ((tcpm_cc_is_source((port)->cc1) && \ 513 !tcpm_cc_is_source((port)->cc2)) || \ 514 (tcpm_cc_is_source((port)->cc2) && \ 515 !tcpm_cc_is_source((port)->cc1))) 516 517#define tcpm_port_is_debug(port) \ 518 (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2)) 519 520#define tcpm_port_is_audio(port) \ 521 (tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2)) 522 523#define tcpm_port_is_audio_detached(port) \ 524 ((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \ 525 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1))) 526 527#define tcpm_try_snk(port) \ 528 ((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \ 529 (port)->port_type == TYPEC_PORT_DRP) 530 531#define tcpm_try_src(port) \ 532 ((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \ 533 (port)->port_type == TYPEC_PORT_DRP) 534 535#define tcpm_data_role_for_source(port) \ 536 ((port)->typec_caps.data == TYPEC_PORT_UFP ? \ 537 TYPEC_DEVICE : TYPEC_HOST) 538 539#define tcpm_data_role_for_sink(port) \ 540 ((port)->typec_caps.data == TYPEC_PORT_DFP ? \ 541 TYPEC_HOST : TYPEC_DEVICE) 542 543#define tcpm_sink_tx_ok(port) \ 544 (tcpm_port_is_sink(port) && \ 545 ((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0)) 546 547#define tcpm_wait_for_discharge(port) \ 548 (((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? 
PD_T_SAFE_0V : 0) 549 550static enum tcpm_state tcpm_default_state(struct tcpm_port *port) 551{ 552 if (port->port_type == TYPEC_PORT_DRP) { 553 if (port->try_role == TYPEC_SINK) 554 return SNK_UNATTACHED; 555 else if (port->try_role == TYPEC_SOURCE) 556 return SRC_UNATTACHED; 557 /* Fall through to return SRC_UNATTACHED */ 558 } else if (port->port_type == TYPEC_PORT_SNK) { 559 return SNK_UNATTACHED; 560 } 561 return SRC_UNATTACHED; 562} 563 564static bool tcpm_port_is_disconnected(struct tcpm_port *port) 565{ 566 return (!port->attached && port->cc1 == TYPEC_CC_OPEN && 567 port->cc2 == TYPEC_CC_OPEN) || 568 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 && 569 port->cc1 == TYPEC_CC_OPEN) || 570 (port->polarity == TYPEC_POLARITY_CC2 && 571 port->cc2 == TYPEC_CC_OPEN))); 572} 573 574/* 575 * Logging 576 */ 577 578#ifdef CONFIG_DEBUG_FS 579 580static bool tcpm_log_full(struct tcpm_port *port) 581{ 582 return port->logbuffer_tail == 583 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES; 584} 585 586__printf(2, 0) 587static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args) 588{ 589 char tmpbuffer[LOG_BUFFER_ENTRY_SIZE]; 590 u64 ts_nsec = local_clock(); 591 unsigned long rem_nsec; 592 593 mutex_lock(&port->logbuffer_lock); 594 if (!port->logbuffer[port->logbuffer_head]) { 595 port->logbuffer[port->logbuffer_head] = 596 kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL); 597 if (!port->logbuffer[port->logbuffer_head]) { 598 mutex_unlock(&port->logbuffer_lock); 599 return; 600 } 601 } 602 603 vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args); 604 605 if (tcpm_log_full(port)) { 606 port->logbuffer_head = max(port->logbuffer_head - 1, 0); 607 strcpy(tmpbuffer, "overflow"); 608 } 609 610 if (port->logbuffer_head < 0 || 611 port->logbuffer_head >= LOG_BUFFER_ENTRIES) { 612 dev_warn(port->dev, 613 "Bad log buffer index %d\n", port->logbuffer_head); 614 goto abort; 615 } 616 617 if (!port->logbuffer[port->logbuffer_head]) { 618 dev_warn(port->dev, 619 "Log buffer index %d is NULL\n", port->logbuffer_head); 620 goto abort; 621 } 622 623 rem_nsec = do_div(ts_nsec, 1000000000); 624 scnprintf(port->logbuffer[port->logbuffer_head], 625 LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s", 626 (unsigned long)ts_nsec, rem_nsec / 1000, 627 tmpbuffer); 628 port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES; 629 630abort: 631 mutex_unlock(&port->logbuffer_lock); 632} 633 634__printf(2, 3) 635static void tcpm_log(struct tcpm_port *port, const char *fmt, ...) 636{ 637 va_list args; 638 639 /* Do not log while disconnected and unattached */ 640 if (tcpm_port_is_disconnected(port) && 641 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED || 642 port->state == TOGGLING)) 643 return; 644 645 va_start(args, fmt); 646 _tcpm_log(port, fmt, args); 647 va_end(args); 648} 649 650__printf(2, 3) 651static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) 652{ 653 va_list args; 654 655 va_start(args, fmt); 656 _tcpm_log(port, fmt, args); 657 va_end(args); 658} 659 660static void tcpm_log_source_caps(struct tcpm_port *port) 661{ 662 int i; 663 664 for (i = 0; i < port->nr_source_caps; i++) { 665 u32 pdo = port->source_caps[i]; 666 enum pd_pdo_type type = pdo_type(pdo); 667 char msg[64]; 668 669 switch (type) { 670 case PDO_TYPE_FIXED: 671 scnprintf(msg, sizeof(msg), 672 "%u mV, %u mA [%s%s%s%s%s%s]", 673 pdo_fixed_voltage(pdo), 674 pdo_max_current(pdo), 675 (pdo & PDO_FIXED_DUAL_ROLE) ? 676 "R" : "", 677 (pdo & PDO_FIXED_SUSPEND) ? 
678 "S" : "", 679 (pdo & PDO_FIXED_HIGHER_CAP) ? 680 "H" : "", 681 (pdo & PDO_FIXED_USB_COMM) ? 682 "U" : "", 683 (pdo & PDO_FIXED_DATA_SWAP) ? 684 "D" : "", 685 (pdo & PDO_FIXED_EXTPOWER) ? 686 "E" : ""); 687 break; 688 case PDO_TYPE_VAR: 689 scnprintf(msg, sizeof(msg), 690 "%u-%u mV, %u mA", 691 pdo_min_voltage(pdo), 692 pdo_max_voltage(pdo), 693 pdo_max_current(pdo)); 694 break; 695 case PDO_TYPE_BATT: 696 scnprintf(msg, sizeof(msg), 697 "%u-%u mV, %u mW", 698 pdo_min_voltage(pdo), 699 pdo_max_voltage(pdo), 700 pdo_max_power(pdo)); 701 break; 702 case PDO_TYPE_APDO: 703 if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) 704 scnprintf(msg, sizeof(msg), 705 "%u-%u mV, %u mA", 706 pdo_pps_apdo_min_voltage(pdo), 707 pdo_pps_apdo_max_voltage(pdo), 708 pdo_pps_apdo_max_current(pdo)); 709 else 710 strcpy(msg, "undefined APDO"); 711 break; 712 default: 713 strcpy(msg, "undefined"); 714 break; 715 } 716 tcpm_log(port, " PDO %d: type %d, %s", 717 i, type, msg); 718 } 719} 720 721static int tcpm_debug_show(struct seq_file *s, void *v) 722{ 723 struct tcpm_port *port = (struct tcpm_port *)s->private; 724 int tail; 725 726 mutex_lock(&port->logbuffer_lock); 727 tail = port->logbuffer_tail; 728 while (tail != port->logbuffer_head) { 729 seq_printf(s, "%s\n", port->logbuffer[tail]); 730 tail = (tail + 1) % LOG_BUFFER_ENTRIES; 731 } 732 if (!seq_has_overflowed(s)) 733 port->logbuffer_tail = tail; 734 mutex_unlock(&port->logbuffer_lock); 735 736 return 0; 737} 738DEFINE_SHOW_ATTRIBUTE(tcpm_debug); 739 740static void tcpm_debugfs_init(struct tcpm_port *port) 741{ 742 char name[NAME_MAX]; 743 744 mutex_init(&port->logbuffer_lock); 745 snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev)); 746 port->dentry = debugfs_create_dir(name, usb_debug_root); 747 debugfs_create_file("log", S_IFREG | 0444, port->dentry, port, 748 &tcpm_debug_fops); 749} 750 751static void tcpm_debugfs_exit(struct tcpm_port *port) 752{ 753 int i; 754 755 mutex_lock(&port->logbuffer_lock); 756 for (i = 0; i < LOG_BUFFER_ENTRIES; i++) { 757 kfree(port->logbuffer[i]); 758 port->logbuffer[i] = NULL; 759 } 760 mutex_unlock(&port->logbuffer_lock); 761 762 debugfs_remove(port->dentry); 763} 764 765#else 766 767__printf(2, 3) 768static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { } 769__printf(2, 3) 770static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { } 771static void tcpm_log_source_caps(struct tcpm_port *port) { } 772static void tcpm_debugfs_init(const struct tcpm_port *port) { } 773static void tcpm_debugfs_exit(const struct tcpm_port *port) { } 774 775#endif 776 777static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc) 778{ 779 tcpm_log(port, "cc:=%d", cc); 780 port->cc_req = cc; 781 port->tcpc->set_cc(port->tcpc, cc); 782} 783 784static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable) 785{ 786 int ret = 0; 787 788 if (port->tcpc->enable_auto_vbus_discharge) { 789 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable); 790 tcpm_log_force(port, "%s vbus discharge ret:%d", enable ? "enable" : "disable", 791 ret); 792 if (!ret) 793 port->auto_vbus_discharge_enabled = enable; 794 } 795 796 return ret; 797} 798 799static void tcpm_apply_rc(struct tcpm_port *port) 800{ 801 /* 802 * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP 803 * when Vbus auto discharge on disconnect is enabled. 
804 */ 805 if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) { 806 tcpm_log(port, "Apply_RC"); 807 port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity); 808 tcpm_enable_auto_vbus_discharge(port, false); 809 } 810} 811 812/* 813 * Determine RP value to set based on maximum current supported 814 * by a port if configured as source. 815 * Returns CC value to report to link partner. 816 */ 817static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port) 818{ 819 const u32 *src_pdo = port->src_pdo; 820 int nr_pdo = port->nr_src_pdo; 821 int i; 822 823 if (!port->pd_supported) 824 return port->src_rp; 825 826 /* 827 * Search for first entry with matching voltage. 828 * It should report the maximum supported current. 829 */ 830 for (i = 0; i < nr_pdo; i++) { 831 const u32 pdo = src_pdo[i]; 832 833 if (pdo_type(pdo) == PDO_TYPE_FIXED && 834 pdo_fixed_voltage(pdo) == 5000) { 835 unsigned int curr = pdo_max_current(pdo); 836 837 if (curr >= 3000) 838 return TYPEC_CC_RP_3_0; 839 else if (curr >= 1500) 840 return TYPEC_CC_RP_1_5; 841 return TYPEC_CC_RP_DEF; 842 } 843 } 844 845 return TYPEC_CC_RP_DEF; 846} 847 848static void tcpm_ams_finish(struct tcpm_port *port) 849{ 850 tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]); 851 852 if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) { 853 if (port->negotiated_rev >= PD_REV30) 854 tcpm_set_cc(port, SINK_TX_OK); 855 else 856 tcpm_set_cc(port, SINK_TX_NG); 857 } else if (port->pwr_role == TYPEC_SOURCE) { 858 tcpm_set_cc(port, tcpm_rp_cc(port)); 859 } 860 861 port->in_ams = false; 862 port->ams = NONE_AMS; 863} 864 865static int tcpm_pd_transmit(struct tcpm_port *port, 866 enum tcpm_transmit_type type, 867 const struct pd_message *msg) 868{ 869 unsigned long timeout; 870 int ret; 871 872 if (msg) 873 tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header)); 874 else 875 tcpm_log(port, "PD TX, type: %#x", type); 876 877 reinit_completion(&port->tx_complete); 878 ret = port->tcpc->pd_transmit(port->tcpc, type, msg, port->negotiated_rev); 879 if (ret < 0) 880 return ret; 881 882 mutex_unlock(&port->lock); 883 timeout = wait_for_completion_timeout(&port->tx_complete, 884 msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT)); 885 mutex_lock(&port->lock); 886 if (!timeout) 887 return -ETIMEDOUT; 888 889 switch (port->tx_status) { 890 case TCPC_TX_SUCCESS: 891 port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK; 892 /* 893 * USB PD rev 2.0, 8.3.2.2.1: 894 * USB PD rev 3.0, 8.3.2.1.3: 895 * "... Note that every AMS is Interruptible until the first 896 * Message in the sequence has been successfully sent (GoodCRC 897 * Message received)." 898 */ 899 if (port->ams != NONE_AMS) 900 port->in_ams = true; 901 break; 902 case TCPC_TX_DISCARDED: 903 ret = -EAGAIN; 904 break; 905 case TCPC_TX_FAILED: 906 default: 907 ret = -EIO; 908 break; 909 } 910 911 /* Some AMS don't expect responses. Finish them here. 
*/ 912 if (port->ams == ATTENTION || port->ams == SOURCE_ALERT) 913 tcpm_ams_finish(port); 914 915 return ret; 916} 917 918void tcpm_pd_transmit_complete(struct tcpm_port *port, 919 enum tcpm_transmit_status status) 920{ 921 tcpm_log(port, "PD TX complete, status: %u", status); 922 port->tx_status = status; 923 complete(&port->tx_complete); 924} 925EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete); 926 927static int tcpm_mux_set(struct tcpm_port *port, int state, 928 enum usb_role usb_role, 929 enum typec_orientation orientation) 930{ 931 int ret; 932 933 tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d", 934 state, usb_role, orientation); 935 936 ret = typec_set_orientation(port->typec_port, orientation); 937 if (ret) 938 return ret; 939 940 if (port->role_sw) { 941 ret = usb_role_switch_set_role(port->role_sw, usb_role); 942 if (ret) 943 return ret; 944 } 945 946 return typec_set_mode(port->typec_port, state); 947} 948 949static int tcpm_set_polarity(struct tcpm_port *port, 950 enum typec_cc_polarity polarity) 951{ 952 int ret; 953 954 tcpm_log(port, "polarity %d", polarity); 955 956 ret = port->tcpc->set_polarity(port->tcpc, polarity); 957 if (ret < 0) 958 return ret; 959 960 port->polarity = polarity; 961 962 return 0; 963} 964 965static int tcpm_set_vconn(struct tcpm_port *port, bool enable) 966{ 967 int ret; 968 969 tcpm_log(port, "vconn:=%d", enable); 970 971 ret = port->tcpc->set_vconn(port->tcpc, enable); 972 if (!ret) { 973 port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK; 974 typec_set_vconn_role(port->typec_port, port->vconn_role); 975 } 976 977 return ret; 978} 979 980static u32 tcpm_get_current_limit(struct tcpm_port *port) 981{ 982 enum typec_cc_status cc; 983 u32 limit; 984 985 cc = port->polarity ? port->cc2 : port->cc1; 986 switch (cc) { 987 case TYPEC_CC_RP_1_5: 988 limit = 1500; 989 break; 990 case TYPEC_CC_RP_3_0: 991 limit = 3000; 992 break; 993 case TYPEC_CC_RP_DEF: 994 default: 995 if (port->tcpc->get_current_limit) 996 limit = port->tcpc->get_current_limit(port->tcpc); 997 else 998 limit = 0; 999 break; 1000 } 1001 1002 return limit; 1003} 1004 1005static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv) 1006{ 1007 int ret = -EOPNOTSUPP; 1008 1009 tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma); 1010 1011 port->supply_voltage = mv; 1012 port->current_limit = max_ma; 1013 power_supply_changed(port->psy); 1014 1015 if (port->tcpc->set_current_limit) 1016 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv); 1017 1018 return ret; 1019} 1020 1021static int tcpm_set_attached_state(struct tcpm_port *port, bool attached) 1022{ 1023 return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role, 1024 port->data_role); 1025} 1026 1027static int tcpm_set_roles(struct tcpm_port *port, bool attached, 1028 enum typec_role role, enum typec_data_role data) 1029{ 1030 enum typec_orientation orientation; 1031 enum usb_role usb_role; 1032 int ret; 1033 1034 if (port->polarity == TYPEC_POLARITY_CC1) 1035 orientation = TYPEC_ORIENTATION_NORMAL; 1036 else 1037 orientation = TYPEC_ORIENTATION_REVERSE; 1038 1039 if (port->typec_caps.data == TYPEC_PORT_DRD) { 1040 if (data == TYPEC_HOST) 1041 usb_role = USB_ROLE_HOST; 1042 else 1043 usb_role = USB_ROLE_DEVICE; 1044 } else if (port->typec_caps.data == TYPEC_PORT_DFP) { 1045 if (data == TYPEC_HOST) { 1046 if (role == TYPEC_SOURCE) 1047 usb_role = USB_ROLE_HOST; 1048 else 1049 usb_role = USB_ROLE_NONE; 1050 } else { 1051 return -ENOTSUPP; 1052 } 1053 } else { 1054 if 
(data == TYPEC_DEVICE) { 1055 if (role == TYPEC_SINK) 1056 usb_role = USB_ROLE_DEVICE; 1057 else 1058 usb_role = USB_ROLE_NONE; 1059 } else { 1060 return -ENOTSUPP; 1061 } 1062 } 1063 1064 ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation); 1065 if (ret < 0) 1066 return ret; 1067 1068 ret = port->tcpc->set_roles(port->tcpc, attached, role, data); 1069 if (ret < 0) 1070 return ret; 1071 1072 port->pwr_role = role; 1073 port->data_role = data; 1074 typec_set_data_role(port->typec_port, data); 1075 typec_set_pwr_role(port->typec_port, role); 1076 1077 return 0; 1078} 1079 1080static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role) 1081{ 1082 int ret; 1083 1084 ret = port->tcpc->set_roles(port->tcpc, true, role, 1085 port->data_role); 1086 if (ret < 0) 1087 return ret; 1088 1089 port->pwr_role = role; 1090 typec_set_pwr_role(port->typec_port, role); 1091 1092 return 0; 1093} 1094 1095/* 1096 * Transform the PDO to be compliant to PD rev2.0. 1097 * Return 0 if the PDO type is not defined in PD rev2.0. 1098 * Otherwise, return the converted PDO. 1099 */ 1100static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role) 1101{ 1102 switch (pdo_type(pdo)) { 1103 case PDO_TYPE_FIXED: 1104 if (role == TYPEC_SINK) 1105 return pdo & ~PDO_FIXED_FRS_CURR_MASK; 1106 else 1107 return pdo & ~PDO_FIXED_UNCHUNK_EXT; 1108 case PDO_TYPE_VAR: 1109 case PDO_TYPE_BATT: 1110 return pdo; 1111 case PDO_TYPE_APDO: 1112 default: 1113 return 0; 1114 } 1115} 1116 1117static int tcpm_pd_send_source_caps(struct tcpm_port *port) 1118{ 1119 struct pd_message msg; 1120 u32 pdo; 1121 unsigned int i, nr_pdo = 0; 1122 1123 memset(&msg, 0, sizeof(msg)); 1124 1125 for (i = 0; i < port->nr_src_pdo; i++) { 1126 if (port->negotiated_rev >= PD_REV30) { 1127 msg.payload[nr_pdo++] = cpu_to_le32(port->src_pdo[i]); 1128 } else { 1129 pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE); 1130 if (pdo) 1131 msg.payload[nr_pdo++] = cpu_to_le32(pdo); 1132 } 1133 } 1134 1135 if (!nr_pdo) { 1136 /* No source capabilities defined, sink only */ 1137 msg.header = PD_HEADER_LE(PD_CTRL_REJECT, 1138 port->pwr_role, 1139 port->data_role, 1140 port->negotiated_rev, 1141 port->message_id, 0); 1142 } else { 1143 msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP, 1144 port->pwr_role, 1145 port->data_role, 1146 port->negotiated_rev, 1147 port->message_id, 1148 nr_pdo); 1149 } 1150 1151 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg); 1152} 1153 1154static int tcpm_pd_send_sink_caps(struct tcpm_port *port) 1155{ 1156 struct pd_message msg; 1157 u32 pdo; 1158 unsigned int i, nr_pdo = 0; 1159 1160 memset(&msg, 0, sizeof(msg)); 1161 1162 for (i = 0; i < port->nr_snk_pdo; i++) { 1163 if (port->negotiated_rev >= PD_REV30) { 1164 msg.payload[nr_pdo++] = cpu_to_le32(port->snk_pdo[i]); 1165 } else { 1166 pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK); 1167 if (pdo) 1168 msg.payload[nr_pdo++] = cpu_to_le32(pdo); 1169 } 1170 } 1171 1172 if (!nr_pdo) { 1173 /* No sink capabilities defined, source only */ 1174 msg.header = PD_HEADER_LE(PD_CTRL_REJECT, 1175 port->pwr_role, 1176 port->data_role, 1177 port->negotiated_rev, 1178 port->message_id, 0); 1179 } else { 1180 msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP, 1181 port->pwr_role, 1182 port->data_role, 1183 port->negotiated_rev, 1184 port->message_id, 1185 nr_pdo); 1186 } 1187 1188 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg); 1189} 1190 1191static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms) 1192{ 1193 if 
(delay_ms) { 1194 hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL); 1195 } else { 1196 hrtimer_cancel(&port->state_machine_timer); 1197 kthread_queue_work(port->wq, &port->state_machine); 1198 } 1199} 1200 1201static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms) 1202{ 1203 if (delay_ms) { 1204 hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms), 1205 HRTIMER_MODE_REL); 1206 } else { 1207 hrtimer_cancel(&port->vdm_state_machine_timer); 1208 kthread_queue_work(port->wq, &port->vdm_state_machine); 1209 } 1210} 1211 1212static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms) 1213{ 1214 if (delay_ms) { 1215 hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL); 1216 } else { 1217 hrtimer_cancel(&port->enable_frs_timer); 1218 kthread_queue_work(port->wq, &port->enable_frs); 1219 } 1220} 1221 1222static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms) 1223{ 1224 if (delay_ms) { 1225 hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL); 1226 } else { 1227 hrtimer_cancel(&port->send_discover_timer); 1228 kthread_queue_work(port->wq, &port->send_discover_work); 1229 } 1230} 1231 1232static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state, 1233 unsigned int delay_ms) 1234{ 1235 if (delay_ms) { 1236 tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]", 1237 tcpm_states[port->state], tcpm_states[state], delay_ms, 1238 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]); 1239 port->delayed_state = state; 1240 mod_tcpm_delayed_work(port, delay_ms); 1241 port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms)); 1242 port->delay_ms = delay_ms; 1243 } else { 1244 tcpm_log(port, "state change %s -> %s [%s %s]", 1245 tcpm_states[port->state], tcpm_states[state], 1246 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]); 1247 port->delayed_state = INVALID_STATE; 1248 port->prev_state = port->state; 1249 port->state = state; 1250 /* 1251 * Don't re-queue the state machine work item if we're currently 1252 * in the state machine and we're immediately changing states. 1253 * tcpm_state_machine_work() will continue running the state 1254 * machine. 1255 */ 1256 if (!port->state_machine_running) 1257 mod_tcpm_delayed_work(port, 0); 1258 } 1259} 1260 1261static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state, 1262 unsigned int delay_ms) 1263{ 1264 if (port->enter_state == port->state) 1265 tcpm_set_state(port, state, delay_ms); 1266 else 1267 tcpm_log(port, 1268 "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]", 1269 delay_ms ? 
"delayed " : "", 1270 tcpm_states[port->state], tcpm_states[state], 1271 delay_ms, tcpm_states[port->enter_state], 1272 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]); 1273} 1274 1275static void tcpm_queue_message(struct tcpm_port *port, 1276 enum pd_msg_request message) 1277{ 1278 port->queued_message = message; 1279 mod_tcpm_delayed_work(port, 0); 1280} 1281 1282static bool tcpm_vdm_ams(struct tcpm_port *port) 1283{ 1284 switch (port->ams) { 1285 case DISCOVER_IDENTITY: 1286 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY: 1287 case DISCOVER_SVIDS: 1288 case DISCOVER_MODES: 1289 case DFP_TO_UFP_ENTER_MODE: 1290 case DFP_TO_UFP_EXIT_MODE: 1291 case DFP_TO_CABLE_PLUG_ENTER_MODE: 1292 case DFP_TO_CABLE_PLUG_EXIT_MODE: 1293 case ATTENTION: 1294 case UNSTRUCTURED_VDMS: 1295 case STRUCTURED_VDMS: 1296 break; 1297 default: 1298 return false; 1299 } 1300 1301 return true; 1302} 1303 1304static bool tcpm_ams_interruptible(struct tcpm_port *port) 1305{ 1306 switch (port->ams) { 1307 /* Interruptible AMS */ 1308 case NONE_AMS: 1309 case SECURITY: 1310 case FIRMWARE_UPDATE: 1311 case DISCOVER_IDENTITY: 1312 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY: 1313 case DISCOVER_SVIDS: 1314 case DISCOVER_MODES: 1315 case DFP_TO_UFP_ENTER_MODE: 1316 case DFP_TO_UFP_EXIT_MODE: 1317 case DFP_TO_CABLE_PLUG_ENTER_MODE: 1318 case DFP_TO_CABLE_PLUG_EXIT_MODE: 1319 case UNSTRUCTURED_VDMS: 1320 case STRUCTURED_VDMS: 1321 case COUNTRY_INFO: 1322 case COUNTRY_CODES: 1323 break; 1324 /* Non-Interruptible AMS */ 1325 default: 1326 if (port->in_ams) 1327 return false; 1328 break; 1329 } 1330 1331 return true; 1332} 1333 1334static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams) 1335{ 1336 int ret = 0; 1337 1338 tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]); 1339 1340 if (!tcpm_ams_interruptible(port) && 1341 !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) { 1342 port->upcoming_state = INVALID_STATE; 1343 tcpm_log(port, "AMS %s not interruptible, aborting", 1344 tcpm_ams_str[port->ams]); 1345 return -EAGAIN; 1346 } 1347 1348 if (port->pwr_role == TYPEC_SOURCE) { 1349 enum typec_cc_status cc_req = port->cc_req; 1350 1351 port->ams = ams; 1352 1353 if (ams == HARD_RESET) { 1354 tcpm_set_cc(port, tcpm_rp_cc(port)); 1355 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL); 1356 tcpm_set_state(port, HARD_RESET_START, 0); 1357 return ret; 1358 } else if (ams == SOFT_RESET_AMS) { 1359 if (!port->explicit_contract) 1360 tcpm_set_cc(port, tcpm_rp_cc(port)); 1361 tcpm_set_state(port, SOFT_RESET_SEND, 0); 1362 return ret; 1363 } else if (tcpm_vdm_ams(port)) { 1364 /* tSinkTx is enforced in vdm_run_state_machine */ 1365 if (port->negotiated_rev >= PD_REV30) 1366 tcpm_set_cc(port, SINK_TX_NG); 1367 return ret; 1368 } 1369 1370 if (port->negotiated_rev >= PD_REV30) 1371 tcpm_set_cc(port, SINK_TX_NG); 1372 1373 switch (port->state) { 1374 case SRC_READY: 1375 case SRC_STARTUP: 1376 case SRC_SOFT_RESET_WAIT_SNK_TX: 1377 case SOFT_RESET: 1378 case SOFT_RESET_SEND: 1379 if (port->negotiated_rev >= PD_REV30) 1380 tcpm_set_state(port, AMS_START, 1381 cc_req == SINK_TX_OK ? 1382 PD_T_SINK_TX : 0); 1383 else 1384 tcpm_set_state(port, AMS_START, 0); 1385 break; 1386 default: 1387 if (port->negotiated_rev >= PD_REV30) 1388 tcpm_set_state(port, SRC_READY, 1389 cc_req == SINK_TX_OK ? 
1390 PD_T_SINK_TX : 0); 1391 else 1392 tcpm_set_state(port, SRC_READY, 0); 1393 break; 1394 } 1395 } else { 1396 if (port->negotiated_rev >= PD_REV30 && 1397 !tcpm_sink_tx_ok(port) && 1398 ams != SOFT_RESET_AMS && 1399 ams != HARD_RESET) { 1400 port->upcoming_state = INVALID_STATE; 1401 tcpm_log(port, "Sink TX No Go"); 1402 return -EAGAIN; 1403 } 1404 1405 port->ams = ams; 1406 1407 if (ams == HARD_RESET) { 1408 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL); 1409 tcpm_set_state(port, HARD_RESET_START, 0); 1410 return ret; 1411 } else if (tcpm_vdm_ams(port)) { 1412 return ret; 1413 } 1414 1415 if (port->state == SNK_READY || 1416 port->state == SNK_SOFT_RESET) 1417 tcpm_set_state(port, AMS_START, 0); 1418 else 1419 tcpm_set_state(port, SNK_READY, 0); 1420 } 1421 1422 return ret; 1423} 1424 1425/* 1426 * VDM/VDO handling functions 1427 */ 1428static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header, 1429 const u32 *data, int cnt) 1430{ 1431 WARN_ON(!mutex_is_locked(&port->lock)); 1432 1433 /* Make sure we are not still processing a previous VDM packet */ 1434 WARN_ON(port->vdm_state > VDM_STATE_DONE); 1435 1436 port->vdo_count = cnt + 1; 1437 port->vdo_data[0] = header; 1438 memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt); 1439 /* Set ready, vdm state machine will actually send */ 1440 port->vdm_retries = 0; 1441 port->vdm_state = VDM_STATE_READY; 1442 port->vdm_sm_running = true; 1443 1444 mod_vdm_delayed_work(port, 0); 1445} 1446 1447static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header, 1448 const u32 *data, int cnt) 1449{ 1450 mutex_lock(&port->lock); 1451 tcpm_queue_vdm(port, header, data, cnt); 1452 mutex_unlock(&port->lock); 1453} 1454 1455static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt) 1456{ 1457 u32 vdo = p[VDO_INDEX_IDH]; 1458 u32 product = p[VDO_INDEX_PRODUCT]; 1459 1460 memset(&port->mode_data, 0, sizeof(port->mode_data)); 1461 1462 port->partner_ident.id_header = vdo; 1463 port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT]; 1464 port->partner_ident.product = product; 1465 1466 typec_partner_set_identity(port->partner); 1467 1468 tcpm_log(port, "Identity: %04x:%04x.%04x", 1469 PD_IDH_VID(vdo), 1470 PD_PRODUCT_PID(product), product & 0xffff); 1471} 1472 1473static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt) 1474{ 1475 struct pd_mode_data *pmdata = &port->mode_data; 1476 int i; 1477 1478 for (i = 1; i < cnt; i++) { 1479 u16 svid; 1480 1481 svid = (p[i] >> 16) & 0xffff; 1482 if (!svid) 1483 return false; 1484 1485 if (pmdata->nsvids >= SVID_DISCOVERY_MAX) 1486 goto abort; 1487 1488 pmdata->svids[pmdata->nsvids++] = svid; 1489 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid); 1490 1491 svid = p[i] & 0xffff; 1492 if (!svid) 1493 return false; 1494 1495 if (pmdata->nsvids >= SVID_DISCOVERY_MAX) 1496 goto abort; 1497 1498 pmdata->svids[pmdata->nsvids++] = svid; 1499 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid); 1500 } 1501 return true; 1502abort: 1503 tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX); 1504 return false; 1505} 1506 1507static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt) 1508{ 1509 struct pd_mode_data *pmdata = &port->mode_data; 1510 struct typec_altmode_desc *paltmode; 1511 int i; 1512 1513 if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) { 1514 /* Already logged in svdm_consume_svids() */ 1515 return; 1516 } 1517 1518 for (i = 1; i < cnt; i++) { 1519 paltmode = &pmdata->altmode_desc[pmdata->altmodes]; 
1520 memset(paltmode, 0, sizeof(*paltmode)); 1521 1522 paltmode->svid = pmdata->svids[pmdata->svid_index]; 1523 paltmode->mode = i; 1524 paltmode->vdo = p[i]; 1525 1526 tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x", 1527 pmdata->altmodes, paltmode->svid, 1528 paltmode->mode, paltmode->vdo); 1529 1530 pmdata->altmodes++; 1531 } 1532} 1533 1534static void tcpm_register_partner_altmodes(struct tcpm_port *port) 1535{ 1536 struct pd_mode_data *modep = &port->mode_data; 1537 struct typec_altmode *altmode; 1538 int i; 1539 1540 for (i = 0; i < modep->altmodes; i++) { 1541 altmode = typec_partner_register_altmode(port->partner, 1542 &modep->altmode_desc[i]); 1543 if (IS_ERR(altmode)) { 1544 tcpm_log(port, "Failed to register partner SVID 0x%04x", 1545 modep->altmode_desc[i].svid); 1546 altmode = NULL; 1547 } 1548 port->partner_altmode[i] = altmode; 1549 } 1550} 1551 1552#define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header) 1553 1554static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev, 1555 const u32 *p, int cnt, u32 *response, 1556 enum adev_actions *adev_action) 1557{ 1558 struct typec_port *typec = port->typec_port; 1559 struct typec_altmode *pdev; 1560 struct pd_mode_data *modep; 1561 int svdm_version; 1562 int rlen = 0; 1563 int cmd_type; 1564 int cmd; 1565 int i; 1566 1567 cmd_type = PD_VDO_CMDT(p[0]); 1568 cmd = PD_VDO_CMD(p[0]); 1569 1570 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d", 1571 p[0], cmd_type, cmd, cnt); 1572 1573 modep = &port->mode_data; 1574 1575 pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX, 1576 PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0])); 1577 1578 svdm_version = typec_get_negotiated_svdm_version(typec); 1579 if (svdm_version < 0) 1580 return 0; 1581 1582 switch (cmd_type) { 1583 case CMDT_INIT: 1584 switch (cmd) { 1585 case CMD_DISCOVER_IDENT: 1586 if (PD_VDO_VID(p[0]) != USB_SID_PD) 1587 break; 1588 1589 if (PD_VDO_SVDM_VER(p[0]) < svdm_version) { 1590 typec_partner_set_svdm_version(port->partner, 1591 PD_VDO_SVDM_VER(p[0])); 1592 svdm_version = PD_VDO_SVDM_VER(p[0]); 1593 } 1594 1595 port->ams = DISCOVER_IDENTITY; 1596 /* 1597 * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host) 1598 * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or 1599 * "wrong configuation" or "Unrecognized" 1600 */ 1601 if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) && 1602 port->nr_snk_vdo) { 1603 if (svdm_version < SVDM_VER_2_0) { 1604 for (i = 0; i < port->nr_snk_vdo_v1; i++) 1605 response[i + 1] = port->snk_vdo_v1[i]; 1606 rlen = port->nr_snk_vdo_v1 + 1; 1607 1608 } else { 1609 for (i = 0; i < port->nr_snk_vdo; i++) 1610 response[i + 1] = port->snk_vdo[i]; 1611 rlen = port->nr_snk_vdo + 1; 1612 } 1613 } 1614 break; 1615 case CMD_DISCOVER_SVID: 1616 port->ams = DISCOVER_SVIDS; 1617 break; 1618 case CMD_DISCOVER_MODES: 1619 port->ams = DISCOVER_MODES; 1620 break; 1621 case CMD_ENTER_MODE: 1622 port->ams = DFP_TO_UFP_ENTER_MODE; 1623 break; 1624 case CMD_EXIT_MODE: 1625 port->ams = DFP_TO_UFP_EXIT_MODE; 1626 break; 1627 case CMD_ATTENTION: 1628 /* Attention command does not have response */ 1629 *adev_action = ADEV_ATTENTION; 1630 return 0; 1631 default: 1632 break; 1633 } 1634 if (rlen >= 1) { 1635 response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK); 1636 } else if (rlen == 0) { 1637 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK); 1638 rlen = 1; 1639 } else { 1640 response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY); 1641 rlen = 1; 1642 } 1643 response[0] = (response[0] & 
~VDO_SVDM_VERS_MASK) | 1644 (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec))); 1645 break; 1646 case CMDT_RSP_ACK: 1647 /* silently drop message if we are not connected */ 1648 if (IS_ERR_OR_NULL(port->partner)) 1649 break; 1650 1651 tcpm_ams_finish(port); 1652 1653 switch (cmd) { 1654 case CMD_DISCOVER_IDENT: 1655 if (PD_VDO_SVDM_VER(p[0]) < svdm_version) 1656 typec_partner_set_svdm_version(port->partner, 1657 PD_VDO_SVDM_VER(p[0])); 1658 /* 6.4.4.3.1 */ 1659 svdm_consume_identity(port, p, cnt); 1660 response[0] = VDO(USB_SID_PD, 1, typec_get_negotiated_svdm_version(typec), 1661 CMD_DISCOVER_SVID); 1662 rlen = 1; 1663 break; 1664 case CMD_DISCOVER_SVID: 1665 /* 6.4.4.3.2 */ 1666 if (svdm_consume_svids(port, p, cnt)) { 1667 response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID); 1668 rlen = 1; 1669 } else if (modep->nsvids && supports_modal(port)) { 1670 response[0] = VDO(modep->svids[0], 1, svdm_version, 1671 CMD_DISCOVER_MODES); 1672 rlen = 1; 1673 } 1674 break; 1675 case CMD_DISCOVER_MODES: 1676 /* 6.4.4.3.3 */ 1677 svdm_consume_modes(port, p, cnt); 1678 modep->svid_index++; 1679 if (modep->svid_index < modep->nsvids) { 1680 u16 svid = modep->svids[modep->svid_index]; 1681 response[0] = VDO(svid, 1, svdm_version, CMD_DISCOVER_MODES); 1682 rlen = 1; 1683 } else { 1684 tcpm_register_partner_altmodes(port); 1685 } 1686 break; 1687 case CMD_ENTER_MODE: 1688 if (adev && pdev) { 1689 typec_altmode_update_active(pdev, true); 1690 *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL; 1691 } 1692 return 0; 1693 case CMD_EXIT_MODE: 1694 if (adev && pdev) { 1695 typec_altmode_update_active(pdev, false); 1696 /* Back to USB Operation */ 1697 *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM; 1698 return 0; 1699 } 1700 break; 1701 case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15): 1702 break; 1703 default: 1704 /* Unrecognized SVDM */ 1705 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK); 1706 rlen = 1; 1707 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) | 1708 (VDO_SVDM_VERS(svdm_version)); 1709 break; 1710 } 1711 break; 1712 case CMDT_RSP_NAK: 1713 tcpm_ams_finish(port); 1714 switch (cmd) { 1715 case CMD_DISCOVER_IDENT: 1716 case CMD_DISCOVER_SVID: 1717 case CMD_DISCOVER_MODES: 1718 case VDO_CMD_VENDOR(0) ... 
VDO_CMD_VENDOR(15): 1719 break; 1720 case CMD_ENTER_MODE: 1721 /* Back to USB Operation */ 1722 *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM; 1723 return 0; 1724 default: 1725 /* Unrecognized SVDM */ 1726 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK); 1727 rlen = 1; 1728 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) | 1729 (VDO_SVDM_VERS(svdm_version)); 1730 break; 1731 } 1732 break; 1733 default: 1734 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK); 1735 rlen = 1; 1736 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) | 1737 (VDO_SVDM_VERS(svdm_version)); 1738 break; 1739 } 1740 1741 /* Informing the alternate mode drivers about everything */ 1742 *adev_action = ADEV_QUEUE_VDM; 1743 return rlen; 1744} 1745 1746static void tcpm_pd_handle_msg(struct tcpm_port *port, 1747 enum pd_msg_request message, 1748 enum tcpm_ams ams); 1749 1750static void tcpm_handle_vdm_request(struct tcpm_port *port, 1751 const __le32 *payload, int cnt) 1752{ 1753 enum adev_actions adev_action = ADEV_NONE; 1754 struct typec_altmode *adev; 1755 u32 p[PD_MAX_PAYLOAD]; 1756 u32 response[8] = { }; 1757 int i, rlen = 0; 1758 1759 for (i = 0; i < cnt; i++) 1760 p[i] = le32_to_cpu(payload[i]); 1761 1762 adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX, 1763 PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0])); 1764 1765 if (port->vdm_state == VDM_STATE_BUSY) { 1766 /* If UFP responded busy retry after timeout */ 1767 if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) { 1768 port->vdm_state = VDM_STATE_WAIT_RSP_BUSY; 1769 port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) | 1770 CMDT_INIT; 1771 mod_vdm_delayed_work(port, PD_T_VDM_BUSY); 1772 return; 1773 } 1774 port->vdm_state = VDM_STATE_DONE; 1775 } 1776 1777 if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) { 1778 /* 1779 * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in 1780 * advance because we are dropping the lock but may send VDMs soon. 1781 * For the cases of INIT received: 1782 * - If no response to send, it will be cleared later in this function. 1783 * - If there are responses to send, it will be cleared in the state machine. 1784 * For the cases of RSP received: 1785 * - If no further INIT to send, it will be cleared later in this function. 1786 * - Otherwise, it will be cleared in the state machine if timeout or it will go 1787 * back here until no further INIT to send. 1788 * For the cases of unknown type received: 1789 * - We will send NAK and the flag will be cleared in the state machine. 1790 */ 1791 port->vdm_sm_running = true; 1792 rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action); 1793 } else { 1794 if (port->negotiated_rev >= PD_REV30) 1795 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); 1796 } 1797 1798 /* 1799 * We are done with any state stored in the port struct now, except 1800 * for any port struct changes done by the tcpm_queue_vdm() call 1801 * below, which is a separate operation. 1802 * 1803 * So we can safely release the lock here; and we MUST release the 1804 * lock here to avoid an AB BA lock inversion: 1805 * 1806 * If we keep the lock here then the lock ordering in this path is: 1807 * 1. tcpm_pd_rx_handler take the tcpm port lock 1808 * 2. One of the typec_altmode_* calls below takes the alt-mode's lock 1809 * 1810 * And we also have this ordering: 1811 * 1. alt-mode driver takes the alt-mode's lock 1812 * 2. alt-mode driver calls tcpm_altmode_enter which takes the 1813 * tcpm port lock 1814 * 1815 * Dropping our lock here avoids this. 
1816 */ 1817 mutex_unlock(&port->lock); 1818 1819 if (adev) { 1820 switch (adev_action) { 1821 case ADEV_NONE: 1822 break; 1823 case ADEV_NOTIFY_USB_AND_QUEUE_VDM: 1824 WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL)); 1825 typec_altmode_vdm(adev, p[0], &p[1], cnt); 1826 break; 1827 case ADEV_QUEUE_VDM: 1828 typec_altmode_vdm(adev, p[0], &p[1], cnt); 1829 break; 1830 case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL: 1831 if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) { 1832 int svdm_version = typec_get_negotiated_svdm_version( 1833 port->typec_port); 1834 if (svdm_version < 0) 1835 break; 1836 1837 response[0] = VDO(adev->svid, 1, svdm_version, 1838 CMD_EXIT_MODE); 1839 response[0] |= VDO_OPOS(adev->mode); 1840 rlen = 1; 1841 } 1842 break; 1843 case ADEV_ATTENTION: 1844 typec_altmode_attention(adev, p[1]); 1845 break; 1846 } 1847 } 1848 1849 /* 1850 * We must re-take the lock here to balance the unlock in 1851 * tcpm_pd_rx_handler, note that no changes, other then the 1852 * tcpm_queue_vdm call, are made while the lock is held again. 1853 * All that is done after the call is unwinding the call stack until 1854 * we return to tcpm_pd_rx_handler and do the unlock there. 1855 */ 1856 mutex_lock(&port->lock); 1857 1858 if (rlen > 0) 1859 tcpm_queue_vdm(port, response[0], &response[1], rlen - 1); 1860 else 1861 port->vdm_sm_running = false; 1862} 1863 1864static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd, 1865 const u32 *data, int count) 1866{ 1867 int svdm_version = typec_get_negotiated_svdm_version(port->typec_port); 1868 u32 header; 1869 1870 if (svdm_version < 0) 1871 return; 1872 1873 if (WARN_ON(count > VDO_MAX_SIZE - 1)) 1874 count = VDO_MAX_SIZE - 1; 1875 1876 /* set VDM header with VID & CMD */ 1877 header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ? 1878 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), 1879 svdm_version, cmd); 1880 tcpm_queue_vdm(port, header, data, count); 1881} 1882 1883static unsigned int vdm_ready_timeout(u32 vdm_hdr) 1884{ 1885 unsigned int timeout; 1886 int cmd = PD_VDO_CMD(vdm_hdr); 1887 1888 /* its not a structured VDM command */ 1889 if (!PD_VDO_SVDM(vdm_hdr)) 1890 return PD_T_VDM_UNSTRUCTURED; 1891 1892 switch (PD_VDO_CMDT(vdm_hdr)) { 1893 case CMDT_INIT: 1894 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE) 1895 timeout = PD_T_VDM_WAIT_MODE_E; 1896 else 1897 timeout = PD_T_VDM_SNDR_RSP; 1898 break; 1899 default: 1900 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE) 1901 timeout = PD_T_VDM_E_MODE; 1902 else 1903 timeout = PD_T_VDM_RCVR_RSP; 1904 break; 1905 } 1906 return timeout; 1907} 1908 1909static void vdm_run_state_machine(struct tcpm_port *port) 1910{ 1911 struct pd_message msg; 1912 int i, res = 0; 1913 u32 vdo_hdr = port->vdo_data[0]; 1914 1915 switch (port->vdm_state) { 1916 case VDM_STATE_READY: 1917 /* Only transmit VDM if attached */ 1918 if (!port->attached) { 1919 port->vdm_state = VDM_STATE_ERR_BUSY; 1920 break; 1921 } 1922 1923 /* 1924 * if there's traffic or we're not in PDO ready state don't send 1925 * a VDM. 
1926 */ 1927 if (port->state != SRC_READY && port->state != SNK_READY) { 1928 port->vdm_sm_running = false; 1929 break; 1930 } 1931 1932 /* TODO: AMS operation for Unstructured VDM */ 1933 if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) { 1934 switch (PD_VDO_CMD(vdo_hdr)) { 1935 case CMD_DISCOVER_IDENT: 1936 res = tcpm_ams_start(port, DISCOVER_IDENTITY); 1937 if (res == 0) 1938 port->send_discover = false; 1939 else if (res == -EAGAIN) 1940 mod_send_discover_delayed_work(port, 1941 SEND_DISCOVER_RETRY_MS); 1942 break; 1943 case CMD_DISCOVER_SVID: 1944 res = tcpm_ams_start(port, DISCOVER_SVIDS); 1945 break; 1946 case CMD_DISCOVER_MODES: 1947 res = tcpm_ams_start(port, DISCOVER_MODES); 1948 break; 1949 case CMD_ENTER_MODE: 1950 res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE); 1951 break; 1952 case CMD_EXIT_MODE: 1953 res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE); 1954 break; 1955 case CMD_ATTENTION: 1956 res = tcpm_ams_start(port, ATTENTION); 1957 break; 1958 case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15): 1959 res = tcpm_ams_start(port, STRUCTURED_VDMS); 1960 break; 1961 default: 1962 res = -EOPNOTSUPP; 1963 break; 1964 } 1965 1966 if (res < 0) { 1967 port->vdm_state = VDM_STATE_ERR_BUSY; 1968 return; 1969 } 1970 } 1971 1972 port->vdm_state = VDM_STATE_SEND_MESSAGE; 1973 mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 && 1974 port->pwr_role == TYPEC_SOURCE && 1975 PD_VDO_SVDM(vdo_hdr) && 1976 PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ? 1977 PD_T_SINK_TX : 0); 1978 break; 1979 case VDM_STATE_WAIT_RSP_BUSY: 1980 port->vdo_data[0] = port->vdo_retry; 1981 port->vdo_count = 1; 1982 port->vdm_state = VDM_STATE_READY; 1983 tcpm_ams_finish(port); 1984 break; 1985 case VDM_STATE_BUSY: 1986 port->vdm_state = VDM_STATE_ERR_TMOUT; 1987 if (port->ams != NONE_AMS) 1988 tcpm_ams_finish(port); 1989 break; 1990 case VDM_STATE_ERR_SEND: 1991 /* 1992 * A partner which does not support USB PD will not reply, 1993 * so this is not a fatal error. At the same time, some 1994 * devices may not return GoodCRC under some circumstances, 1995 * so we need to retry. 1996 */ 1997 if (port->vdm_retries < 3) { 1998 tcpm_log(port, "VDM Tx error, retry"); 1999 port->vdm_retries++; 2000 port->vdm_state = VDM_STATE_READY; 2001 if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) 2002 tcpm_ams_finish(port); 2003 } else { 2004 tcpm_ams_finish(port); 2005 } 2006 break; 2007 case VDM_STATE_SEND_MESSAGE: 2008 /* Prepare and send VDM */ 2009 memset(&msg, 0, sizeof(msg)); 2010 msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF, 2011 port->pwr_role, 2012 port->data_role, 2013 port->negotiated_rev, 2014 port->message_id, port->vdo_count); 2015 for (i = 0; i < port->vdo_count; i++) 2016 msg.payload[i] = cpu_to_le32(port->vdo_data[i]); 2017 res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg); 2018 if (res < 0) { 2019 port->vdm_state = VDM_STATE_ERR_SEND; 2020 } else { 2021 unsigned long timeout; 2022 2023 port->vdm_retries = 0; 2024 port->vdm_state = VDM_STATE_BUSY; 2025 timeout = vdm_ready_timeout(vdo_hdr); 2026 mod_vdm_delayed_work(port, timeout); 2027 } 2028 break; 2029 default: 2030 break; 2031 } 2032} 2033 2034static void vdm_state_machine_work(struct kthread_work *work) 2035{ 2036 struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine); 2037 enum vdm_states prev_state; 2038 2039 mutex_lock(&port->lock); 2040 2041 /* 2042 * Continue running as long as the port is not busy and there was 2043 * a state change. 
2044 */ 2045 do { 2046 prev_state = port->vdm_state; 2047 vdm_run_state_machine(port); 2048 } while (port->vdm_state != prev_state && 2049 port->vdm_state != VDM_STATE_BUSY && 2050 port->vdm_state != VDM_STATE_SEND_MESSAGE); 2051 2052 if (port->vdm_state < VDM_STATE_READY) 2053 port->vdm_sm_running = false; 2054 2055 mutex_unlock(&port->lock); 2056} 2057 2058enum pdo_err { 2059 PDO_NO_ERR, 2060 PDO_ERR_NO_VSAFE5V, 2061 PDO_ERR_VSAFE5V_NOT_FIRST, 2062 PDO_ERR_PDO_TYPE_NOT_IN_ORDER, 2063 PDO_ERR_FIXED_NOT_SORTED, 2064 PDO_ERR_VARIABLE_BATT_NOT_SORTED, 2065 PDO_ERR_DUPE_PDO, 2066 PDO_ERR_PPS_APDO_NOT_SORTED, 2067 PDO_ERR_DUPE_PPS_APDO, 2068}; 2069 2070static const char * const pdo_err_msg[] = { 2071 [PDO_ERR_NO_VSAFE5V] = 2072 " err: source/sink caps should at least have vSafe5V", 2073 [PDO_ERR_VSAFE5V_NOT_FIRST] = 2074 " err: vSafe5V Fixed Supply Object Shall always be the first object", 2075 [PDO_ERR_PDO_TYPE_NOT_IN_ORDER] = 2076 " err: PDOs should be in the following order: Fixed; Battery; Variable", 2077 [PDO_ERR_FIXED_NOT_SORTED] = 2078 " err: Fixed supply pdos should be in increasing order of their fixed voltage", 2079 [PDO_ERR_VARIABLE_BATT_NOT_SORTED] = 2080 " err: Variable/Battery supply pdos should be in increasing order of their minimum voltage", 2081 [PDO_ERR_DUPE_PDO] = 2082 " err: Variable/Batt supply pdos cannot have same min/max voltage", 2083 [PDO_ERR_PPS_APDO_NOT_SORTED] = 2084 " err: Programmable power supply apdos should be in increasing order of their maximum voltage", 2085 [PDO_ERR_DUPE_PPS_APDO] = 2086 " err: Programmable power supply apdos cannot have same min/max voltage and max current", 2087}; 2088 2089static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo, 2090 unsigned int nr_pdo) 2091{ 2092 unsigned int i; 2093 2094 /* Should at least contain vSafe5v */ 2095 if (nr_pdo < 1) 2096 return PDO_ERR_NO_VSAFE5V; 2097 2098 /* The vSafe5V Fixed Supply Object Shall always be the first object */ 2099 if (pdo_type(pdo[0]) != PDO_TYPE_FIXED || 2100 pdo_fixed_voltage(pdo[0]) != VSAFE5V) 2101 return PDO_ERR_VSAFE5V_NOT_FIRST; 2102 2103 for (i = 1; i < nr_pdo; i++) { 2104 if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) { 2105 return PDO_ERR_PDO_TYPE_NOT_IN_ORDER; 2106 } else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) { 2107 enum pd_pdo_type type = pdo_type(pdo[i]); 2108 2109 switch (type) { 2110 /* 2111 * The remaining Fixed Supply Objects, if 2112 * present, shall be sent in voltage order; 2113 * lowest to highest. 2114 */ 2115 case PDO_TYPE_FIXED: 2116 if (pdo_fixed_voltage(pdo[i]) <= 2117 pdo_fixed_voltage(pdo[i - 1])) 2118 return PDO_ERR_FIXED_NOT_SORTED; 2119 break; 2120 /* 2121 * The Battery Supply Objects and Variable 2122 * supply, if present shall be sent in Minimum 2123 * Voltage order; lowest to highest. 2124 */ 2125 case PDO_TYPE_VAR: 2126 case PDO_TYPE_BATT: 2127 if (pdo_min_voltage(pdo[i]) < 2128 pdo_min_voltage(pdo[i - 1])) 2129 return PDO_ERR_VARIABLE_BATT_NOT_SORTED; 2130 else if ((pdo_min_voltage(pdo[i]) == 2131 pdo_min_voltage(pdo[i - 1])) && 2132 (pdo_max_voltage(pdo[i]) == 2133 pdo_max_voltage(pdo[i - 1]))) 2134 return PDO_ERR_DUPE_PDO; 2135 break; 2136 /* 2137 * The Programmable Power Supply APDOs, if present, 2138 * shall be sent in Maximum Voltage order; 2139 * lowest to highest. 
2140 */ 2141 case PDO_TYPE_APDO: 2142 if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS) 2143 break; 2144 2145 if (pdo_pps_apdo_max_voltage(pdo[i]) < 2146 pdo_pps_apdo_max_voltage(pdo[i - 1])) 2147 return PDO_ERR_PPS_APDO_NOT_SORTED; 2148 else if (pdo_pps_apdo_min_voltage(pdo[i]) == 2149 pdo_pps_apdo_min_voltage(pdo[i - 1]) && 2150 pdo_pps_apdo_max_voltage(pdo[i]) == 2151 pdo_pps_apdo_max_voltage(pdo[i - 1]) && 2152 pdo_pps_apdo_max_current(pdo[i]) == 2153 pdo_pps_apdo_max_current(pdo[i - 1])) 2154 return PDO_ERR_DUPE_PPS_APDO; 2155 break; 2156 default: 2157 tcpm_log_force(port, " Unknown pdo type"); 2158 } 2159 } 2160 } 2161 2162 return PDO_NO_ERR; 2163} 2164 2165static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo, 2166 unsigned int nr_pdo) 2167{ 2168 enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo); 2169 2170 if (err_index != PDO_NO_ERR) { 2171 tcpm_log_force(port, " %s", pdo_err_msg[err_index]); 2172 return -EINVAL; 2173 } 2174 2175 return 0; 2176} 2177 2178static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo) 2179{ 2180 struct tcpm_port *port = typec_altmode_get_drvdata(altmode); 2181 int svdm_version; 2182 u32 header; 2183 2184 svdm_version = typec_get_negotiated_svdm_version(port->typec_port); 2185 if (svdm_version < 0) 2186 return svdm_version; 2187 2188 header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE); 2189 header |= VDO_OPOS(altmode->mode); 2190 2191 tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0); 2192 return 0; 2193} 2194 2195static int tcpm_altmode_exit(struct typec_altmode *altmode) 2196{ 2197 struct tcpm_port *port = typec_altmode_get_drvdata(altmode); 2198 int svdm_version; 2199 u32 header; 2200 2201 svdm_version = typec_get_negotiated_svdm_version(port->typec_port); 2202 if (svdm_version < 0) 2203 return svdm_version; 2204 2205 header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE); 2206 header |= VDO_OPOS(altmode->mode); 2207 2208 tcpm_queue_vdm_unlocked(port, header, NULL, 0); 2209 return 0; 2210} 2211 2212static int tcpm_altmode_vdm(struct typec_altmode *altmode, 2213 u32 header, const u32 *data, int count) 2214{ 2215 struct tcpm_port *port = typec_altmode_get_drvdata(altmode); 2216 2217 tcpm_queue_vdm_unlocked(port, header, data, count - 1); 2218 2219 return 0; 2220} 2221 2222static const struct typec_altmode_ops tcpm_altmode_ops = { 2223 .enter = tcpm_altmode_enter, 2224 .exit = tcpm_altmode_exit, 2225 .vdm = tcpm_altmode_vdm, 2226}; 2227 2228/* 2229 * PD (data, control) command handling functions 2230 */ 2231static inline enum tcpm_state ready_state(struct tcpm_port *port) 2232{ 2233 if (port->pwr_role == TYPEC_SOURCE) 2234 return SRC_READY; 2235 else 2236 return SNK_READY; 2237} 2238 2239static int tcpm_pd_send_control(struct tcpm_port *port, 2240 enum pd_ctrl_msg_type type); 2241 2242static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload, 2243 int cnt) 2244{ 2245 u32 p0 = le32_to_cpu(payload[0]); 2246 unsigned int type = usb_pd_ado_type(p0); 2247 2248 if (!type) { 2249 tcpm_log(port, "Alert message received with no type"); 2250 tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP); 2251 return; 2252 } 2253 2254 /* Just handling non-battery alerts for now */ 2255 if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) { 2256 if (port->pwr_role == TYPEC_SOURCE) { 2257 port->upcoming_state = GET_STATUS_SEND; 2258 tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS); 2259 } else { 2260 /* 2261 * Do not check SinkTxOk here in case the Source doesn't set its Rp to 2262 * SinkTxOk in time. 
2263 */ 2264 port->ams = GETTING_SOURCE_SINK_STATUS; 2265 tcpm_set_state(port, GET_STATUS_SEND, 0); 2266 } 2267 } else { 2268 tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP); 2269 } 2270} 2271 2272static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port, 2273 enum typec_pwr_opmode mode, bool pps_active, 2274 u32 requested_vbus_voltage) 2275{ 2276 int ret; 2277 2278 if (!port->tcpc->set_auto_vbus_discharge_threshold) 2279 return 0; 2280 2281 ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active, 2282 requested_vbus_voltage); 2283 tcpm_log_force(port, 2284 "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d", 2285 mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret); 2286 2287 return ret; 2288} 2289 2290static void tcpm_pd_handle_state(struct tcpm_port *port, 2291 enum tcpm_state state, 2292 enum tcpm_ams ams, 2293 unsigned int delay_ms) 2294{ 2295 switch (port->state) { 2296 case SRC_READY: 2297 case SNK_READY: 2298 port->ams = ams; 2299 tcpm_set_state(port, state, delay_ms); 2300 break; 2301 /* 8.3.3.4.1.1 and 6.8.1 power transitioning */ 2302 case SNK_TRANSITION_SINK: 2303 case SNK_TRANSITION_SINK_VBUS: 2304 case SRC_TRANSITION_SUPPLY: 2305 tcpm_set_state(port, HARD_RESET_SEND, 0); 2306 break; 2307 default: 2308 if (!tcpm_ams_interruptible(port)) { 2309 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? 2310 SRC_SOFT_RESET_WAIT_SNK_TX : 2311 SNK_SOFT_RESET, 2312 0); 2313 } else { 2314 /* process the Message 6.8.1 */ 2315 port->upcoming_state = state; 2316 port->next_ams = ams; 2317 tcpm_set_state(port, ready_state(port), delay_ms); 2318 } 2319 break; 2320 } 2321} 2322 2323static void tcpm_pd_handle_msg(struct tcpm_port *port, 2324 enum pd_msg_request message, 2325 enum tcpm_ams ams) 2326{ 2327 switch (port->state) { 2328 case SRC_READY: 2329 case SNK_READY: 2330 port->ams = ams; 2331 tcpm_queue_message(port, message); 2332 break; 2333 /* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */ 2334 case SNK_TRANSITION_SINK: 2335 case SNK_TRANSITION_SINK_VBUS: 2336 case SRC_TRANSITION_SUPPLY: 2337 tcpm_set_state(port, HARD_RESET_SEND, 0); 2338 break; 2339 default: 2340 if (!tcpm_ams_interruptible(port)) { 2341 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? 2342 SRC_SOFT_RESET_WAIT_SNK_TX : 2343 SNK_SOFT_RESET, 2344 0); 2345 } else { 2346 port->next_ams = ams; 2347 tcpm_set_state(port, ready_state(port), 0); 2348 /* 6.8.1 process the Message */ 2349 tcpm_queue_message(port, message); 2350 } 2351 break; 2352 } 2353} 2354 2355static void tcpm_pd_data_request(struct tcpm_port *port, 2356 const struct pd_message *msg) 2357{ 2358 enum pd_data_msg_type type = pd_header_type_le(msg->header); 2359 unsigned int cnt = pd_header_cnt_le(msg->header); 2360 unsigned int rev = pd_header_rev_le(msg->header); 2361 unsigned int i; 2362 enum frs_typec_current partner_frs_current; 2363 bool frs_enable; 2364 int ret; 2365 2366 if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) { 2367 port->vdm_state = VDM_STATE_ERR_BUSY; 2368 tcpm_ams_finish(port); 2369 mod_vdm_delayed_work(port, 0); 2370 } 2371 2372 switch (type) { 2373 case PD_DATA_SOURCE_CAP: 2374 for (i = 0; i < cnt; i++) 2375 port->source_caps[i] = le32_to_cpu(msg->payload[i]); 2376 2377 port->nr_source_caps = cnt; 2378 2379 tcpm_log_source_caps(port); 2380 2381 tcpm_validate_caps(port, port->source_caps, 2382 port->nr_source_caps); 2383 2384 /* 2385 * Adjust revision in subsequent message headers, as required, 2386 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. 
We don't 2387 * support Rev 1.0 so just do nothing in that scenario. 2388 */ 2389 if (rev == PD_REV10) { 2390 if (port->ams == GET_SOURCE_CAPABILITIES) 2391 tcpm_ams_finish(port); 2392 break; 2393 } 2394 2395 if (rev < PD_MAX_REV) 2396 port->negotiated_rev = rev; 2397 2398 if (port->pwr_role == TYPEC_SOURCE) { 2399 if (port->ams == GET_SOURCE_CAPABILITIES) 2400 tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0); 2401 /* Unexpected Source Capabilities */ 2402 else 2403 tcpm_pd_handle_msg(port, 2404 port->negotiated_rev < PD_REV30 ? 2405 PD_MSG_CTRL_REJECT : 2406 PD_MSG_CTRL_NOT_SUPP, 2407 NONE_AMS); 2408 } else if (port->state == SNK_WAIT_CAPABILITIES) { 2409 /* 2410 * This message may be received even if VBUS is not 2411 * present. This is quite unexpected; see USB PD 2412 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2. 2413 * However, at the same time, we must be ready to 2414 * receive this message and respond to it 15ms after 2415 * receiving PS_RDY during power swap operations, no matter 2416 * if VBUS is available or not (USB PD specification, 2417 * section 6.5.9.2). 2418 * So we need to accept the message either way, 2419 * but be prepared to keep waiting for VBUS after it was 2420 * handled. 2421 */ 2422 port->ams = POWER_NEGOTIATION; 2423 port->in_ams = true; 2424 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0); 2425 } else { 2426 if (port->ams == GET_SOURCE_CAPABILITIES) 2427 tcpm_ams_finish(port); 2428 tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES, 2429 POWER_NEGOTIATION, 0); 2430 } 2431 break; 2432 case PD_DATA_REQUEST: 2433 /* 2434 * Adjust revision in subsequent message headers, as required, 2435 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't 2436 * support Rev 1.0 so just reject in that scenario. 2437 */ 2438 if (rev == PD_REV10) { 2439 tcpm_pd_handle_msg(port, 2440 port->negotiated_rev < PD_REV30 ? 2441 PD_MSG_CTRL_REJECT : 2442 PD_MSG_CTRL_NOT_SUPP, 2443 NONE_AMS); 2444 break; 2445 } 2446 2447 if (rev < PD_MAX_REV) 2448 port->negotiated_rev = rev; 2449 2450 if (port->pwr_role != TYPEC_SOURCE || cnt != 1) { 2451 tcpm_pd_handle_msg(port, 2452 port->negotiated_rev < PD_REV30 ? 2453 PD_MSG_CTRL_REJECT : 2454 PD_MSG_CTRL_NOT_SUPP, 2455 NONE_AMS); 2456 break; 2457 } 2458 2459 port->sink_request = le32_to_cpu(msg->payload[0]); 2460 2461 if (port->vdm_sm_running && port->explicit_contract) { 2462 tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams); 2463 break; 2464 } 2465 2466 if (port->state == SRC_SEND_CAPABILITIES) 2467 tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0); 2468 else 2469 tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES, 2470 POWER_NEGOTIATION, 0); 2471 break; 2472 case PD_DATA_SINK_CAP: 2473 /* We don't do anything with this at the moment... */ 2474 for (i = 0; i < cnt; i++) 2475 port->sink_caps[i] = le32_to_cpu(msg->payload[i]); 2476 2477 partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >> 2478 PDO_FIXED_FRS_CURR_SHIFT; 2479 frs_enable = partner_frs_current && (partner_frs_current <= 2480 port->new_source_frs_current); 2481 tcpm_log(port, 2482 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c", 2483 partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n'); 2484 if (frs_enable) { 2485 ret = port->tcpc->enable_frs(port->tcpc, true); 2486 tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? 
"fail" : "success", ret); 2487 } 2488 2489 port->nr_sink_caps = cnt; 2490 port->sink_cap_done = true; 2491 if (port->ams == GET_SINK_CAPABILITIES) 2492 tcpm_set_state(port, ready_state(port), 0); 2493 /* Unexpected Sink Capabilities */ 2494 else 2495 tcpm_pd_handle_msg(port, 2496 port->negotiated_rev < PD_REV30 ? 2497 PD_MSG_CTRL_REJECT : 2498 PD_MSG_CTRL_NOT_SUPP, 2499 NONE_AMS); 2500 break; 2501 case PD_DATA_VENDOR_DEF: 2502 tcpm_handle_vdm_request(port, msg->payload, cnt); 2503 break; 2504 case PD_DATA_BIST: 2505 port->bist_request = le32_to_cpu(msg->payload[0]); 2506 tcpm_pd_handle_state(port, BIST_RX, BIST, 0); 2507 break; 2508 case PD_DATA_ALERT: 2509 if (port->state != SRC_READY && port->state != SNK_READY) 2510 tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ? 2511 SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET, 2512 NONE_AMS, 0); 2513 else 2514 tcpm_handle_alert(port, msg->payload, cnt); 2515 break; 2516 case PD_DATA_BATT_STATUS: 2517 case PD_DATA_GET_COUNTRY_INFO: 2518 /* Currently unsupported */ 2519 tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ? 2520 PD_MSG_CTRL_REJECT : 2521 PD_MSG_CTRL_NOT_SUPP, 2522 NONE_AMS); 2523 break; 2524 default: 2525 tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ? 2526 PD_MSG_CTRL_REJECT : 2527 PD_MSG_CTRL_NOT_SUPP, 2528 NONE_AMS); 2529 tcpm_log(port, "Unrecognized data message type %#x", type); 2530 break; 2531 } 2532} 2533 2534static void tcpm_pps_complete(struct tcpm_port *port, int result) 2535{ 2536 if (port->pps_pending) { 2537 port->pps_status = result; 2538 port->pps_pending = false; 2539 complete(&port->pps_complete); 2540 } 2541} 2542 2543static void tcpm_pd_ctrl_request(struct tcpm_port *port, 2544 const struct pd_message *msg) 2545{ 2546 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header); 2547 enum tcpm_state next_state; 2548 2549 /* 2550 * Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in 2551 * VDM AMS if waiting for VDM responses and will be handled later. 2552 */ 2553 if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) { 2554 port->vdm_state = VDM_STATE_ERR_BUSY; 2555 tcpm_ams_finish(port); 2556 mod_vdm_delayed_work(port, 0); 2557 } 2558 2559 switch (type) { 2560 case PD_CTRL_GOOD_CRC: 2561 case PD_CTRL_PING: 2562 break; 2563 case PD_CTRL_GET_SOURCE_CAP: 2564 tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES); 2565 break; 2566 case PD_CTRL_GET_SINK_CAP: 2567 tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES); 2568 break; 2569 case PD_CTRL_GOTO_MIN: 2570 break; 2571 case PD_CTRL_PS_RDY: 2572 switch (port->state) { 2573 case SNK_TRANSITION_SINK: 2574 if (port->vbus_present) { 2575 tcpm_set_current_limit(port, 2576 port->req_current_limit, 2577 port->req_supply_voltage); 2578 port->explicit_contract = true; 2579 tcpm_set_auto_vbus_discharge_threshold(port, 2580 TYPEC_PWR_MODE_PD, 2581 port->pps_data.active, 2582 port->supply_voltage); 2583 tcpm_set_state(port, SNK_READY, 0); 2584 } else { 2585 /* 2586 * Seen after power swap. Keep waiting for VBUS 2587 * in a transitional state. 
2588 */ 2589 tcpm_set_state(port, 2590 SNK_TRANSITION_SINK_VBUS, 0); 2591 } 2592 break; 2593 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED: 2594 tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0); 2595 break; 2596 case PR_SWAP_SNK_SRC_SINK_OFF: 2597 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0); 2598 break; 2599 case VCONN_SWAP_WAIT_FOR_VCONN: 2600 tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0); 2601 break; 2602 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: 2603 tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0); 2604 break; 2605 default: 2606 tcpm_pd_handle_state(port, 2607 port->pwr_role == TYPEC_SOURCE ? 2608 SRC_SOFT_RESET_WAIT_SNK_TX : 2609 SNK_SOFT_RESET, 2610 NONE_AMS, 0); 2611 break; 2612 } 2613 break; 2614 case PD_CTRL_REJECT: 2615 case PD_CTRL_WAIT: 2616 case PD_CTRL_NOT_SUPP: 2617 switch (port->state) { 2618 case SNK_NEGOTIATE_CAPABILITIES: 2619 /* USB PD specification, Figure 8-43 */ 2620 if (port->explicit_contract) 2621 next_state = SNK_READY; 2622 else 2623 next_state = SNK_WAIT_CAPABILITIES; 2624 2625 /* Threshold was relaxed before sending Request. Restore it back. */ 2626 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD, 2627 port->pps_data.active, 2628 port->supply_voltage); 2629 tcpm_set_state(port, next_state, 0); 2630 break; 2631 case SNK_NEGOTIATE_PPS_CAPABILITIES: 2632 /* Revert data back from any requested PPS updates */ 2633 port->pps_data.req_out_volt = port->supply_voltage; 2634 port->pps_data.req_op_curr = port->current_limit; 2635 port->pps_status = (type == PD_CTRL_WAIT ? 2636 -EAGAIN : -EOPNOTSUPP); 2637 2638 /* Threshold was relaxed before sending Request. Restore it back. */ 2639 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD, 2640 port->pps_data.active, 2641 port->supply_voltage); 2642 2643 tcpm_set_state(port, SNK_READY, 0); 2644 break; 2645 case DR_SWAP_SEND: 2646 port->swap_status = (type == PD_CTRL_WAIT ? 2647 -EAGAIN : -EOPNOTSUPP); 2648 tcpm_set_state(port, DR_SWAP_CANCEL, 0); 2649 break; 2650 case PR_SWAP_SEND: 2651 port->swap_status = (type == PD_CTRL_WAIT ? 2652 -EAGAIN : -EOPNOTSUPP); 2653 tcpm_set_state(port, PR_SWAP_CANCEL, 0); 2654 break; 2655 case VCONN_SWAP_SEND: 2656 port->swap_status = (type == PD_CTRL_WAIT ? 2657 -EAGAIN : -EOPNOTSUPP); 2658 tcpm_set_state(port, VCONN_SWAP_CANCEL, 0); 2659 break; 2660 case FR_SWAP_SEND: 2661 tcpm_set_state(port, FR_SWAP_CANCEL, 0); 2662 break; 2663 case GET_SINK_CAP: 2664 port->sink_cap_done = true; 2665 tcpm_set_state(port, ready_state(port), 0); 2666 break; 2667 case SRC_READY: 2668 case SNK_READY: 2669 if (port->vdm_state > VDM_STATE_READY) { 2670 port->vdm_state = VDM_STATE_DONE; 2671 if (tcpm_vdm_ams(port)) 2672 tcpm_ams_finish(port); 2673 mod_vdm_delayed_work(port, 0); 2674 break; 2675 } 2676 fallthrough; 2677 default: 2678 tcpm_pd_handle_state(port, 2679 port->pwr_role == TYPEC_SOURCE ? 
2680 SRC_SOFT_RESET_WAIT_SNK_TX : 2681 SNK_SOFT_RESET, 2682 NONE_AMS, 0); 2683 break; 2684 } 2685 break; 2686 case PD_CTRL_ACCEPT: 2687 switch (port->state) { 2688 case SNK_NEGOTIATE_CAPABILITIES: 2689 port->pps_data.active = false; 2690 tcpm_set_state(port, SNK_TRANSITION_SINK, 0); 2691 break; 2692 case SNK_NEGOTIATE_PPS_CAPABILITIES: 2693 port->pps_data.active = true; 2694 port->pps_data.min_volt = port->pps_data.req_min_volt; 2695 port->pps_data.max_volt = port->pps_data.req_max_volt; 2696 port->pps_data.max_curr = port->pps_data.req_max_curr; 2697 port->req_supply_voltage = port->pps_data.req_out_volt; 2698 port->req_current_limit = port->pps_data.req_op_curr; 2699 power_supply_changed(port->psy); 2700 tcpm_set_state(port, SNK_TRANSITION_SINK, 0); 2701 break; 2702 case SOFT_RESET_SEND: 2703 if (port->ams == SOFT_RESET_AMS) 2704 tcpm_ams_finish(port); 2705 if (port->pwr_role == TYPEC_SOURCE) { 2706 port->upcoming_state = SRC_SEND_CAPABILITIES; 2707 tcpm_ams_start(port, POWER_NEGOTIATION); 2708 } else { 2709 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0); 2710 } 2711 break; 2712 case DR_SWAP_SEND: 2713 tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0); 2714 break; 2715 case PR_SWAP_SEND: 2716 tcpm_set_state(port, PR_SWAP_START, 0); 2717 break; 2718 case VCONN_SWAP_SEND: 2719 tcpm_set_state(port, VCONN_SWAP_START, 0); 2720 break; 2721 case FR_SWAP_SEND: 2722 tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0); 2723 break; 2724 default: 2725 tcpm_pd_handle_state(port, 2726 port->pwr_role == TYPEC_SOURCE ? 2727 SRC_SOFT_RESET_WAIT_SNK_TX : 2728 SNK_SOFT_RESET, 2729 NONE_AMS, 0); 2730 break; 2731 } 2732 break; 2733 case PD_CTRL_SOFT_RESET: 2734 port->ams = SOFT_RESET_AMS; 2735 tcpm_set_state(port, SOFT_RESET, 0); 2736 break; 2737 case PD_CTRL_DR_SWAP: 2738 /* 2739 * XXX 2740 * 6.3.9: If an alternate mode is active, a request to swap 2741 * alternate modes shall trigger a port reset. 2742 */ 2743 if (port->typec_caps.data != TYPEC_PORT_DRD) { 2744 tcpm_pd_handle_msg(port, 2745 port->negotiated_rev < PD_REV30 ? 2746 PD_MSG_CTRL_REJECT : 2747 PD_MSG_CTRL_NOT_SUPP, 2748 NONE_AMS); 2749 } else { 2750 if (port->send_discover) { 2751 tcpm_queue_message(port, PD_MSG_CTRL_WAIT); 2752 break; 2753 } 2754 2755 tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0); 2756 } 2757 break; 2758 case PD_CTRL_PR_SWAP: 2759 if (port->port_type != TYPEC_PORT_DRP) { 2760 tcpm_pd_handle_msg(port, 2761 port->negotiated_rev < PD_REV30 ? 2762 PD_MSG_CTRL_REJECT : 2763 PD_MSG_CTRL_NOT_SUPP, 2764 NONE_AMS); 2765 } else { 2766 if (port->send_discover) { 2767 tcpm_queue_message(port, PD_MSG_CTRL_WAIT); 2768 break; 2769 } 2770 2771 tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0); 2772 } 2773 break; 2774 case PD_CTRL_VCONN_SWAP: 2775 if (port->send_discover) { 2776 tcpm_queue_message(port, PD_MSG_CTRL_WAIT); 2777 break; 2778 } 2779 2780 tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0); 2781 break; 2782 case PD_CTRL_GET_SOURCE_CAP_EXT: 2783 case PD_CTRL_GET_STATUS: 2784 case PD_CTRL_FR_SWAP: 2785 case PD_CTRL_GET_PPS_STATUS: 2786 case PD_CTRL_GET_COUNTRY_CODES: 2787 /* Currently not supported */ 2788 tcpm_pd_handle_msg(port, 2789 port->negotiated_rev < PD_REV30 ? 2790 PD_MSG_CTRL_REJECT : 2791 PD_MSG_CTRL_NOT_SUPP, 2792 NONE_AMS); 2793 break; 2794 default: 2795 tcpm_pd_handle_msg(port, 2796 port->negotiated_rev < PD_REV30 ? 
2797 PD_MSG_CTRL_REJECT : 2798 PD_MSG_CTRL_NOT_SUPP, 2799 NONE_AMS); 2800 tcpm_log(port, "Unrecognized ctrl message type %#x", type); 2801 break; 2802 } 2803} 2804 2805static void tcpm_pd_ext_msg_request(struct tcpm_port *port, 2806 const struct pd_message *msg) 2807{ 2808 enum pd_ext_msg_type type = pd_header_type_le(msg->header); 2809 unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header); 2810 2811 /* stopping VDM state machine if interrupted by other Messages */ 2812 if (tcpm_vdm_ams(port)) { 2813 port->vdm_state = VDM_STATE_ERR_BUSY; 2814 tcpm_ams_finish(port); 2815 mod_vdm_delayed_work(port, 0); 2816 } 2817 2818 if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) { 2819 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); 2820 tcpm_log(port, "Unchunked extended messages unsupported"); 2821 return; 2822 } 2823 2824 if (data_size > PD_EXT_MAX_CHUNK_DATA) { 2825 tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP); 2826 tcpm_log(port, "Chunk handling not yet supported"); 2827 return; 2828 } 2829 2830 switch (type) { 2831 case PD_EXT_STATUS: 2832 case PD_EXT_PPS_STATUS: 2833 if (port->ams == GETTING_SOURCE_SINK_STATUS) { 2834 tcpm_ams_finish(port); 2835 tcpm_set_state(port, ready_state(port), 0); 2836 } else { 2837 /* unexpected Status or PPS_Status Message */ 2838 tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ? 2839 SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET, 2840 NONE_AMS, 0); 2841 } 2842 break; 2843 case PD_EXT_SOURCE_CAP_EXT: 2844 case PD_EXT_GET_BATT_CAP: 2845 case PD_EXT_GET_BATT_STATUS: 2846 case PD_EXT_BATT_CAP: 2847 case PD_EXT_GET_MANUFACTURER_INFO: 2848 case PD_EXT_MANUFACTURER_INFO: 2849 case PD_EXT_SECURITY_REQUEST: 2850 case PD_EXT_SECURITY_RESPONSE: 2851 case PD_EXT_FW_UPDATE_REQUEST: 2852 case PD_EXT_FW_UPDATE_RESPONSE: 2853 case PD_EXT_COUNTRY_INFO: 2854 case PD_EXT_COUNTRY_CODES: 2855 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); 2856 break; 2857 default: 2858 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); 2859 tcpm_log(port, "Unrecognized extended message type %#x", type); 2860 break; 2861 } 2862} 2863 2864static void tcpm_pd_rx_handler(struct kthread_work *work) 2865{ 2866 struct pd_rx_event *event = container_of(work, 2867 struct pd_rx_event, work); 2868 const struct pd_message *msg = &event->msg; 2869 unsigned int cnt = pd_header_cnt_le(msg->header); 2870 struct tcpm_port *port = event->port; 2871 2872 mutex_lock(&port->lock); 2873 2874 tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header), 2875 port->attached); 2876 2877 if (port->attached) { 2878 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header); 2879 unsigned int msgid = pd_header_msgid_le(msg->header); 2880 2881 /* 2882 * USB PD standard, 6.6.1.2: 2883 * "... if MessageID value in a received Message is the 2884 * same as the stored value, the receiver shall return a 2885 * GoodCRC Message with that MessageID value and drop 2886 * the Message (this is a retry of an already received 2887 * Message). Note: this shall not apply to the Soft_Reset 2888 * Message which always has a MessageID value of zero." 2889 */ 2890 if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET) 2891 goto done; 2892 port->rx_msgid = msgid; 2893 2894 /* 2895 * If both ends believe to be DFP/host, we have a data role 2896 * mismatch. 
2897 */ 2898 if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) == 2899 (port->data_role == TYPEC_HOST)) { 2900 tcpm_log(port, 2901 "Data role mismatch, initiating error recovery"); 2902 tcpm_set_state(port, ERROR_RECOVERY, 0); 2903 } else { 2904 if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR) 2905 tcpm_pd_ext_msg_request(port, msg); 2906 else if (cnt) 2907 tcpm_pd_data_request(port, msg); 2908 else 2909 tcpm_pd_ctrl_request(port, msg); 2910 } 2911 } 2912 2913done: 2914 mutex_unlock(&port->lock); 2915 kfree(event); 2916} 2917 2918void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg) 2919{ 2920 struct pd_rx_event *event; 2921 2922 event = kzalloc(sizeof(*event), GFP_ATOMIC); 2923 if (!event) 2924 return; 2925 2926 kthread_init_work(&event->work, tcpm_pd_rx_handler); 2927 event->port = port; 2928 memcpy(&event->msg, msg, sizeof(*msg)); 2929 kthread_queue_work(port->wq, &event->work); 2930} 2931EXPORT_SYMBOL_GPL(tcpm_pd_receive); 2932 2933static int tcpm_pd_send_control(struct tcpm_port *port, 2934 enum pd_ctrl_msg_type type) 2935{ 2936 struct pd_message msg; 2937 2938 memset(&msg, 0, sizeof(msg)); 2939 msg.header = PD_HEADER_LE(type, port->pwr_role, 2940 port->data_role, 2941 port->negotiated_rev, 2942 port->message_id, 0); 2943 2944 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg); 2945} 2946 2947/* 2948 * Send queued message without affecting state. 2949 * Return true if state machine should go back to sleep, 2950 * false otherwise. 2951 */ 2952static bool tcpm_send_queued_message(struct tcpm_port *port) 2953{ 2954 enum pd_msg_request queued_message; 2955 int ret; 2956 2957 do { 2958 queued_message = port->queued_message; 2959 port->queued_message = PD_MSG_NONE; 2960 2961 switch (queued_message) { 2962 case PD_MSG_CTRL_WAIT: 2963 tcpm_pd_send_control(port, PD_CTRL_WAIT); 2964 break; 2965 case PD_MSG_CTRL_REJECT: 2966 tcpm_pd_send_control(port, PD_CTRL_REJECT); 2967 break; 2968 case PD_MSG_CTRL_NOT_SUPP: 2969 tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP); 2970 break; 2971 case PD_MSG_DATA_SINK_CAP: 2972 ret = tcpm_pd_send_sink_caps(port); 2973 if (ret < 0) { 2974 tcpm_log(port, "Unable to send snk caps, ret=%d", ret); 2975 tcpm_set_state(port, SNK_SOFT_RESET, 0); 2976 } 2977 tcpm_ams_finish(port); 2978 break; 2979 case PD_MSG_DATA_SOURCE_CAP: 2980 ret = tcpm_pd_send_source_caps(port); 2981 if (ret < 0) { 2982 tcpm_log(port, 2983 "Unable to send src caps, ret=%d", 2984 ret); 2985 tcpm_set_state(port, SOFT_RESET_SEND, 0); 2986 } else if (port->pwr_role == TYPEC_SOURCE) { 2987 tcpm_ams_finish(port); 2988 tcpm_set_state(port, HARD_RESET_SEND, 2989 PD_T_SENDER_RESPONSE); 2990 } else { 2991 tcpm_ams_finish(port); 2992 } 2993 break; 2994 default: 2995 break; 2996 } 2997 } while (port->queued_message != PD_MSG_NONE); 2998 2999 if (port->delayed_state != INVALID_STATE) { 3000 if (ktime_after(port->delayed_runtime, ktime_get())) { 3001 mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime, 3002 ktime_get()))); 3003 return true; 3004 } 3005 port->delayed_state = INVALID_STATE; 3006 } 3007 return false; 3008} 3009 3010static int tcpm_pd_check_request(struct tcpm_port *port) 3011{ 3012 u32 pdo, rdo = port->sink_request; 3013 unsigned int max, op, pdo_max, index; 3014 enum pd_pdo_type type; 3015 3016 index = rdo_index(rdo); 3017 if (!index || index > port->nr_src_pdo) 3018 return -EINVAL; 3019 3020 pdo = port->src_pdo[index - 1]; 3021 type = pdo_type(pdo); 3022 switch (type) { 3023 case PDO_TYPE_FIXED: 3024 case PDO_TYPE_VAR: 3025 max = rdo_max_current(rdo); 
3026 op = rdo_op_current(rdo); 3027 pdo_max = pdo_max_current(pdo); 3028 3029 if (op > pdo_max) 3030 return -EINVAL; 3031 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH)) 3032 return -EINVAL; 3033 3034 if (type == PDO_TYPE_FIXED) 3035 tcpm_log(port, 3036 "Requested %u mV, %u mA for %u / %u mA", 3037 pdo_fixed_voltage(pdo), pdo_max, op, max); 3038 else 3039 tcpm_log(port, 3040 "Requested %u -> %u mV, %u mA for %u / %u mA", 3041 pdo_min_voltage(pdo), pdo_max_voltage(pdo), 3042 pdo_max, op, max); 3043 break; 3044 case PDO_TYPE_BATT: 3045 max = rdo_max_power(rdo); 3046 op = rdo_op_power(rdo); 3047 pdo_max = pdo_max_power(pdo); 3048 3049 if (op > pdo_max) 3050 return -EINVAL; 3051 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH)) 3052 return -EINVAL; 3053 tcpm_log(port, 3054 "Requested %u -> %u mV, %u mW for %u / %u mW", 3055 pdo_min_voltage(pdo), pdo_max_voltage(pdo), 3056 pdo_max, op, max); 3057 break; 3058 default: 3059 return -EINVAL; 3060 } 3061 3062 port->op_vsafe5v = index == 1; 3063 3064 return 0; 3065} 3066 3067#define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y)) 3068#define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y)) 3069 3070static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo, 3071 int *src_pdo) 3072{ 3073 unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0, 3074 max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0, 3075 min_snk_mv = 0; 3076 int ret = -EINVAL; 3077 3078 port->pps_data.supported = false; 3079 port->usb_type = POWER_SUPPLY_USB_TYPE_PD; 3080 power_supply_changed(port->psy); 3081 3082 /* 3083 * Select the source PDO providing the most power which has a 3084 * matching sink cap. 3085 */ 3086 for (i = 0; i < port->nr_source_caps; i++) { 3087 u32 pdo = port->source_caps[i]; 3088 enum pd_pdo_type type = pdo_type(pdo); 3089 3090 switch (type) { 3091 case PDO_TYPE_FIXED: 3092 max_src_mv = pdo_fixed_voltage(pdo); 3093 min_src_mv = max_src_mv; 3094 break; 3095 case PDO_TYPE_BATT: 3096 case PDO_TYPE_VAR: 3097 max_src_mv = pdo_max_voltage(pdo); 3098 min_src_mv = pdo_min_voltage(pdo); 3099 break; 3100 case PDO_TYPE_APDO: 3101 if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) { 3102 port->pps_data.supported = true; 3103 port->usb_type = 3104 POWER_SUPPLY_USB_TYPE_PD_PPS; 3105 power_supply_changed(port->psy); 3106 } 3107 continue; 3108 default: 3109 tcpm_log(port, "Invalid source PDO type, ignoring"); 3110 continue; 3111 } 3112 3113 switch (type) { 3114 case PDO_TYPE_FIXED: 3115 case PDO_TYPE_VAR: 3116 src_ma = pdo_max_current(pdo); 3117 src_mw = src_ma * min_src_mv / 1000; 3118 break; 3119 case PDO_TYPE_BATT: 3120 src_mw = pdo_max_power(pdo); 3121 break; 3122 case PDO_TYPE_APDO: 3123 continue; 3124 default: 3125 tcpm_log(port, "Invalid source PDO type, ignoring"); 3126 continue; 3127 } 3128 3129 for (j = 0; j < port->nr_snk_pdo; j++) { 3130 pdo = port->snk_pdo[j]; 3131 3132 switch (pdo_type(pdo)) { 3133 case PDO_TYPE_FIXED: 3134 max_snk_mv = pdo_fixed_voltage(pdo); 3135 min_snk_mv = max_snk_mv; 3136 break; 3137 case PDO_TYPE_BATT: 3138 case PDO_TYPE_VAR: 3139 max_snk_mv = pdo_max_voltage(pdo); 3140 min_snk_mv = pdo_min_voltage(pdo); 3141 break; 3142 case PDO_TYPE_APDO: 3143 continue; 3144 default: 3145 tcpm_log(port, "Invalid sink PDO type, ignoring"); 3146 continue; 3147 } 3148 3149 if (max_src_mv <= max_snk_mv && 3150 min_src_mv >= min_snk_mv) { 3151 /* Prefer higher voltages if available */ 3152 if ((src_mw == max_mw && min_src_mv > max_mv) || 3153 src_mw > max_mw) { 3154 *src_pdo = i; 3155 *sink_pdo = j; 3156 max_mw = src_mw; 3157 max_mv = 
min_src_mv; 3158 ret = 0; 3159 } 3160 } 3161 } 3162 } 3163 3164 return ret; 3165} 3166 3167#define min_pps_apdo_current(x, y) \ 3168 min(pdo_pps_apdo_max_current(x), pdo_pps_apdo_max_current(y)) 3169 3170static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port) 3171{ 3172 unsigned int i, j, max_mw = 0, max_mv = 0; 3173 unsigned int min_src_mv, max_src_mv, src_ma, src_mw; 3174 unsigned int min_snk_mv, max_snk_mv; 3175 unsigned int max_op_mv; 3176 u32 pdo, src, snk; 3177 unsigned int src_pdo = 0, snk_pdo = 0; 3178 3179 /* 3180 * Select the source PPS APDO providing the most power while staying 3181 * within the board's limits. We skip the first PDO as this is always 3182 * 5V 3A. 3183 */ 3184 for (i = 1; i < port->nr_source_caps; ++i) { 3185 pdo = port->source_caps[i]; 3186 3187 switch (pdo_type(pdo)) { 3188 case PDO_TYPE_APDO: 3189 if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) { 3190 tcpm_log(port, "Not PPS APDO (source), ignoring"); 3191 continue; 3192 } 3193 3194 min_src_mv = pdo_pps_apdo_min_voltage(pdo); 3195 max_src_mv = pdo_pps_apdo_max_voltage(pdo); 3196 src_ma = pdo_pps_apdo_max_current(pdo); 3197 src_mw = (src_ma * max_src_mv) / 1000; 3198 3199 /* 3200 * Now search through the sink PDOs to find a matching 3201 * PPS APDO. Again skip the first sink PDO as this will 3202 * always be 5V 3A. 3203 */ 3204 for (j = 1; j < port->nr_snk_pdo; j++) { 3205 pdo = port->snk_pdo[j]; 3206 3207 switch (pdo_type(pdo)) { 3208 case PDO_TYPE_APDO: 3209 if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) { 3210 tcpm_log(port, 3211 "Not PPS APDO (sink), ignoring"); 3212 continue; 3213 } 3214 3215 min_snk_mv = 3216 pdo_pps_apdo_min_voltage(pdo); 3217 max_snk_mv = 3218 pdo_pps_apdo_max_voltage(pdo); 3219 break; 3220 default: 3221 tcpm_log(port, 3222 "Not APDO type (sink), ignoring"); 3223 continue; 3224 } 3225 3226 if (min_src_mv <= max_snk_mv && 3227 max_src_mv >= min_snk_mv) { 3228 max_op_mv = min(max_src_mv, max_snk_mv); 3229 src_mw = (max_op_mv * src_ma) / 1000; 3230 /* Prefer higher voltages if available */ 3231 if ((src_mw == max_mw && 3232 max_op_mv > max_mv) || 3233 src_mw > max_mw) { 3234 src_pdo = i; 3235 snk_pdo = j; 3236 max_mw = src_mw; 3237 max_mv = max_op_mv; 3238 } 3239 } 3240 } 3241 3242 break; 3243 default: 3244 tcpm_log(port, "Not APDO type (source), ignoring"); 3245 continue; 3246 } 3247 } 3248 3249 if (src_pdo) { 3250 src = port->source_caps[src_pdo]; 3251 snk = port->snk_pdo[snk_pdo]; 3252 3253 port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src), 3254 pdo_pps_apdo_min_voltage(snk)); 3255 port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src), 3256 pdo_pps_apdo_max_voltage(snk)); 3257 port->pps_data.req_max_curr = min_pps_apdo_current(src, snk); 3258 port->pps_data.req_out_volt = min(port->pps_data.req_max_volt, 3259 max(port->pps_data.req_min_volt, 3260 port->pps_data.req_out_volt)); 3261 port->pps_data.req_op_curr = min(port->pps_data.req_max_curr, 3262 port->pps_data.req_op_curr); 3263 } 3264 3265 return src_pdo; 3266} 3267 3268static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo) 3269{ 3270 unsigned int mv, ma, mw, flags; 3271 unsigned int max_ma, max_mw; 3272 enum pd_pdo_type type; 3273 u32 pdo, matching_snk_pdo; 3274 int src_pdo_index = 0; 3275 int snk_pdo_index = 0; 3276 int ret; 3277 3278 ret = tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index); 3279 if (ret < 0) 3280 return ret; 3281 3282 pdo = port->source_caps[src_pdo_index]; 3283 matching_snk_pdo = port->snk_pdo[snk_pdo_index]; 3284 type = pdo_type(pdo); 3285 3286 switch (type) { 3287 case 
PDO_TYPE_FIXED: 3288 mv = pdo_fixed_voltage(pdo); 3289 break; 3290 case PDO_TYPE_BATT: 3291 case PDO_TYPE_VAR: 3292 mv = pdo_min_voltage(pdo); 3293 break; 3294 default: 3295 tcpm_log(port, "Invalid PDO selected!"); 3296 return -EINVAL; 3297 } 3298 3299 /* Select maximum available current within the sink pdo's limit */ 3300 if (type == PDO_TYPE_BATT) { 3301 mw = min_power(pdo, matching_snk_pdo); 3302 ma = 1000 * mw / mv; 3303 } else { 3304 ma = min_current(pdo, matching_snk_pdo); 3305 mw = ma * mv / 1000; 3306 } 3307 3308 flags = RDO_USB_COMM | RDO_NO_SUSPEND; 3309 3310 /* Set mismatch bit if offered power is less than operating power */ 3311 max_ma = ma; 3312 max_mw = mw; 3313 if (mw < port->operating_snk_mw) { 3314 flags |= RDO_CAP_MISMATCH; 3315 if (type == PDO_TYPE_BATT && 3316 (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo))) 3317 max_mw = pdo_max_power(matching_snk_pdo); 3318 else if (pdo_max_current(matching_snk_pdo) > 3319 pdo_max_current(pdo)) 3320 max_ma = pdo_max_current(matching_snk_pdo); 3321 } 3322 3323 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d", 3324 port->cc_req, port->cc1, port->cc2, port->vbus_source, 3325 port->vconn_role == TYPEC_SOURCE ? "source" : "sink", 3326 port->polarity); 3327 3328 if (type == PDO_TYPE_BATT) { 3329 *rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags); 3330 3331 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s", 3332 src_pdo_index, mv, mw, 3333 flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); 3334 } else { 3335 *rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags); 3336 3337 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s", 3338 src_pdo_index, mv, ma, 3339 flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); 3340 } 3341 3342 port->req_current_limit = ma; 3343 port->req_supply_voltage = mv; 3344 3345 return 0; 3346} 3347 3348static int tcpm_pd_send_request(struct tcpm_port *port) 3349{ 3350 struct pd_message msg; 3351 int ret; 3352 u32 rdo; 3353 3354 ret = tcpm_pd_build_request(port, &rdo); 3355 if (ret < 0) 3356 return ret; 3357 3358 /* 3359 * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition. 3360 * It is safer to modify the threshold here. 
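 * (the PD-level threshold is restored once the new contract is in place
 * or the Request is turned down; see the PS_RDY and Reject/Wait/Not_Supported
 * handling in tcpm_pd_ctrl_request())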
3361 */ 3362 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0); 3363 3364 memset(&msg, 0, sizeof(msg)); 3365 msg.header = PD_HEADER_LE(PD_DATA_REQUEST, 3366 port->pwr_role, 3367 port->data_role, 3368 port->negotiated_rev, 3369 port->message_id, 1); 3370 msg.payload[0] = cpu_to_le32(rdo); 3371 3372 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg); 3373} 3374 3375static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo) 3376{ 3377 unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags; 3378 enum pd_pdo_type type; 3379 unsigned int src_pdo_index; 3380 u32 pdo; 3381 3382 src_pdo_index = tcpm_pd_select_pps_apdo(port); 3383 if (!src_pdo_index) 3384 return -EOPNOTSUPP; 3385 3386 pdo = port->source_caps[src_pdo_index]; 3387 type = pdo_type(pdo); 3388 3389 switch (type) { 3390 case PDO_TYPE_APDO: 3391 if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) { 3392 tcpm_log(port, "Invalid APDO selected!"); 3393 return -EINVAL; 3394 } 3395 max_mv = port->pps_data.req_max_volt; 3396 max_ma = port->pps_data.req_max_curr; 3397 out_mv = port->pps_data.req_out_volt; 3398 op_ma = port->pps_data.req_op_curr; 3399 break; 3400 default: 3401 tcpm_log(port, "Invalid PDO selected!"); 3402 return -EINVAL; 3403 } 3404 3405 flags = RDO_USB_COMM | RDO_NO_SUSPEND; 3406 3407 op_mw = (op_ma * out_mv) / 1000; 3408 if (op_mw < port->operating_snk_mw) { 3409 /* 3410 * Try raising current to meet power needs. If that's not enough 3411 * then try upping the voltage. If that's still not enough 3412 * then we've obviously chosen a PPS APDO which really isn't 3413 * suitable so abandon ship. 3414 */ 3415 op_ma = (port->operating_snk_mw * 1000) / out_mv; 3416 if ((port->operating_snk_mw * 1000) % out_mv) 3417 ++op_ma; 3418 op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP); 3419 3420 if (op_ma > max_ma) { 3421 op_ma = max_ma; 3422 out_mv = (port->operating_snk_mw * 1000) / op_ma; 3423 if ((port->operating_snk_mw * 1000) % op_ma) 3424 ++out_mv; 3425 out_mv += RDO_PROG_VOLT_MV_STEP - 3426 (out_mv % RDO_PROG_VOLT_MV_STEP); 3427 3428 if (out_mv > max_mv) { 3429 tcpm_log(port, "Invalid PPS APDO selected!"); 3430 return -EINVAL; 3431 } 3432 } 3433 } 3434 3435 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d", 3436 port->cc_req, port->cc1, port->cc2, port->vbus_source, 3437 port->vconn_role == TYPEC_SOURCE ? "source" : "sink", 3438 port->polarity); 3439 3440 *rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags); 3441 3442 tcpm_log(port, "Requesting APDO %d: %u mV, %u mA", 3443 src_pdo_index, out_mv, op_ma); 3444 3445 port->pps_data.req_op_curr = op_ma; 3446 port->pps_data.req_out_volt = out_mv; 3447 3448 return 0; 3449} 3450 3451static int tcpm_pd_send_pps_request(struct tcpm_port *port) 3452{ 3453 struct pd_message msg; 3454 int ret; 3455 u32 rdo; 3456 3457 ret = tcpm_pd_build_pps_request(port, &rdo); 3458 if (ret < 0) 3459 return ret; 3460 3461 /* Relax the threshold as voltage will be adjusted right after Accept Message. 
*/ 3462 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0); 3463 3464 memset(&msg, 0, sizeof(msg)); 3465 msg.header = PD_HEADER_LE(PD_DATA_REQUEST, 3466 port->pwr_role, 3467 port->data_role, 3468 port->negotiated_rev, 3469 port->message_id, 1); 3470 msg.payload[0] = cpu_to_le32(rdo); 3471 3472 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg); 3473} 3474 3475static int tcpm_set_vbus(struct tcpm_port *port, bool enable) 3476{ 3477 int ret; 3478 3479 if (enable && port->vbus_charge) 3480 return -EINVAL; 3481 3482 tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge); 3483 3484 ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge); 3485 if (ret < 0) 3486 return ret; 3487 3488 port->vbus_source = enable; 3489 return 0; 3490} 3491 3492static int tcpm_set_charge(struct tcpm_port *port, bool charge) 3493{ 3494 int ret; 3495 3496 if (charge && port->vbus_source) 3497 return -EINVAL; 3498 3499 if (charge != port->vbus_charge) { 3500 tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge); 3501 ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source, 3502 charge); 3503 if (ret < 0) 3504 return ret; 3505 } 3506 port->vbus_charge = charge; 3507 power_supply_changed(port->psy); 3508 return 0; 3509} 3510 3511static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc) 3512{ 3513 int ret; 3514 3515 if (!port->tcpc->start_toggling) 3516 return false; 3517 3518 tcpm_log_force(port, "Start toggling"); 3519 ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc); 3520 return ret == 0; 3521} 3522 3523static int tcpm_init_vbus(struct tcpm_port *port) 3524{ 3525 int ret; 3526 3527 ret = port->tcpc->set_vbus(port->tcpc, false, false); 3528 port->vbus_source = false; 3529 port->vbus_charge = false; 3530 return ret; 3531} 3532 3533static int tcpm_init_vconn(struct tcpm_port *port) 3534{ 3535 int ret; 3536 3537 ret = port->tcpc->set_vconn(port->tcpc, false); 3538 port->vconn_role = TYPEC_SINK; 3539 return ret; 3540} 3541 3542static void tcpm_typec_connect(struct tcpm_port *port) 3543{ 3544 if (!port->connected) { 3545 /* Make sure we don't report stale identity information */ 3546 memset(&port->partner_ident, 0, sizeof(port->partner_ident)); 3547 port->partner_desc.usb_pd = port->pd_capable; 3548 if (tcpm_port_is_debug(port)) 3549 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG; 3550 else if (tcpm_port_is_audio(port)) 3551 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO; 3552 else 3553 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE; 3554 port->partner = typec_register_partner(port->typec_port, 3555 &port->partner_desc); 3556 port->connected = true; 3557 } 3558} 3559 3560static int tcpm_src_attach(struct tcpm_port *port) 3561{ 3562 enum typec_cc_polarity polarity = 3563 port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2 3564 : TYPEC_POLARITY_CC1; 3565 int ret; 3566 3567 if (port->attached) 3568 return 0; 3569 3570 ret = tcpm_set_polarity(port, polarity); 3571 if (ret < 0) 3572 return ret; 3573 3574 tcpm_enable_auto_vbus_discharge(port, true); 3575 3576 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port)); 3577 if (ret < 0) 3578 return ret; 3579 3580 if (port->pd_supported) { 3581 ret = port->tcpc->set_pd_rx(port->tcpc, true); 3582 if (ret < 0) 3583 goto out_disable_mux; 3584 } 3585 3586 /* 3587 * USB Type-C specification, version 1.2, 3588 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements) 3589 * Enable VCONN only if the non-RD port is set to RA. 
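 * (Ra on the unused CC pin indicates a VCONN-powered cable or accessory,
 * so VCONN is only sourced here when that is detected)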
3590 */ 3591 if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) || 3592 (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) { 3593 ret = tcpm_set_vconn(port, true); 3594 if (ret < 0) 3595 goto out_disable_pd; 3596 } 3597 3598 ret = tcpm_set_vbus(port, true); 3599 if (ret < 0) 3600 goto out_disable_vconn; 3601 3602 port->pd_capable = false; 3603 3604 port->partner = NULL; 3605 3606 port->attached = true; 3607 port->send_discover = true; 3608 3609 return 0; 3610 3611out_disable_vconn: 3612 tcpm_set_vconn(port, false); 3613out_disable_pd: 3614 if (port->pd_supported) 3615 port->tcpc->set_pd_rx(port->tcpc, false); 3616out_disable_mux: 3617 tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE, 3618 TYPEC_ORIENTATION_NONE); 3619 return ret; 3620} 3621 3622static void tcpm_typec_disconnect(struct tcpm_port *port) 3623{ 3624 if (port->connected) { 3625 typec_unregister_partner(port->partner); 3626 port->partner = NULL; 3627 port->connected = false; 3628 } 3629} 3630 3631static void tcpm_unregister_altmodes(struct tcpm_port *port) 3632{ 3633 struct pd_mode_data *modep = &port->mode_data; 3634 int i; 3635 3636 for (i = 0; i < modep->altmodes; i++) { 3637 typec_unregister_altmode(port->partner_altmode[i]); 3638 port->partner_altmode[i] = NULL; 3639 } 3640 3641 memset(modep, 0, sizeof(*modep)); 3642} 3643 3644static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable) 3645{ 3646 tcpm_log(port, "Setting usb_comm capable %s", capable ? "true" : "false"); 3647 3648 if (port->tcpc->set_partner_usb_comm_capable) 3649 port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable); 3650} 3651 3652static void tcpm_reset_port(struct tcpm_port *port) 3653{ 3654 tcpm_enable_auto_vbus_discharge(port, false); 3655 port->in_ams = false; 3656 port->ams = NONE_AMS; 3657 port->vdm_sm_running = false; 3658 tcpm_unregister_altmodes(port); 3659 tcpm_typec_disconnect(port); 3660 port->attached = false; 3661 port->pd_capable = false; 3662 port->pps_data.supported = false; 3663 tcpm_set_partner_usb_comm_capable(port, false); 3664 3665 /* 3666 * First Rx ID should be 0; set this to a sentinel of -1 so that 3667 * we can check tcpm_pd_rx_handler() if we had seen it before. 
3668 */ 3669 port->rx_msgid = -1; 3670 3671 port->tcpc->set_pd_rx(port->tcpc, false); 3672 tcpm_init_vbus(port); /* also disables charging */ 3673 tcpm_init_vconn(port); 3674 tcpm_set_current_limit(port, 0, 0); 3675 tcpm_set_polarity(port, TYPEC_POLARITY_CC1); 3676 tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE, 3677 TYPEC_ORIENTATION_NONE); 3678 tcpm_set_attached_state(port, false); 3679 port->try_src_count = 0; 3680 port->try_snk_count = 0; 3681 port->usb_type = POWER_SUPPLY_USB_TYPE_C; 3682 power_supply_changed(port->psy); 3683 port->nr_sink_caps = 0; 3684 port->sink_cap_done = false; 3685 if (port->tcpc->enable_frs) 3686 port->tcpc->enable_frs(port->tcpc, false); 3687} 3688 3689static void tcpm_detach(struct tcpm_port *port) 3690{ 3691 if (tcpm_port_is_disconnected(port)) 3692 port->hard_reset_count = 0; 3693 3694 if (!port->attached) 3695 return; 3696 3697 if (port->tcpc->set_bist_data) { 3698 tcpm_log(port, "disable BIST MODE TESTDATA"); 3699 port->tcpc->set_bist_data(port->tcpc, false); 3700 } 3701 3702 tcpm_reset_port(port); 3703} 3704 3705static void tcpm_src_detach(struct tcpm_port *port) 3706{ 3707 tcpm_detach(port); 3708} 3709 3710static int tcpm_snk_attach(struct tcpm_port *port) 3711{ 3712 int ret; 3713 3714 if (port->attached) 3715 return 0; 3716 3717 ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ? 3718 TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1); 3719 if (ret < 0) 3720 return ret; 3721 3722 tcpm_enable_auto_vbus_discharge(port, true); 3723 3724 ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port)); 3725 if (ret < 0) 3726 return ret; 3727 3728 port->pd_capable = false; 3729 3730 port->partner = NULL; 3731 3732 port->attached = true; 3733 port->send_discover = true; 3734 3735 return 0; 3736} 3737 3738static void tcpm_snk_detach(struct tcpm_port *port) 3739{ 3740 tcpm_detach(port); 3741} 3742 3743static int tcpm_acc_attach(struct tcpm_port *port) 3744{ 3745 int ret; 3746 3747 if (port->attached) 3748 return 0; 3749 3750 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, 3751 tcpm_data_role_for_source(port)); 3752 if (ret < 0) 3753 return ret; 3754 3755 port->partner = NULL; 3756 3757 tcpm_typec_connect(port); 3758 3759 port->attached = true; 3760 3761 return 0; 3762} 3763 3764static void tcpm_acc_detach(struct tcpm_port *port) 3765{ 3766 tcpm_detach(port); 3767} 3768 3769static inline enum tcpm_state hard_reset_state(struct tcpm_port *port) 3770{ 3771 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) 3772 return HARD_RESET_SEND; 3773 if (port->pd_capable) 3774 return ERROR_RECOVERY; 3775 if (port->pwr_role == TYPEC_SOURCE) 3776 return SRC_UNATTACHED; 3777 if (port->state == SNK_WAIT_CAPABILITIES) 3778 return SNK_READY; 3779 return SNK_UNATTACHED; 3780} 3781 3782static inline enum tcpm_state unattached_state(struct tcpm_port *port) 3783{ 3784 if (port->port_type == TYPEC_PORT_DRP) { 3785 if (port->pwr_role == TYPEC_SOURCE) 3786 return SRC_UNATTACHED; 3787 else 3788 return SNK_UNATTACHED; 3789 } else if (port->port_type == TYPEC_PORT_SRC) { 3790 return SRC_UNATTACHED; 3791 } 3792 3793 return SNK_UNATTACHED; 3794} 3795 3796static void tcpm_swap_complete(struct tcpm_port *port, int result) 3797{ 3798 if (port->swap_pending) { 3799 port->swap_status = result; 3800 port->swap_pending = false; 3801 port->non_pd_role_swap = false; 3802 complete(&port->swap_complete); 3803 } 3804} 3805 3806static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc) 3807{ 3808 switch (cc) { 3809 case TYPEC_CC_RP_1_5: 3810 return TYPEC_PWR_MODE_1_5A; 3811 
case TYPEC_CC_RP_3_0: 3812 return TYPEC_PWR_MODE_3_0A; 3813 case TYPEC_CC_RP_DEF: 3814 default: 3815 return TYPEC_PWR_MODE_USB; 3816 } 3817} 3818 3819static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode) 3820{ 3821 switch (opmode) { 3822 case TYPEC_PWR_MODE_USB: 3823 return TYPEC_CC_RP_DEF; 3824 case TYPEC_PWR_MODE_1_5A: 3825 return TYPEC_CC_RP_1_5; 3826 case TYPEC_PWR_MODE_3_0A: 3827 case TYPEC_PWR_MODE_PD: 3828 default: 3829 return TYPEC_CC_RP_3_0; 3830 } 3831} 3832 3833static void run_state_machine(struct tcpm_port *port) 3834{ 3835 int ret; 3836 enum typec_pwr_opmode opmode; 3837 unsigned int msecs; 3838 enum tcpm_state upcoming_state; 3839 3840 port->enter_state = port->state; 3841 switch (port->state) { 3842 case TOGGLING: 3843 break; 3844 /* SRC states */ 3845 case SRC_UNATTACHED: 3846 if (!port->non_pd_role_swap) 3847 tcpm_swap_complete(port, -ENOTCONN); 3848 tcpm_src_detach(port); 3849 if (tcpm_start_toggling(port, tcpm_rp_cc(port))) { 3850 tcpm_set_state(port, TOGGLING, 0); 3851 break; 3852 } 3853 tcpm_set_cc(port, tcpm_rp_cc(port)); 3854 if (port->port_type == TYPEC_PORT_DRP) 3855 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK); 3856 break; 3857 case SRC_ATTACH_WAIT: 3858 if (tcpm_port_is_debug(port)) 3859 tcpm_set_state(port, DEBUG_ACC_ATTACHED, 3860 PD_T_CC_DEBOUNCE); 3861 else if (tcpm_port_is_audio(port)) 3862 tcpm_set_state(port, AUDIO_ACC_ATTACHED, 3863 PD_T_CC_DEBOUNCE); 3864 else if (tcpm_port_is_source(port) && port->vbus_vsafe0v) 3865 tcpm_set_state(port, 3866 tcpm_try_snk(port) ? SNK_TRY 3867 : SRC_ATTACHED, 3868 PD_T_CC_DEBOUNCE); 3869 break; 3870 3871 case SNK_TRY: 3872 port->try_snk_count++; 3873 /* 3874 * Requirements: 3875 * - Do not drive vconn or vbus 3876 * - Terminate CC pins (both) to Rd 3877 * Action: 3878 * - Wait for tDRPTry (PD_T_DRP_TRY). 3879 * Until then, ignore any state changes. 3880 */ 3881 tcpm_set_cc(port, TYPEC_CC_RD); 3882 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY); 3883 break; 3884 case SNK_TRY_WAIT: 3885 if (tcpm_port_is_sink(port)) { 3886 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0); 3887 } else { 3888 tcpm_set_state(port, SRC_TRYWAIT, 0); 3889 port->max_wait = 0; 3890 } 3891 break; 3892 case SNK_TRY_WAIT_DEBOUNCE: 3893 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 3894 PD_T_TRY_CC_DEBOUNCE); 3895 break; 3896 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS: 3897 if (port->vbus_present && tcpm_port_is_sink(port)) 3898 tcpm_set_state(port, SNK_ATTACHED, 0); 3899 else 3900 port->max_wait = 0; 3901 break; 3902 case SRC_TRYWAIT: 3903 tcpm_set_cc(port, tcpm_rp_cc(port)); 3904 if (port->max_wait == 0) { 3905 port->max_wait = jiffies + 3906 msecs_to_jiffies(PD_T_DRP_TRY); 3907 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED, 3908 PD_T_DRP_TRY); 3909 } else { 3910 if (time_is_after_jiffies(port->max_wait)) 3911 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED, 3912 jiffies_to_msecs(port->max_wait - 3913 jiffies)); 3914 else 3915 tcpm_set_state(port, SNK_UNATTACHED, 0); 3916 } 3917 break; 3918 case SRC_TRYWAIT_DEBOUNCE: 3919 tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE); 3920 break; 3921 case SRC_TRYWAIT_UNATTACHED: 3922 tcpm_set_state(port, SNK_UNATTACHED, 0); 3923 break; 3924 3925 case SRC_ATTACHED: 3926 ret = tcpm_src_attach(port); 3927 tcpm_set_state(port, SRC_UNATTACHED, 3928 ret < 0 ? 
0 : PD_T_PS_SOURCE_ON); 3929 break; 3930 case SRC_STARTUP: 3931 opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port)); 3932 typec_set_pwr_opmode(port->typec_port, opmode); 3933 port->pwr_opmode = TYPEC_PWR_MODE_USB; 3934 port->caps_count = 0; 3935 port->negotiated_rev = PD_MAX_REV; 3936 port->message_id = 0; 3937 port->rx_msgid = -1; 3938 port->explicit_contract = false; 3939 /* SNK -> SRC POWER/FAST_ROLE_SWAP finished */ 3940 if (port->ams == POWER_ROLE_SWAP || 3941 port->ams == FAST_ROLE_SWAP) 3942 tcpm_ams_finish(port); 3943 if (!port->pd_supported) { 3944 tcpm_set_state(port, SRC_READY, 0); 3945 break; 3946 } 3947 port->upcoming_state = SRC_SEND_CAPABILITIES; 3948 tcpm_ams_start(port, POWER_NEGOTIATION); 3949 break; 3950 case SRC_SEND_CAPABILITIES: 3951 port->caps_count++; 3952 if (port->caps_count > PD_N_CAPS_COUNT) { 3953 tcpm_set_state(port, SRC_READY, 0); 3954 break; 3955 } 3956 ret = tcpm_pd_send_source_caps(port); 3957 if (ret < 0) { 3958 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 3959 PD_T_SEND_SOURCE_CAP); 3960 } else { 3961 /* 3962 * Per standard, we should clear the reset counter here. 3963 * However, that can result in state machine hang-ups. 3964 * Reset it only in READY state to improve stability. 3965 */ 3966 /* port->hard_reset_count = 0; */ 3967 port->caps_count = 0; 3968 port->pd_capable = true; 3969 tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT, 3970 PD_T_SEND_SOURCE_CAP); 3971 } 3972 break; 3973 case SRC_SEND_CAPABILITIES_TIMEOUT: 3974 /* 3975 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout. 3976 * 3977 * PD 2.0 sinks are supposed to accept src-capabilities with a 3978 * 3.0 header and simply ignore any src PDOs which the sink does 3979 * not understand such as PPS but some 2.0 sinks instead ignore 3980 * the entire PD_DATA_SOURCE_CAP message, causing contract 3981 * negotiation to fail. 3982 * 3983 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try 3984 * sending src-capabilities with a lower PD revision to 3985 * make these broken sinks work. 3986 */ 3987 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) { 3988 tcpm_set_state(port, HARD_RESET_SEND, 0); 3989 } else if (port->negotiated_rev > PD_REV20) { 3990 port->negotiated_rev--; 3991 port->hard_reset_count = 0; 3992 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0); 3993 } else { 3994 tcpm_set_state(port, hard_reset_state(port), 0); 3995 } 3996 break; 3997 case SRC_NEGOTIATE_CAPABILITIES: 3998 ret = tcpm_pd_check_request(port); 3999 if (ret < 0) { 4000 tcpm_pd_send_control(port, PD_CTRL_REJECT); 4001 if (!port->explicit_contract) { 4002 tcpm_set_state(port, 4003 SRC_WAIT_NEW_CAPABILITIES, 0); 4004 } else { 4005 tcpm_set_state(port, SRC_READY, 0); 4006 } 4007 } else { 4008 tcpm_pd_send_control(port, PD_CTRL_ACCEPT); 4009 tcpm_set_partner_usb_comm_capable(port, 4010 !!(port->sink_request & RDO_USB_COMM)); 4011 tcpm_set_state(port, SRC_TRANSITION_SUPPLY, 4012 PD_T_SRC_TRANSITION); 4013 } 4014 break; 4015 case SRC_TRANSITION_SUPPLY: 4016 /* XXX: regulator_set_voltage(vbus, ...) 
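 * (the new supply voltage is not programmed here; tcpm only waits
 * tSrcTransition before reporting PS_RDY, so any actual VBUS level
 * change is presumably left to the TCPC driver or the board supply)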
*/ 4017 tcpm_pd_send_control(port, PD_CTRL_PS_RDY); 4018 port->explicit_contract = true; 4019 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD); 4020 port->pwr_opmode = TYPEC_PWR_MODE_PD; 4021 tcpm_set_state_cond(port, SRC_READY, 0); 4022 break; 4023 case SRC_READY: 4024#if 1 4025 port->hard_reset_count = 0; 4026#endif 4027 port->try_src_count = 0; 4028 4029 tcpm_swap_complete(port, 0); 4030 tcpm_typec_connect(port); 4031 4032 if (port->ams != NONE_AMS) 4033 tcpm_ams_finish(port); 4034 if (port->next_ams != NONE_AMS) { 4035 port->ams = port->next_ams; 4036 port->next_ams = NONE_AMS; 4037 } 4038 4039 /* 4040 * If previous AMS is interrupted, switch to the upcoming 4041 * state. 4042 */ 4043 if (port->upcoming_state != INVALID_STATE) { 4044 upcoming_state = port->upcoming_state; 4045 port->upcoming_state = INVALID_STATE; 4046 tcpm_set_state(port, upcoming_state, 0); 4047 break; 4048 } 4049 4050 /* 4051 * 6.4.4.3.1 Discover Identity 4052 * "The Discover Identity Command Shall only be sent to SOP when there is an 4053 * Explicit Contract." 4054 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using 4055 * port->explicit_contract to decide whether to send the command. 4056 */ 4057 if (port->explicit_contract) 4058 mod_send_discover_delayed_work(port, 0); 4059 else 4060 port->send_discover = false; 4061 4062 /* 4063 * 6.3.5 4064 * Sending ping messages is not necessary if 4065 * - the source operates at vSafe5V 4066 * or 4067 * - The system is not operating in PD mode 4068 * or 4069 * - Both partners are connected using a Type-C connector 4070 * 4071 * There is no actual need to send PD messages since the local 4072 * port type-c and the spec does not clearly say whether PD is 4073 * possible when type-c is connected to Type-A/B 4074 */ 4075 break; 4076 case SRC_WAIT_NEW_CAPABILITIES: 4077 /* Nothing to do... */ 4078 break; 4079 4080 /* SNK states */ 4081 case SNK_UNATTACHED: 4082 if (!port->non_pd_role_swap) 4083 tcpm_swap_complete(port, -ENOTCONN); 4084 tcpm_pps_complete(port, -ENOTCONN); 4085 tcpm_snk_detach(port); 4086 if (tcpm_start_toggling(port, TYPEC_CC_RD)) { 4087 tcpm_set_state(port, TOGGLING, 0); 4088 break; 4089 } 4090 tcpm_set_cc(port, TYPEC_CC_RD); 4091 if (port->port_type == TYPEC_PORT_DRP) 4092 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC); 4093 break; 4094 case SNK_ATTACH_WAIT: 4095 if ((port->cc1 == TYPEC_CC_OPEN && 4096 port->cc2 != TYPEC_CC_OPEN) || 4097 (port->cc1 != TYPEC_CC_OPEN && 4098 port->cc2 == TYPEC_CC_OPEN)) 4099 tcpm_set_state(port, SNK_DEBOUNCED, 4100 PD_T_CC_DEBOUNCE); 4101 else if (tcpm_port_is_disconnected(port)) 4102 tcpm_set_state(port, SNK_UNATTACHED, 4103 PD_T_PD_DEBOUNCE); 4104 break; 4105 case SNK_DEBOUNCED: 4106 if (tcpm_port_is_disconnected(port)) 4107 tcpm_set_state(port, SNK_UNATTACHED, 4108 PD_T_PD_DEBOUNCE); 4109 else if (port->vbus_present) 4110 tcpm_set_state(port, 4111 tcpm_try_src(port) ? 
SRC_TRY 4112 : SNK_ATTACHED, 4113 0); 4114 break; 4115 case SRC_TRY: 4116 port->try_src_count++; 4117 tcpm_set_cc(port, tcpm_rp_cc(port)); 4118 port->max_wait = 0; 4119 tcpm_set_state(port, SRC_TRY_WAIT, 0); 4120 break; 4121 case SRC_TRY_WAIT: 4122 if (port->max_wait == 0) { 4123 port->max_wait = jiffies + 4124 msecs_to_jiffies(PD_T_DRP_TRY); 4125 msecs = PD_T_DRP_TRY; 4126 } else { 4127 if (time_is_after_jiffies(port->max_wait)) 4128 msecs = jiffies_to_msecs(port->max_wait - 4129 jiffies); 4130 else 4131 msecs = 0; 4132 } 4133 tcpm_set_state(port, SNK_TRYWAIT, msecs); 4134 break; 4135 case SRC_TRY_DEBOUNCE: 4136 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE); 4137 break; 4138 case SNK_TRYWAIT: 4139 tcpm_set_cc(port, TYPEC_CC_RD); 4140 tcpm_set_state(port, SNK_TRYWAIT_VBUS, PD_T_CC_DEBOUNCE); 4141 break; 4142 case SNK_TRYWAIT_VBUS: 4143 /* 4144 * TCPM stays in this state indefinitely until VBUS 4145 * is detected as long as Rp is not detected for 4146 * more than a time period of tPDDebounce. 4147 */ 4148 if (port->vbus_present && tcpm_port_is_sink(port)) { 4149 tcpm_set_state(port, SNK_ATTACHED, 0); 4150 break; 4151 } 4152 if (!tcpm_port_is_sink(port)) 4153 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0); 4154 break; 4155 case SNK_TRYWAIT_DEBOUNCE: 4156 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE); 4157 break; 4158 case SNK_ATTACHED: 4159 ret = tcpm_snk_attach(port); 4160 if (ret < 0) 4161 tcpm_set_state(port, SNK_UNATTACHED, 0); 4162 else 4163 tcpm_set_state(port, SNK_STARTUP, 0); 4164 break; 4165 case SNK_STARTUP: 4166 opmode = tcpm_get_pwr_opmode(port->polarity ? 4167 port->cc2 : port->cc1); 4168 typec_set_pwr_opmode(port->typec_port, opmode); 4169 port->pwr_opmode = TYPEC_PWR_MODE_USB; 4170 port->negotiated_rev = PD_MAX_REV; 4171 port->message_id = 0; 4172 port->rx_msgid = -1; 4173 port->explicit_contract = false; 4174 4175 if (port->ams == POWER_ROLE_SWAP || 4176 port->ams == FAST_ROLE_SWAP) 4177 /* SRC -> SNK POWER/FAST_ROLE_SWAP finished */ 4178 tcpm_ams_finish(port); 4179 4180 tcpm_set_state(port, SNK_DISCOVERY, 0); 4181 break; 4182 case SNK_DISCOVERY: 4183 if (port->vbus_present) { 4184 u32 current_lim = tcpm_get_current_limit(port); 4185 4186 if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5)) 4187 current_lim = PD_P_SNK_STDBY_MW / 5; 4188 tcpm_set_current_limit(port, current_lim, 5000); 4189 tcpm_set_charge(port, true); 4190 if (!port->pd_supported) 4191 tcpm_set_state(port, SNK_READY, 0); 4192 else 4193 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0); 4194 break; 4195 } 4196 /* 4197 * For DRP, timeouts differ. Also, handling is supposed to be 4198 * different and much more complex (dead battery detection; 4199 * see USB power delivery specification, section 8.3.3.6.1.5.1). 4200 */ 4201 tcpm_set_state(port, hard_reset_state(port), 4202 port->port_type == TYPEC_PORT_DRP ? 
4203 PD_T_DB_DETECT : PD_T_NO_RESPONSE); 4204 break; 4205 case SNK_DISCOVERY_DEBOUNCE: 4206 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE, 4207 PD_T_CC_DEBOUNCE); 4208 break; 4209 case SNK_DISCOVERY_DEBOUNCE_DONE: 4210 if (!tcpm_port_is_disconnected(port) && 4211 tcpm_port_is_sink(port) && 4212 ktime_after(port->delayed_runtime, ktime_get())) { 4213 tcpm_set_state(port, SNK_DISCOVERY, 4214 ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get()))); 4215 break; 4216 } 4217 tcpm_set_state(port, unattached_state(port), 0); 4218 break; 4219 case SNK_WAIT_CAPABILITIES: 4220 ret = port->tcpc->set_pd_rx(port->tcpc, true); 4221 if (ret < 0) { 4222 tcpm_set_state(port, SNK_READY, 0); 4223 break; 4224 } 4225 /* 4226 * If VBUS has never been low, and we time out waiting 4227 * for source cap, try a soft reset first, in case we 4228 * were already in a stable contract before this boot. 4229 * Do this only once. 4230 */ 4231 if (port->vbus_never_low) { 4232 port->vbus_never_low = false; 4233 tcpm_set_state(port, SNK_SOFT_RESET, 4234 PD_T_SINK_WAIT_CAP); 4235 } else { 4236 tcpm_set_state(port, hard_reset_state(port), 4237 PD_T_SINK_WAIT_CAP); 4238 } 4239 break; 4240 case SNK_NEGOTIATE_CAPABILITIES: 4241 port->pd_capable = true; 4242 tcpm_set_partner_usb_comm_capable(port, 4243 !!(port->source_caps[0] & PDO_FIXED_USB_COMM)); 4244 port->hard_reset_count = 0; 4245 ret = tcpm_pd_send_request(port); 4246 if (ret < 0) { 4247 /* Restore back to the original state */ 4248 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD, 4249 port->pps_data.active, 4250 port->supply_voltage); 4251 /* Let the Source send capabilities again. */ 4252 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0); 4253 } else { 4254 tcpm_set_state_cond(port, hard_reset_state(port), 4255 PD_T_SENDER_RESPONSE); 4256 } 4257 break; 4258 case SNK_NEGOTIATE_PPS_CAPABILITIES: 4259 ret = tcpm_pd_send_pps_request(port); 4260 if (ret < 0) { 4261 /* Restore back to the original state */ 4262 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD, 4263 port->pps_data.active, 4264 port->supply_voltage); 4265 port->pps_status = ret; 4266 /* 4267 * If this was called due to updates to sink 4268 * capabilities, and pps is no longer valid, we should 4269 * safely fall back to a standard PDO. 4270 */ 4271 if (port->update_sink_caps) 4272 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0); 4273 else 4274 tcpm_set_state(port, SNK_READY, 0); 4275 } else { 4276 tcpm_set_state_cond(port, hard_reset_state(port), 4277 PD_T_SENDER_RESPONSE); 4278 } 4279 break; 4280 case SNK_TRANSITION_SINK: 4281 /* From the USB PD spec: 4282 * "The Sink Shall transition to Sink Standby before a positive or 4283 * negative voltage transition of VBUS. During Sink Standby 4284 * the Sink Shall reduce its power draw to pSnkStdby." 4285 * 4286 * This is not applicable to PPS though as the port can continue 4287 * to draw negotiated power without switching to standby. 
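 *
 * As a rough worked example of the calculation below (assuming
 * PD_P_SNK_STDBY_MW is 2500 mW): with a 5000 mV contract the standby
 * limit becomes 2500 * 1000 / 5000 = 500 mA.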
4288 */ 4289 if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active && 4290 port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) { 4291 u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage; 4292 4293 tcpm_log(port, "Setting standby current %u mV @ %u mA", 4294 port->supply_voltage, stdby_ma); 4295 tcpm_set_current_limit(port, stdby_ma, port->supply_voltage); 4296 } 4297 fallthrough; 4298 case SNK_TRANSITION_SINK_VBUS: 4299 tcpm_set_state(port, hard_reset_state(port), 4300 PD_T_PS_TRANSITION); 4301 break; 4302 case SNK_READY: 4303 port->try_snk_count = 0; 4304 port->update_sink_caps = false; 4305 if (port->explicit_contract) { 4306 typec_set_pwr_opmode(port->typec_port, 4307 TYPEC_PWR_MODE_PD); 4308 port->pwr_opmode = TYPEC_PWR_MODE_PD; 4309 } 4310 4311 if (!port->pd_capable && port->slow_charger_loop) 4312 tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000); 4313 tcpm_swap_complete(port, 0); 4314 tcpm_typec_connect(port); 4315 mod_enable_frs_delayed_work(port, 0); 4316 tcpm_pps_complete(port, port->pps_status); 4317 4318 if (port->ams != NONE_AMS) 4319 tcpm_ams_finish(port); 4320 if (port->next_ams != NONE_AMS) { 4321 port->ams = port->next_ams; 4322 port->next_ams = NONE_AMS; 4323 } 4324 4325 /* 4326 * If previous AMS is interrupted, switch to the upcoming 4327 * state. 4328 */ 4329 if (port->upcoming_state != INVALID_STATE) { 4330 upcoming_state = port->upcoming_state; 4331 port->upcoming_state = INVALID_STATE; 4332 tcpm_set_state(port, upcoming_state, 0); 4333 break; 4334 } 4335 4336 /* 4337 * 6.4.4.3.1 Discover Identity 4338 * "The Discover Identity Command Shall only be sent to SOP when there is an 4339 * Explicit Contract." 4340 * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using 4341 * port->explicit_contract. 4342 */ 4343 if (port->explicit_contract) 4344 mod_send_discover_delayed_work(port, 0); 4345 else 4346 port->send_discover = false; 4347 4348 power_supply_changed(port->psy); 4349 break; 4350 4351 /* Accessory states */ 4352 case ACC_UNATTACHED: 4353 tcpm_acc_detach(port); 4354 tcpm_set_state(port, SRC_UNATTACHED, 0); 4355 break; 4356 case DEBUG_ACC_ATTACHED: 4357 case AUDIO_ACC_ATTACHED: 4358 ret = tcpm_acc_attach(port); 4359 if (ret < 0) 4360 tcpm_set_state(port, ACC_UNATTACHED, 0); 4361 break; 4362 case AUDIO_ACC_DEBOUNCE: 4363 tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE); 4364 break; 4365 4366 /* Hard_Reset states */ 4367 case HARD_RESET_SEND: 4368 if (port->ams != NONE_AMS) 4369 tcpm_ams_finish(port); 4370 /* 4371 * State machine will be directed to HARD_RESET_START, 4372 * thus set upcoming_state to INVALID_STATE. 
4373 */ 4374 port->upcoming_state = INVALID_STATE; 4375 tcpm_ams_start(port, HARD_RESET); 4376 break; 4377 case HARD_RESET_START: 4378 port->sink_cap_done = false; 4379 if (port->tcpc->enable_frs) 4380 port->tcpc->enable_frs(port->tcpc, false); 4381 port->hard_reset_count++; 4382 port->tcpc->set_pd_rx(port->tcpc, false); 4383 tcpm_unregister_altmodes(port); 4384 port->nr_sink_caps = 0; 4385 port->send_discover = true; 4386 if (port->pwr_role == TYPEC_SOURCE) 4387 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF, 4388 PD_T_PS_HARD_RESET); 4389 else 4390 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0); 4391 break; 4392 case SRC_HARD_RESET_VBUS_OFF: 4393 /* 4394 * 7.1.5 Response to Hard Resets 4395 * Hard Reset Signaling indicates a communication failure has occurred and the 4396 * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall 4397 * drive VBUS to vSafe0V as shown in Figure 7-9. 4398 */ 4399 tcpm_set_vconn(port, false); 4400 tcpm_set_vbus(port, false); 4401 tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE, 4402 tcpm_data_role_for_source(port)); 4403 /* 4404 * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V + 4405 * PD_T_SRC_RECOVER before turning vbus back on. 4406 * From Table 7-12 Sequence Description for a Source Initiated Hard Reset: 4407 * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then 4408 * tells the Device Policy Manager to instruct the power supply to perform a 4409 * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2). 4410 * 5. After tSrcRecover the Source applies power to VBUS in an attempt to 4411 * re-establish communication with the Sink and resume USB Default Operation. 4412 * The transition to vSafe5V Shall occur within tSrcTurnOn(t4). 4413 */ 4414 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER); 4415 break; 4416 case SRC_HARD_RESET_VBUS_ON: 4417 tcpm_set_vconn(port, true); 4418 tcpm_set_vbus(port, true); 4419 if (port->ams == HARD_RESET) 4420 tcpm_ams_finish(port); 4421 if (port->pd_supported) 4422 port->tcpc->set_pd_rx(port->tcpc, true); 4423 tcpm_set_attached_state(port, true); 4424 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON); 4425 break; 4426 case SNK_HARD_RESET_SINK_OFF: 4427 /* Do not discharge/disconnect during hard reseet */ 4428 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0); 4429 memset(&port->pps_data, 0, sizeof(port->pps_data)); 4430 tcpm_set_vconn(port, false); 4431 if (port->pd_capable) 4432 tcpm_set_charge(port, false); 4433 tcpm_set_roles(port, port->self_powered, TYPEC_SINK, 4434 tcpm_data_role_for_sink(port)); 4435 /* 4436 * VBUS may or may not toggle, depending on the adapter. 4437 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON 4438 * directly after timeout. 4439 */ 4440 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V); 4441 break; 4442 case SNK_HARD_RESET_WAIT_VBUS: 4443 if (port->ams == HARD_RESET) 4444 tcpm_ams_finish(port); 4445 /* Assume we're disconnected if VBUS doesn't come back. */ 4446 tcpm_set_state(port, SNK_UNATTACHED, 4447 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON); 4448 break; 4449 case SNK_HARD_RESET_SINK_ON: 4450 /* Note: There is no guarantee that VBUS is on in this state */ 4451 /* 4452 * XXX: 4453 * The specification suggests that dual mode ports in sink 4454 * mode should transition to state PE_SRC_Transition_to_default. 4455 * See USB power delivery specification chapter 8.3.3.6.1.3. 
This would mean to 4457 * - turn off VCONN, reset power supply 4458 * - request hardware reset 4459 * - turn on VCONN 4460 * - Transition to state PE_Src_Startup 4461 * SNK only ports shall transition to state Snk_Startup 4462 * (see chapter 8.3.3.3.8). 4463 * Similarly, dual-mode ports in source mode should transition 4464 * to PE_SNK_Transition_to_default. 4465 */ 4466 if (port->pd_capable) { 4467 tcpm_set_current_limit(port, 4468 tcpm_get_current_limit(port), 4469 5000); 4470 tcpm_set_charge(port, true); 4471 } 4472 if (port->ams == HARD_RESET) 4473 tcpm_ams_finish(port); 4474 tcpm_set_attached_state(port, true); 4475 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V); 4476 tcpm_set_state(port, SNK_STARTUP, 0); 4477 break; 4478 4479 /* Soft_Reset states */ 4480 case SOFT_RESET: 4481 port->message_id = 0; 4482 port->rx_msgid = -1; 4483 tcpm_pd_send_control(port, PD_CTRL_ACCEPT); 4484 tcpm_ams_finish(port); 4485 if (port->pwr_role == TYPEC_SOURCE) { 4486 port->upcoming_state = SRC_SEND_CAPABILITIES; 4487 tcpm_ams_start(port, POWER_NEGOTIATION); 4488 } else { 4489 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0); 4490 } 4491 break; 4492 case SRC_SOFT_RESET_WAIT_SNK_TX: 4493 case SNK_SOFT_RESET: 4494 if (port->ams != NONE_AMS) 4495 tcpm_ams_finish(port); 4496 port->upcoming_state = SOFT_RESET_SEND; 4497 tcpm_ams_start(port, SOFT_RESET_AMS); 4498 break; 4499 case SOFT_RESET_SEND: 4500 port->message_id = 0; 4501 port->rx_msgid = -1; 4502 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET)) 4503 tcpm_set_state_cond(port, hard_reset_state(port), 0); 4504 else 4505 tcpm_set_state_cond(port, hard_reset_state(port), 4506 PD_T_SENDER_RESPONSE); 4507 break; 4508 4509 /* DR_Swap states */ 4510 case DR_SWAP_SEND: 4511 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP); 4512 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) 4513 port->send_discover = true; 4514 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT, 4515 PD_T_SENDER_RESPONSE); 4516 break; 4517 case DR_SWAP_ACCEPT: 4518 tcpm_pd_send_control(port, PD_CTRL_ACCEPT); 4519 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) 4520 port->send_discover = true; 4521 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0); 4522 break; 4523 case DR_SWAP_SEND_TIMEOUT: 4524 tcpm_swap_complete(port, -ETIMEDOUT); 4525 port->send_discover = false; 4526 tcpm_ams_finish(port); 4527 tcpm_set_state(port, ready_state(port), 0); 4528 break; 4529 case DR_SWAP_CHANGE_DR: 4530 if (port->data_role == TYPEC_HOST) { 4531 tcpm_unregister_altmodes(port); 4532 tcpm_set_roles(port, true, port->pwr_role, 4533 TYPEC_DEVICE); 4534 } else { 4535 tcpm_set_roles(port, true, port->pwr_role, 4536 TYPEC_HOST); 4537 } 4538 tcpm_ams_finish(port); 4539 tcpm_set_state(port, ready_state(port), 0); 4540 break; 4541 4542 case FR_SWAP_SEND: 4543 if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP)) { 4544 tcpm_set_state(port, ERROR_RECOVERY, 0); 4545 break; 4546 } 4547 tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE); 4548 break; 4549 case FR_SWAP_SEND_TIMEOUT: 4550 tcpm_set_state(port, ERROR_RECOVERY, 0); 4551 break; 4552 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: 4553 tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_OFF); 4554 break; 4555 case FR_SWAP_SNK_SRC_NEW_SINK_READY: 4556 if (port->vbus_source) 4557 tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0); 4558 else 4559 tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE); 4560 break; 4561 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
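/*
 * FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, as handled below: the port is now
 * sourcing VBUS, so take over the source power role, send PS_RDY to the
 * former source (falling back to error recovery if the transmit fails),
 * present Rp on CC and restart the source policy engine after
 * PD_T_SWAP_SRC_START.
 */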
tcpm_set_pwr_role(port, TYPEC_SOURCE); 4563 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) { 4564 tcpm_set_state(port, ERROR_RECOVERY, 0); 4565 break; 4566 } 4567 tcpm_set_cc(port, tcpm_rp_cc(port)); 4568 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START); 4569 break; 4570 4571 /* PR_Swap states */ 4572 case PR_SWAP_ACCEPT: 4573 tcpm_pd_send_control(port, PD_CTRL_ACCEPT); 4574 tcpm_set_state(port, PR_SWAP_START, 0); 4575 break; 4576 case PR_SWAP_SEND: 4577 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP); 4578 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT, 4579 PD_T_SENDER_RESPONSE); 4580 break; 4581 case PR_SWAP_SEND_TIMEOUT: 4582 tcpm_swap_complete(port, -ETIMEDOUT); 4583 tcpm_set_state(port, ready_state(port), 0); 4584 break; 4585 case PR_SWAP_START: 4586 tcpm_apply_rc(port); 4587 if (port->pwr_role == TYPEC_SOURCE) 4588 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF, 4589 PD_T_SRC_TRANSITION); 4590 else 4591 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0); 4592 break; 4593 case PR_SWAP_SRC_SNK_TRANSITION_OFF: 4594 /* 4595 * Prevent vbus discharge circuit from turning on during PR_SWAP 4596 * as this is not a disconnect. 4597 */ 4598 tcpm_set_vbus(port, false); 4599 port->explicit_contract = false; 4600 /* allow time for Vbus discharge, must be < tSrcSwapStdby */ 4601 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 4602 PD_T_SRCSWAPSTDBY); 4603 break; 4604 case PR_SWAP_SRC_SNK_SOURCE_OFF: 4605 tcpm_set_cc(port, TYPEC_CC_RD); 4606 /* allow CC debounce */ 4607 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED, 4608 PD_T_CC_DEBOUNCE); 4609 break; 4610 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED: 4611 /* 4612 * USB-PD standard, 6.2.1.4, Port Power Role: 4613 * "During the Power Role Swap Sequence, for the initial Source 4614 * Port, the Port Power Role field shall be set to Sink in the 4615 * PS_RDY Message indicating that the initial Source’s power 4616 * supply is turned off" 4617 */ 4618 tcpm_set_pwr_role(port, TYPEC_SINK); 4619 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) { 4620 tcpm_set_state(port, ERROR_RECOVERY, 0); 4621 break; 4622 } 4623 tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS); 4624 break; 4625 case PR_SWAP_SRC_SNK_SINK_ON: 4626 tcpm_enable_auto_vbus_discharge(port, true); 4627 /* Set the vbus disconnect threshold for implicit contract */ 4628 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V); 4629 tcpm_set_state(port, SNK_STARTUP, 0); 4630 break; 4631 case PR_SWAP_SNK_SRC_SINK_OFF: 4632 /* 4633 * Prevent vbus discharge circuit from turning on during PR_SWAP 4634 * as this is not a disconnect. 4635 */ 4636 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, 4637 port->pps_data.active, 0); 4638 tcpm_set_charge(port, false); 4639 tcpm_set_state(port, hard_reset_state(port), 4640 PD_T_PS_SOURCE_OFF); 4641 break; 4642 case PR_SWAP_SNK_SRC_SOURCE_ON: 4643 tcpm_enable_auto_vbus_discharge(port, true); 4644 tcpm_set_cc(port, tcpm_rp_cc(port)); 4645 tcpm_set_vbus(port, true); 4646 /* 4647 * allow time VBUS ramp-up, must be < tNewSrc 4648 * Also, this window overlaps with CC debounce as well. 
4649 * So, Wait for the max of two which is PD_T_NEWSRC 4650 */ 4651 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP, 4652 PD_T_NEWSRC); 4653 break; 4654 case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP: 4655 /* 4656 * USB PD standard, 6.2.1.4: 4657 * "Subsequent Messages initiated by the Policy Engine, 4658 * such as the PS_RDY Message sent to indicate that Vbus 4659 * is ready, will have the Port Power Role field set to 4660 * Source." 4661 */ 4662 tcpm_set_pwr_role(port, TYPEC_SOURCE); 4663 tcpm_pd_send_control(port, PD_CTRL_PS_RDY); 4664 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START); 4665 break; 4666 4667 case VCONN_SWAP_ACCEPT: 4668 tcpm_pd_send_control(port, PD_CTRL_ACCEPT); 4669 tcpm_ams_finish(port); 4670 tcpm_set_state(port, VCONN_SWAP_START, 0); 4671 break; 4672 case VCONN_SWAP_SEND: 4673 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP); 4674 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT, 4675 PD_T_SENDER_RESPONSE); 4676 break; 4677 case VCONN_SWAP_SEND_TIMEOUT: 4678 tcpm_swap_complete(port, -ETIMEDOUT); 4679 tcpm_set_state(port, ready_state(port), 0); 4680 break; 4681 case VCONN_SWAP_START: 4682 if (port->vconn_role == TYPEC_SOURCE) 4683 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0); 4684 else 4685 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0); 4686 break; 4687 case VCONN_SWAP_WAIT_FOR_VCONN: 4688 tcpm_set_state(port, hard_reset_state(port), 4689 PD_T_VCONN_SOURCE_ON); 4690 break; 4691 case VCONN_SWAP_TURN_ON_VCONN: 4692 tcpm_set_vconn(port, true); 4693 tcpm_pd_send_control(port, PD_CTRL_PS_RDY); 4694 tcpm_set_state(port, ready_state(port), 0); 4695 break; 4696 case VCONN_SWAP_TURN_OFF_VCONN: 4697 tcpm_set_vconn(port, false); 4698 tcpm_set_state(port, ready_state(port), 0); 4699 break; 4700 4701 case DR_SWAP_CANCEL: 4702 case PR_SWAP_CANCEL: 4703 case VCONN_SWAP_CANCEL: 4704 tcpm_swap_complete(port, port->swap_status); 4705 if (port->pwr_role == TYPEC_SOURCE) 4706 tcpm_set_state(port, SRC_READY, 0); 4707 else 4708 tcpm_set_state(port, SNK_READY, 0); 4709 break; 4710 case FR_SWAP_CANCEL: 4711 if (port->pwr_role == TYPEC_SOURCE) 4712 tcpm_set_state(port, SRC_READY, 0); 4713 else 4714 tcpm_set_state(port, SNK_READY, 0); 4715 break; 4716 4717 case BIST_RX: 4718 switch (BDO_MODE_MASK(port->bist_request)) { 4719 case BDO_MODE_CARRIER2: 4720 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL); 4721 tcpm_set_state(port, unattached_state(port), 4722 PD_T_BIST_CONT_MODE); 4723 break; 4724 case BDO_MODE_TESTDATA: 4725 if (port->tcpc->set_bist_data) { 4726 tcpm_log(port, "Enable BIST MODE TESTDATA"); 4727 port->tcpc->set_bist_data(port->tcpc, true); 4728 } 4729 break; 4730 default: 4731 break; 4732 } 4733 break; 4734 case GET_STATUS_SEND: 4735 tcpm_pd_send_control(port, PD_CTRL_GET_STATUS); 4736 tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT, 4737 PD_T_SENDER_RESPONSE); 4738 break; 4739 case GET_STATUS_SEND_TIMEOUT: 4740 tcpm_set_state(port, ready_state(port), 0); 4741 break; 4742 case GET_PPS_STATUS_SEND: 4743 tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS); 4744 tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT, 4745 PD_T_SENDER_RESPONSE); 4746 break; 4747 case GET_PPS_STATUS_SEND_TIMEOUT: 4748 tcpm_set_state(port, ready_state(port), 0); 4749 break; 4750 case GET_SINK_CAP: 4751 tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP); 4752 tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE); 4753 break; 4754 case GET_SINK_CAP_TIMEOUT: 4755 port->sink_cap_done = true; 4756 tcpm_set_state(port, ready_state(port), 0); 4757 break; 4758 case ERROR_RECOVERY: 4759 
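/*
 * ERROR_RECOVERY, as handled below: fail any pending swap or PPS request
 * with -EPROTO, then run a full port reset, driving both CC pins open for
 * PD_T_ERROR_RECOVERY before returning to the default unattached state.
 */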
tcpm_swap_complete(port, -EPROTO); 4760 tcpm_pps_complete(port, -EPROTO); 4761 tcpm_set_state(port, PORT_RESET, 0); 4762 break; 4763 case PORT_RESET: 4764 tcpm_reset_port(port); 4765 tcpm_set_cc(port, TYPEC_CC_OPEN); 4766 tcpm_set_state(port, PORT_RESET_WAIT_OFF, 4767 PD_T_ERROR_RECOVERY); 4768 break; 4769 case PORT_RESET_WAIT_OFF: 4770 tcpm_set_state(port, 4771 tcpm_default_state(port), 4772 port->vbus_present ? PD_T_PS_SOURCE_OFF : 0); 4773 break; 4774 4775 /* AMS intermediate state */ 4776 case AMS_START: 4777 if (port->upcoming_state == INVALID_STATE) { 4778 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? 4779 SRC_READY : SNK_READY, 0); 4780 break; 4781 } 4782 4783 upcoming_state = port->upcoming_state; 4784 port->upcoming_state = INVALID_STATE; 4785 tcpm_set_state(port, upcoming_state, 0); 4786 break; 4787 4788 /* Chunk state */ 4789 case CHUNK_NOT_SUPP: 4790 tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP); 4791 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0); 4792 break; 4793 default: 4794 WARN(1, "Unexpected port state %d\n", port->state); 4795 break; 4796 } 4797} 4798 4799static void tcpm_state_machine_work(struct kthread_work *work) 4800{ 4801 struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine); 4802 enum tcpm_state prev_state; 4803 4804 mutex_lock(&port->lock); 4805 port->state_machine_running = true; 4806 4807 if (port->queued_message && tcpm_send_queued_message(port)) 4808 goto done; 4809 4810 /* If we were queued due to a delayed state change, update it now */ 4811 if (port->delayed_state) { 4812 tcpm_log(port, "state change %s -> %s [delayed %ld ms]", 4813 tcpm_states[port->state], 4814 tcpm_states[port->delayed_state], port->delay_ms); 4815 port->prev_state = port->state; 4816 port->state = port->delayed_state; 4817 port->delayed_state = INVALID_STATE; 4818 } 4819 4820 /* 4821 * Continue running as long as we have (non-delayed) state changes 4822 * to make. 4823 */ 4824 do { 4825 prev_state = port->state; 4826 run_state_machine(port); 4827 if (port->queued_message) 4828 tcpm_send_queued_message(port); 4829 } while (port->state != prev_state && !port->delayed_state); 4830 4831done: 4832 port->state_machine_running = false; 4833 mutex_unlock(&port->lock); 4834} 4835 4836static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, 4837 enum typec_cc_status cc2) 4838{ 4839 enum typec_cc_status old_cc1, old_cc2; 4840 enum tcpm_state new_state; 4841 4842 old_cc1 = port->cc1; 4843 old_cc2 = port->cc2; 4844 port->cc1 = cc1; 4845 port->cc2 = cc2; 4846 4847 tcpm_log_force(port, 4848 "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]", 4849 old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state], 4850 port->polarity, 4851 tcpm_port_is_disconnected(port) ? 
"disconnected" 4852 : "connected"); 4853 4854 switch (port->state) { 4855 case TOGGLING: 4856 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) || 4857 tcpm_port_is_source(port)) 4858 tcpm_set_state(port, SRC_ATTACH_WAIT, 0); 4859 else if (tcpm_port_is_sink(port)) 4860 tcpm_set_state(port, SNK_ATTACH_WAIT, 0); 4861 break; 4862 case SRC_UNATTACHED: 4863 case ACC_UNATTACHED: 4864 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) || 4865 tcpm_port_is_source(port)) 4866 tcpm_set_state(port, SRC_ATTACH_WAIT, 0); 4867 break; 4868 case SRC_ATTACH_WAIT: 4869 if (tcpm_port_is_disconnected(port) || 4870 tcpm_port_is_audio_detached(port)) 4871 tcpm_set_state(port, SRC_UNATTACHED, 0); 4872 else if (cc1 != old_cc1 || cc2 != old_cc2) 4873 tcpm_set_state(port, SRC_ATTACH_WAIT, 0); 4874 break; 4875 case SRC_ATTACHED: 4876 case SRC_STARTUP: 4877 case SRC_SEND_CAPABILITIES: 4878 case SRC_READY: 4879 if (tcpm_port_is_disconnected(port) || 4880 !tcpm_port_is_source(port)) { 4881 if (port->port_type == TYPEC_PORT_SRC) 4882 tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port)); 4883 else 4884 tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port)); 4885 } 4886 break; 4887 case SNK_UNATTACHED: 4888 if (tcpm_port_is_sink(port)) 4889 tcpm_set_state(port, SNK_ATTACH_WAIT, 0); 4890 break; 4891 case SNK_ATTACH_WAIT: 4892 if ((port->cc1 == TYPEC_CC_OPEN && 4893 port->cc2 != TYPEC_CC_OPEN) || 4894 (port->cc1 != TYPEC_CC_OPEN && 4895 port->cc2 == TYPEC_CC_OPEN)) 4896 new_state = SNK_DEBOUNCED; 4897 else if (tcpm_port_is_disconnected(port)) 4898 new_state = SNK_UNATTACHED; 4899 else 4900 break; 4901 if (new_state != port->delayed_state) 4902 tcpm_set_state(port, SNK_ATTACH_WAIT, 0); 4903 break; 4904 case SNK_DEBOUNCED: 4905 if (tcpm_port_is_disconnected(port)) 4906 new_state = SNK_UNATTACHED; 4907 else if (port->vbus_present) 4908 new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED; 4909 else 4910 new_state = SNK_UNATTACHED; 4911 if (new_state != port->delayed_state) 4912 tcpm_set_state(port, SNK_DEBOUNCED, 0); 4913 break; 4914 case SNK_READY: 4915 /* 4916 * EXIT condition is based primarily on vbus disconnect and CC is secondary. 4917 * "A port that has entered into USB PD communications with the Source and 4918 * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect 4919 * cable disconnect in addition to monitoring VBUS. 4920 * 4921 * A port that is monitoring the CC voltage for disconnect (but is not in 4922 * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to 4923 * Unattached.SNK within tSinkDisconnect after the CC voltage remains below 4924 * vRd-USB for tPDDebounce." 4925 * 4926 * When set_auto_vbus_discharge_threshold is enabled, CC pins go 4927 * away before vbus decays to disconnect threshold. Allow 4928 * disconnect to be driven by vbus disconnect when auto vbus 4929 * discharge is enabled. 
4930 */ 4931 if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port)) 4932 tcpm_set_state(port, unattached_state(port), 0); 4933 else if (!port->pd_capable && 4934 (cc1 != old_cc1 || cc2 != old_cc2)) 4935 tcpm_set_current_limit(port, 4936 tcpm_get_current_limit(port), 4937 5000); 4938 break; 4939 4940 case AUDIO_ACC_ATTACHED: 4941 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN) 4942 tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0); 4943 break; 4944 case AUDIO_ACC_DEBOUNCE: 4945 if (tcpm_port_is_audio(port)) 4946 tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0); 4947 break; 4948 4949 case DEBUG_ACC_ATTACHED: 4950 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN) 4951 tcpm_set_state(port, ACC_UNATTACHED, 0); 4952 break; 4953 4954 case SNK_TRY: 4955 /* Do nothing, waiting for timeout */ 4956 break; 4957 4958 case SNK_DISCOVERY: 4959 /* CC line is unstable, wait for debounce */ 4960 if (tcpm_port_is_disconnected(port)) 4961 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0); 4962 break; 4963 case SNK_DISCOVERY_DEBOUNCE: 4964 break; 4965 4966 case SRC_TRYWAIT: 4967 /* Hand over to state machine if needed */ 4968 if (!port->vbus_present && tcpm_port_is_source(port)) 4969 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0); 4970 break; 4971 case SRC_TRYWAIT_DEBOUNCE: 4972 if (port->vbus_present || !tcpm_port_is_source(port)) 4973 tcpm_set_state(port, SRC_TRYWAIT, 0); 4974 break; 4975 case SNK_TRY_WAIT_DEBOUNCE: 4976 if (!tcpm_port_is_sink(port)) { 4977 port->max_wait = 0; 4978 tcpm_set_state(port, SRC_TRYWAIT, 0); 4979 } 4980 break; 4981 case SRC_TRY_WAIT: 4982 if (tcpm_port_is_source(port)) 4983 tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0); 4984 break; 4985 case SRC_TRY_DEBOUNCE: 4986 tcpm_set_state(port, SRC_TRY_WAIT, 0); 4987 break; 4988 case SNK_TRYWAIT_DEBOUNCE: 4989 if (tcpm_port_is_sink(port)) 4990 tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0); 4991 break; 4992 case SNK_TRYWAIT_VBUS: 4993 if (!tcpm_port_is_sink(port)) 4994 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0); 4995 break; 4996 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS: 4997 if (!tcpm_port_is_sink(port)) 4998 tcpm_set_state(port, SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE); 4999 else 5000 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 0); 5001 break; 5002 case SNK_TRYWAIT: 5003 /* Do nothing, waiting for tCCDebounce */ 5004 break; 5005 case PR_SWAP_SNK_SRC_SINK_OFF: 5006 case PR_SWAP_SRC_SNK_TRANSITION_OFF: 5007 case PR_SWAP_SRC_SNK_SOURCE_OFF: 5008 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED: 5009 case PR_SWAP_SNK_SRC_SOURCE_ON: 5010 /* 5011 * CC state change is expected in PR_SWAP 5012 * Ignore it. 5013 */ 5014 break; 5015 case FR_SWAP_SEND: 5016 case FR_SWAP_SEND_TIMEOUT: 5017 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: 5018 case FR_SWAP_SNK_SRC_NEW_SINK_READY: 5019 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED: 5020 /* Do nothing, CC change expected */ 5021 break; 5022 5023 case PORT_RESET: 5024 case PORT_RESET_WAIT_OFF: 5025 /* 5026 * State set back to default mode once the timer completes. 5027 * Ignore CC changes here. 5028 */ 5029 break; 5030 default: 5031 /* 5032 * While acting as sink and auto vbus discharge is enabled, Allow disconnect 5033 * to be driven by vbus disconnect. 
5034 */ 5035 if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK && 5036 port->auto_vbus_discharge_enabled)) 5037 tcpm_set_state(port, unattached_state(port), 0); 5038 break; 5039 } 5040} 5041 5042static void _tcpm_pd_vbus_on(struct tcpm_port *port) 5043{ 5044 tcpm_log_force(port, "VBUS on"); 5045 port->vbus_present = true; 5046 /* 5047 * When vbus_present is true i.e. Voltage at VBUS is greater than VSAFE5V implicitly 5048 * states that vbus is not at VSAFE0V, hence clear the vbus_vsafe0v flag here. 5049 */ 5050 port->vbus_vsafe0v = false; 5051 5052 switch (port->state) { 5053 case SNK_TRANSITION_SINK_VBUS: 5054 port->explicit_contract = true; 5055 tcpm_set_state(port, SNK_READY, 0); 5056 break; 5057 case SNK_DISCOVERY: 5058 tcpm_set_state(port, SNK_DISCOVERY, 0); 5059 break; 5060 5061 case SNK_DEBOUNCED: 5062 tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY 5063 : SNK_ATTACHED, 5064 0); 5065 break; 5066 case SNK_HARD_RESET_WAIT_VBUS: 5067 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0); 5068 break; 5069 case SRC_ATTACHED: 5070 tcpm_set_state(port, SRC_STARTUP, 0); 5071 break; 5072 case SRC_HARD_RESET_VBUS_ON: 5073 tcpm_set_state(port, SRC_STARTUP, 0); 5074 break; 5075 5076 case SNK_TRY: 5077 /* Do nothing, waiting for timeout */ 5078 break; 5079 case SRC_TRYWAIT: 5080 /* Do nothing, Waiting for Rd to be detected */ 5081 break; 5082 case SRC_TRYWAIT_DEBOUNCE: 5083 tcpm_set_state(port, SRC_TRYWAIT, 0); 5084 break; 5085 case SNK_TRY_WAIT_DEBOUNCE: 5086 /* Do nothing, waiting for PD_DEBOUNCE to do be done */ 5087 break; 5088 case SNK_TRYWAIT: 5089 /* Do nothing, waiting for tCCDebounce */ 5090 break; 5091 case SNK_TRYWAIT_VBUS: 5092 if (tcpm_port_is_sink(port)) 5093 tcpm_set_state(port, SNK_ATTACHED, 0); 5094 break; 5095 case SNK_TRYWAIT_DEBOUNCE: 5096 /* Do nothing, waiting for Rp */ 5097 break; 5098 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS: 5099 if (port->vbus_present && tcpm_port_is_sink(port)) 5100 tcpm_set_state(port, SNK_ATTACHED, 0); 5101 break; 5102 case SRC_TRY_WAIT: 5103 case SRC_TRY_DEBOUNCE: 5104 /* Do nothing, waiting for sink detection */ 5105 break; 5106 case FR_SWAP_SEND: 5107 case FR_SWAP_SEND_TIMEOUT: 5108 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: 5109 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED: 5110 if (port->tcpc->frs_sourcing_vbus) 5111 port->tcpc->frs_sourcing_vbus(port->tcpc); 5112 break; 5113 case FR_SWAP_SNK_SRC_NEW_SINK_READY: 5114 if (port->tcpc->frs_sourcing_vbus) 5115 port->tcpc->frs_sourcing_vbus(port->tcpc); 5116 tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0); 5117 break; 5118 5119 case PORT_RESET: 5120 case PORT_RESET_WAIT_OFF: 5121 /* 5122 * State set back to default mode once the timer completes. 5123 * Ignore vbus changes here. 
5124 */ 5125 break; 5126 5127 default: 5128 break; 5129 } 5130} 5131 5132static void _tcpm_pd_vbus_off(struct tcpm_port *port) 5133{ 5134 tcpm_log_force(port, "VBUS off"); 5135 port->vbus_present = false; 5136 port->vbus_never_low = false; 5137 switch (port->state) { 5138 case SNK_HARD_RESET_SINK_OFF: 5139 tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0); 5140 break; 5141 case HARD_RESET_SEND: 5142 break; 5143 case SNK_TRY: 5144 /* Do nothing, waiting for timeout */ 5145 break; 5146 case SRC_TRYWAIT: 5147 /* Hand over to state machine if needed */ 5148 if (tcpm_port_is_source(port)) 5149 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0); 5150 break; 5151 case SNK_TRY_WAIT_DEBOUNCE: 5152 /* Do nothing, waiting for PD_DEBOUNCE to do be done */ 5153 break; 5154 case SNK_TRYWAIT: 5155 case SNK_TRYWAIT_VBUS: 5156 case SNK_TRYWAIT_DEBOUNCE: 5157 break; 5158 case SNK_ATTACH_WAIT: 5159 case SNK_DEBOUNCED: 5160 /* Do nothing, as TCPM is still waiting for vbus to reaach VSAFE5V to connect */ 5161 break; 5162 5163 case SNK_NEGOTIATE_CAPABILITIES: 5164 break; 5165 5166 case PR_SWAP_SRC_SNK_TRANSITION_OFF: 5167 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0); 5168 break; 5169 5170 case PR_SWAP_SNK_SRC_SINK_OFF: 5171 /* Do nothing, expected */ 5172 break; 5173 5174 case PR_SWAP_SNK_SRC_SOURCE_ON: 5175 /* 5176 * Do nothing when vbus off notification is received. 5177 * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON 5178 * for the vbus source to ramp up. 5179 */ 5180 break; 5181 5182 case PORT_RESET_WAIT_OFF: 5183 tcpm_set_state(port, tcpm_default_state(port), 0); 5184 break; 5185 5186 case SRC_TRY_WAIT: 5187 case SRC_TRY_DEBOUNCE: 5188 /* Do nothing, waiting for sink detection */ 5189 break; 5190 5191 case SRC_STARTUP: 5192 case SRC_SEND_CAPABILITIES: 5193 case SRC_SEND_CAPABILITIES_TIMEOUT: 5194 case SRC_NEGOTIATE_CAPABILITIES: 5195 case SRC_TRANSITION_SUPPLY: 5196 case SRC_READY: 5197 case SRC_WAIT_NEW_CAPABILITIES: 5198 /* 5199 * Force to unattached state to re-initiate connection. 5200 * DRP port should move to Unattached.SNK instead of Unattached.SRC if 5201 * sink removed. Although sink removal here is due to source's vbus collapse, 5202 * treat it the same way for consistency. 5203 */ 5204 if (port->port_type == TYPEC_PORT_SRC) 5205 tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port)); 5206 else 5207 tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port)); 5208 break; 5209 5210 case PORT_RESET: 5211 /* 5212 * State set back to default mode once the timer completes. 5213 * Ignore vbus changes here. 5214 */ 5215 break; 5216 5217 case FR_SWAP_SEND: 5218 case FR_SWAP_SEND_TIMEOUT: 5219 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: 5220 case FR_SWAP_SNK_SRC_NEW_SINK_READY: 5221 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED: 5222 /* Do nothing, vbus drop expected */ 5223 break; 5224 5225 default: 5226 if (port->pwr_role == TYPEC_SINK && port->attached) 5227 tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port)); 5228 break; 5229 } 5230} 5231 5232static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port) 5233{ 5234 tcpm_log_force(port, "VBUS VSAFE0V"); 5235 port->vbus_vsafe0v = true; 5236 switch (port->state) { 5237 case SRC_HARD_RESET_VBUS_OFF: 5238 /* 5239 * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait 5240 * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V. 
5241 */ 5242 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER); 5243 break; 5244 case SRC_ATTACH_WAIT: 5245 if (tcpm_port_is_source(port)) 5246 tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED, 5247 PD_T_CC_DEBOUNCE); 5248 break; 5249 case SRC_STARTUP: 5250 case SRC_SEND_CAPABILITIES: 5251 case SRC_SEND_CAPABILITIES_TIMEOUT: 5252 case SRC_NEGOTIATE_CAPABILITIES: 5253 case SRC_TRANSITION_SUPPLY: 5254 case SRC_READY: 5255 case SRC_WAIT_NEW_CAPABILITIES: 5256 if (port->auto_vbus_discharge_enabled) { 5257 if (port->port_type == TYPEC_PORT_SRC) 5258 tcpm_set_state(port, SRC_UNATTACHED, 0); 5259 else 5260 tcpm_set_state(port, SNK_UNATTACHED, 0); 5261 } 5262 break; 5263 case PR_SWAP_SNK_SRC_SINK_OFF: 5264 case PR_SWAP_SNK_SRC_SOURCE_ON: 5265 /* Do nothing, vsafe0v is expected during transition */ 5266 break; 5267 case SNK_ATTACH_WAIT: 5268 case SNK_DEBOUNCED: 5269 /*Do nothing, still waiting for VSAFE5V for connect */ 5270 break; 5271 default: 5272 if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled) 5273 tcpm_set_state(port, SNK_UNATTACHED, 0); 5274 break; 5275 } 5276} 5277 5278static void _tcpm_pd_hard_reset(struct tcpm_port *port) 5279{ 5280 tcpm_log_force(port, "Received hard reset"); 5281 if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data) 5282 port->tcpc->set_bist_data(port->tcpc, false); 5283 5284 if (port->ams != NONE_AMS) 5285 port->ams = NONE_AMS; 5286 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) 5287 port->ams = HARD_RESET; 5288 /* 5289 * If we keep receiving hard reset requests, executing the hard reset 5290 * must have failed. Revert to error recovery if that happens. 5291 */ 5292 tcpm_set_state(port, 5293 port->hard_reset_count < PD_N_HARD_RESET_COUNT ? 5294 HARD_RESET_START : ERROR_RECOVERY, 5295 0); 5296} 5297 5298static void tcpm_pd_event_handler(struct kthread_work *work) 5299{ 5300 struct tcpm_port *port = container_of(work, struct tcpm_port, 5301 event_work); 5302 u32 events; 5303 5304 mutex_lock(&port->lock); 5305 5306 spin_lock(&port->pd_event_lock); 5307 while (port->pd_events) { 5308 events = port->pd_events; 5309 port->pd_events = 0; 5310 spin_unlock(&port->pd_event_lock); 5311 if (events & TCPM_RESET_EVENT) 5312 _tcpm_pd_hard_reset(port); 5313 if (events & TCPM_VBUS_EVENT) { 5314 bool vbus; 5315 5316 vbus = port->tcpc->get_vbus(port->tcpc); 5317 if (vbus) { 5318 _tcpm_pd_vbus_on(port); 5319 } else { 5320 _tcpm_pd_vbus_off(port); 5321 /* 5322 * When TCPC does not support detecting vsafe0v voltage level, 5323 * treat vbus absent as vsafe0v. Else invoke is_vbus_vsafe0v 5324 * to see if vbus has discharge to VSAFE0V. 5325 */ 5326 if (!port->tcpc->is_vbus_vsafe0v || 5327 port->tcpc->is_vbus_vsafe0v(port->tcpc)) 5328 _tcpm_pd_vbus_vsafe0v(port); 5329 } 5330 } 5331 if (events & TCPM_CC_EVENT) { 5332 enum typec_cc_status cc1, cc2; 5333 5334 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0) 5335 _tcpm_cc_change(port, cc1, cc2); 5336 } 5337 if (events & TCPM_FRS_EVENT) { 5338 if (port->state == SNK_READY) { 5339 int ret; 5340 5341 port->upcoming_state = FR_SWAP_SEND; 5342 ret = tcpm_ams_start(port, FAST_ROLE_SWAP); 5343 if (ret == -EAGAIN) 5344 port->upcoming_state = INVALID_STATE; 5345 } else { 5346 tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready"); 5347 } 5348 } 5349 if (events & TCPM_SOURCING_VBUS) { 5350 tcpm_log(port, "sourcing vbus"); 5351 /* 5352 * In fast role swap case TCPC autonomously sources vbus. Set vbus_source 5353 * true as TCPM wouldn't have called tcpm_set_vbus. 
5354 * 5355 * When vbus is sourced on the command on TCPM i.e. TCPM called 5356 * tcpm_set_vbus to source vbus, vbus_source would already be true. 5357 */ 5358 port->vbus_source = true; 5359 _tcpm_pd_vbus_on(port); 5360 } 5361 5362 spin_lock(&port->pd_event_lock); 5363 } 5364 spin_unlock(&port->pd_event_lock); 5365 mutex_unlock(&port->lock); 5366} 5367 5368void tcpm_cc_change(struct tcpm_port *port) 5369{ 5370 spin_lock(&port->pd_event_lock); 5371 port->pd_events |= TCPM_CC_EVENT; 5372 spin_unlock(&port->pd_event_lock); 5373 kthread_queue_work(port->wq, &port->event_work); 5374} 5375EXPORT_SYMBOL_GPL(tcpm_cc_change); 5376 5377void tcpm_vbus_change(struct tcpm_port *port) 5378{ 5379 spin_lock(&port->pd_event_lock); 5380 port->pd_events |= TCPM_VBUS_EVENT; 5381 spin_unlock(&port->pd_event_lock); 5382 kthread_queue_work(port->wq, &port->event_work); 5383} 5384EXPORT_SYMBOL_GPL(tcpm_vbus_change); 5385 5386void tcpm_pd_hard_reset(struct tcpm_port *port) 5387{ 5388 spin_lock(&port->pd_event_lock); 5389 port->pd_events = TCPM_RESET_EVENT; 5390 spin_unlock(&port->pd_event_lock); 5391 kthread_queue_work(port->wq, &port->event_work); 5392} 5393EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset); 5394 5395void tcpm_sink_frs(struct tcpm_port *port) 5396{ 5397 spin_lock(&port->pd_event_lock); 5398 port->pd_events |= TCPM_FRS_EVENT; 5399 spin_unlock(&port->pd_event_lock); 5400 kthread_queue_work(port->wq, &port->event_work); 5401} 5402EXPORT_SYMBOL_GPL(tcpm_sink_frs); 5403 5404void tcpm_sourcing_vbus(struct tcpm_port *port) 5405{ 5406 spin_lock(&port->pd_event_lock); 5407 port->pd_events |= TCPM_SOURCING_VBUS; 5408 spin_unlock(&port->pd_event_lock); 5409 kthread_queue_work(port->wq, &port->event_work); 5410} 5411EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus); 5412 5413static void tcpm_enable_frs_work(struct kthread_work *work) 5414{ 5415 struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs); 5416 int ret; 5417 5418 mutex_lock(&port->lock); 5419 /* Not FRS capable */ 5420 if (!port->connected || port->port_type != TYPEC_PORT_DRP || 5421 port->pwr_opmode != TYPEC_PWR_MODE_PD || 5422 !port->tcpc->enable_frs || 5423 /* Sink caps queried */ 5424 port->sink_cap_done || port->negotiated_rev < PD_REV30) 5425 goto unlock; 5426 5427 /* Send when the state machine is idle */ 5428 if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover) 5429 goto resched; 5430 5431 port->upcoming_state = GET_SINK_CAP; 5432 ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES); 5433 if (ret == -EAGAIN) { 5434 port->upcoming_state = INVALID_STATE; 5435 } else { 5436 port->sink_cap_done = true; 5437 goto unlock; 5438 } 5439resched: 5440 mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS); 5441unlock: 5442 mutex_unlock(&port->lock); 5443} 5444 5445static void tcpm_send_discover_work(struct kthread_work *work) 5446{ 5447 struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work); 5448 5449 mutex_lock(&port->lock); 5450 /* No need to send DISCOVER_IDENTITY anymore */ 5451 if (!port->send_discover) 5452 goto unlock; 5453 5454 if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) { 5455 port->send_discover = false; 5456 goto unlock; 5457 } 5458 5459 /* Retry if the port is not idle */ 5460 if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) { 5461 mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS); 5462 goto unlock; 5463 } 5464 5465 tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0); 5466 5467unlock: 5468 
mutex_unlock(&port->lock); 5469} 5470 5471static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data) 5472{ 5473 struct tcpm_port *port = typec_get_drvdata(p); 5474 int ret; 5475 5476 mutex_lock(&port->swap_lock); 5477 mutex_lock(&port->lock); 5478 5479 if (port->typec_caps.data != TYPEC_PORT_DRD) { 5480 ret = -EINVAL; 5481 goto port_unlock; 5482 } 5483 if (port->state != SRC_READY && port->state != SNK_READY) { 5484 ret = -EAGAIN; 5485 goto port_unlock; 5486 } 5487 5488 if (port->data_role == data) { 5489 ret = 0; 5490 goto port_unlock; 5491 } 5492 5493 /* 5494 * XXX 5495 * 6.3.9: If an alternate mode is active, a request to swap 5496 * alternate modes shall trigger a port reset. 5497 * Reject data role swap request in this case. 5498 */ 5499 5500 if (!port->pd_capable) { 5501 /* 5502 * If the partner is not PD capable, reset the port to 5503 * trigger a role change. This can only work if a preferred 5504 * role is configured, and if it matches the requested role. 5505 */ 5506 if (port->try_role == TYPEC_NO_PREFERRED_ROLE || 5507 port->try_role == port->pwr_role) { 5508 ret = -EINVAL; 5509 goto port_unlock; 5510 } 5511 port->non_pd_role_swap = true; 5512 tcpm_set_state(port, PORT_RESET, 0); 5513 } else { 5514 port->upcoming_state = DR_SWAP_SEND; 5515 ret = tcpm_ams_start(port, DATA_ROLE_SWAP); 5516 if (ret == -EAGAIN) { 5517 port->upcoming_state = INVALID_STATE; 5518 goto port_unlock; 5519 } 5520 } 5521 5522 port->swap_status = 0; 5523 port->swap_pending = true; 5524 reinit_completion(&port->swap_complete); 5525 mutex_unlock(&port->lock); 5526 5527 if (!wait_for_completion_timeout(&port->swap_complete, 5528 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT))) 5529 ret = -ETIMEDOUT; 5530 else 5531 ret = port->swap_status; 5532 5533 port->non_pd_role_swap = false; 5534 goto swap_unlock; 5535 5536port_unlock: 5537 mutex_unlock(&port->lock); 5538swap_unlock: 5539 mutex_unlock(&port->swap_lock); 5540 return ret; 5541} 5542 5543static int tcpm_pr_set(struct typec_port *p, enum typec_role role) 5544{ 5545 struct tcpm_port *port = typec_get_drvdata(p); 5546 int ret; 5547 5548 mutex_lock(&port->swap_lock); 5549 mutex_lock(&port->lock); 5550 5551 if (port->port_type != TYPEC_PORT_DRP) { 5552 ret = -EINVAL; 5553 goto port_unlock; 5554 } 5555 if (port->state != SRC_READY && port->state != SNK_READY) { 5556 ret = -EAGAIN; 5557 goto port_unlock; 5558 } 5559 5560 if (role == port->pwr_role) { 5561 ret = 0; 5562 goto port_unlock; 5563 } 5564 5565 port->upcoming_state = PR_SWAP_SEND; 5566 ret = tcpm_ams_start(port, POWER_ROLE_SWAP); 5567 if (ret == -EAGAIN) { 5568 port->upcoming_state = INVALID_STATE; 5569 goto port_unlock; 5570 } 5571 5572 port->swap_status = 0; 5573 port->swap_pending = true; 5574 reinit_completion(&port->swap_complete); 5575 mutex_unlock(&port->lock); 5576 5577 if (!wait_for_completion_timeout(&port->swap_complete, 5578 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT))) 5579 ret = -ETIMEDOUT; 5580 else 5581 ret = port->swap_status; 5582 5583 goto swap_unlock; 5584 5585port_unlock: 5586 mutex_unlock(&port->lock); 5587swap_unlock: 5588 mutex_unlock(&port->swap_lock); 5589 return ret; 5590} 5591 5592static int tcpm_vconn_set(struct typec_port *p, enum typec_role role) 5593{ 5594 struct tcpm_port *port = typec_get_drvdata(p); 5595 int ret; 5596 5597 mutex_lock(&port->swap_lock); 5598 mutex_lock(&port->lock); 5599 5600 if (port->state != SRC_READY && port->state != SNK_READY) { 5601 ret = -EAGAIN; 5602 goto port_unlock; 5603 } 5604 5605 if (role == port->vconn_role) { 5606 ret = 0; 5607 goto 
port_unlock; 5608 } 5609 5610 port->upcoming_state = VCONN_SWAP_SEND; 5611 ret = tcpm_ams_start(port, VCONN_SWAP); 5612 if (ret == -EAGAIN) { 5613 port->upcoming_state = INVALID_STATE; 5614 goto port_unlock; 5615 } 5616 5617 port->swap_status = 0; 5618 port->swap_pending = true; 5619 reinit_completion(&port->swap_complete); 5620 mutex_unlock(&port->lock); 5621 5622 if (!wait_for_completion_timeout(&port->swap_complete, 5623 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT))) 5624 ret = -ETIMEDOUT; 5625 else 5626 ret = port->swap_status; 5627 5628 goto swap_unlock; 5629 5630port_unlock: 5631 mutex_unlock(&port->lock); 5632swap_unlock: 5633 mutex_unlock(&port->swap_lock); 5634 return ret; 5635} 5636 5637static int tcpm_try_role(struct typec_port *p, int role) 5638{ 5639 struct tcpm_port *port = typec_get_drvdata(p); 5640 struct tcpc_dev *tcpc = port->tcpc; 5641 int ret = 0; 5642 5643 mutex_lock(&port->lock); 5644 if (tcpc->try_role) 5645 ret = tcpc->try_role(tcpc, role); 5646 if (!ret) 5647 port->try_role = role; 5648 port->try_src_count = 0; 5649 port->try_snk_count = 0; 5650 mutex_unlock(&port->lock); 5651 5652 return ret; 5653} 5654 5655static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr) 5656{ 5657 unsigned int target_mw; 5658 int ret; 5659 5660 mutex_lock(&port->swap_lock); 5661 mutex_lock(&port->lock); 5662 5663 if (!port->pps_data.active) { 5664 ret = -EOPNOTSUPP; 5665 goto port_unlock; 5666 } 5667 5668 if (port->state != SNK_READY) { 5669 ret = -EAGAIN; 5670 goto port_unlock; 5671 } 5672 5673 if (req_op_curr > port->pps_data.max_curr) { 5674 ret = -EINVAL; 5675 goto port_unlock; 5676 } 5677 5678 target_mw = (req_op_curr * port->supply_voltage) / 1000; 5679 if (target_mw < port->operating_snk_mw) { 5680 ret = -EINVAL; 5681 goto port_unlock; 5682 } 5683 5684 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES; 5685 ret = tcpm_ams_start(port, POWER_NEGOTIATION); 5686 if (ret == -EAGAIN) { 5687 port->upcoming_state = INVALID_STATE; 5688 goto port_unlock; 5689 } 5690 5691 /* Round down operating current to align with PPS valid steps */ 5692 req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP); 5693 5694 reinit_completion(&port->pps_complete); 5695 port->pps_data.req_op_curr = req_op_curr; 5696 port->pps_status = 0; 5697 port->pps_pending = true; 5698 mutex_unlock(&port->lock); 5699 5700 if (!wait_for_completion_timeout(&port->pps_complete, 5701 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT))) 5702 ret = -ETIMEDOUT; 5703 else 5704 ret = port->pps_status; 5705 5706 goto swap_unlock; 5707 5708port_unlock: 5709 mutex_unlock(&port->lock); 5710swap_unlock: 5711 mutex_unlock(&port->swap_lock); 5712 5713 return ret; 5714} 5715 5716static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt) 5717{ 5718 unsigned int target_mw; 5719 int ret; 5720 5721 mutex_lock(&port->swap_lock); 5722 mutex_lock(&port->lock); 5723 5724 if (!port->pps_data.active) { 5725 ret = -EOPNOTSUPP; 5726 goto port_unlock; 5727 } 5728 5729 if (port->state != SNK_READY) { 5730 ret = -EAGAIN; 5731 goto port_unlock; 5732 } 5733 5734 if (req_out_volt < port->pps_data.min_volt || 5735 req_out_volt > port->pps_data.max_volt) { 5736 ret = -EINVAL; 5737 goto port_unlock; 5738 } 5739 5740 target_mw = (port->current_limit * req_out_volt) / 1000; 5741 if (target_mw < port->operating_snk_mw) { 5742 ret = -EINVAL; 5743 goto port_unlock; 5744 } 5745 5746 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES; 5747 ret = tcpm_ams_start(port, POWER_NEGOTIATION); 5748 if (ret == -EAGAIN) { 5749 port->upcoming_state 
= INVALID_STATE; 5750 goto port_unlock; 5751 } 5752 5753 /* Round down output voltage to align with PPS valid steps */ 5754 req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP); 5755 5756 reinit_completion(&port->pps_complete); 5757 port->pps_data.req_out_volt = req_out_volt; 5758 port->pps_status = 0; 5759 port->pps_pending = true; 5760 mutex_unlock(&port->lock); 5761 5762 if (!wait_for_completion_timeout(&port->pps_complete, 5763 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT))) 5764 ret = -ETIMEDOUT; 5765 else 5766 ret = port->pps_status; 5767 5768 goto swap_unlock; 5769 5770port_unlock: 5771 mutex_unlock(&port->lock); 5772swap_unlock: 5773 mutex_unlock(&port->swap_lock); 5774 5775 return ret; 5776} 5777 5778static int tcpm_pps_activate(struct tcpm_port *port, bool activate) 5779{ 5780 int ret = 0; 5781 5782 mutex_lock(&port->swap_lock); 5783 mutex_lock(&port->lock); 5784 5785 if (!port->pps_data.supported) { 5786 ret = -EOPNOTSUPP; 5787 goto port_unlock; 5788 } 5789 5790 /* Trying to deactivate PPS when already deactivated so just bail */ 5791 if (!port->pps_data.active && !activate) 5792 goto port_unlock; 5793 5794 if (port->state != SNK_READY) { 5795 ret = -EAGAIN; 5796 goto port_unlock; 5797 } 5798 5799 if (activate) 5800 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES; 5801 else 5802 port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES; 5803 ret = tcpm_ams_start(port, POWER_NEGOTIATION); 5804 if (ret == -EAGAIN) { 5805 port->upcoming_state = INVALID_STATE; 5806 goto port_unlock; 5807 } 5808 5809 reinit_completion(&port->pps_complete); 5810 port->pps_status = 0; 5811 port->pps_pending = true; 5812 5813 /* Trigger PPS request or move back to standard PDO contract */ 5814 if (activate) { 5815 port->pps_data.req_out_volt = port->supply_voltage; 5816 port->pps_data.req_op_curr = port->current_limit; 5817 } 5818 mutex_unlock(&port->lock); 5819 5820 if (!wait_for_completion_timeout(&port->pps_complete, 5821 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT))) 5822 ret = -ETIMEDOUT; 5823 else 5824 ret = port->pps_status; 5825 5826 goto swap_unlock; 5827 5828port_unlock: 5829 mutex_unlock(&port->lock); 5830swap_unlock: 5831 mutex_unlock(&port->swap_lock); 5832 5833 return ret; 5834} 5835 5836static void tcpm_init(struct tcpm_port *port) 5837{ 5838 enum typec_cc_status cc1, cc2; 5839 5840 port->tcpc->init(port->tcpc); 5841 5842 tcpm_reset_port(port); 5843 5844 /* 5845 * XXX 5846 * Should possibly wait for VBUS to settle if it was enabled locally 5847 * since tcpm_reset_port() will disable VBUS. 5848 */ 5849 port->vbus_present = port->tcpc->get_vbus(port->tcpc); 5850 if (port->vbus_present) 5851 port->vbus_never_low = true; 5852 5853 /* 5854 * 1. When vbus_present is true, voltage on VBUS is already at VSAFE5V. 5855 * So implicitly vbus_vsafe0v = false. 5856 * 5857 * 2. When vbus_present is false and TCPC does NOT support querying 5858 * vsafe0v status, then, it's best to assume vbus is at VSAFE0V i.e. 5859 * vbus_vsafe0v is true. 5860 * 5861 * 3. When vbus_present is false and TCPC does support querying vsafe0v, 5862 * then, query tcpc for vsafe0v status. 
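 *
 * Put differently, the assignment below amounts to:
 *   vbus_vsafe0v = !port->vbus_present &&
 *                  (!port->tcpc->is_vbus_vsafe0v ||
 *                   port->tcpc->is_vbus_vsafe0v(port->tcpc));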
static void tcpm_init(struct tcpm_port *port)
{
	enum typec_cc_status cc1, cc2;

	port->tcpc->init(port->tcpc);

	tcpm_reset_port(port);

	/*
	 * XXX
	 * Should possibly wait for VBUS to settle if it was enabled locally
	 * since tcpm_reset_port() will disable VBUS.
	 */
	port->vbus_present = port->tcpc->get_vbus(port->tcpc);
	if (port->vbus_present)
		port->vbus_never_low = true;

	/*
	 * 1. When vbus_present is true, the voltage on VBUS is already at
	 *    VSAFE5V, so implicitly vbus_vsafe0v = false.
	 *
	 * 2. When vbus_present is false and the TCPC does NOT support querying
	 *    the vsafe0v status, it's best to assume VBUS is at VSAFE0V, i.e.
	 *    vbus_vsafe0v is true.
	 *
	 * 3. When vbus_present is false and the TCPC does support querying
	 *    vsafe0v, query the TCPC for the vsafe0v status.
	 */
	if (port->vbus_present)
		port->vbus_vsafe0v = false;
	else if (!port->tcpc->is_vbus_vsafe0v)
		port->vbus_vsafe0v = true;
	else
		port->vbus_vsafe0v = port->tcpc->is_vbus_vsafe0v(port->tcpc);

	tcpm_set_state(port, tcpm_default_state(port), 0);

	if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
		_tcpm_cc_change(port, cc1, cc2);

	/*
	 * Some adapters need a clean slate at startup, and won't recover
	 * otherwise. So do not try to be fancy and force a clean disconnect.
	 */
	tcpm_set_state(port, PORT_RESET, 0);
}

static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
{
	struct tcpm_port *port = typec_get_drvdata(p);

	mutex_lock(&port->lock);
	if (type == port->port_type)
		goto port_unlock;

	port->port_type = type;

	if (!port->connected) {
		tcpm_set_state(port, PORT_RESET, 0);
	} else if (type == TYPEC_PORT_SNK) {
		if (!(port->pwr_role == TYPEC_SINK &&
		      port->data_role == TYPEC_DEVICE))
			tcpm_set_state(port, PORT_RESET, 0);
	} else if (type == TYPEC_PORT_SRC) {
		if (!(port->pwr_role == TYPEC_SOURCE &&
		      port->data_role == TYPEC_HOST))
			tcpm_set_state(port, PORT_RESET, 0);
	}

port_unlock:
	mutex_unlock(&port->lock);
	return 0;
}

static const struct typec_operations tcpm_ops = {
	.try_role = tcpm_try_role,
	.dr_set = tcpm_dr_set,
	.pr_set = tcpm_pr_set,
	.vconn_set = tcpm_vconn_set,
	.port_type_set = tcpm_port_type_set
};

void tcpm_tcpc_reset(struct tcpm_port *port)
{
	mutex_lock(&port->lock);
	/* XXX: Maintain PD connection if possible? */
	tcpm_init(port);
	mutex_unlock(&port->lock);
}
EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);

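/*
 * tcpm_fw_get_caps() - parse port capabilities from firmware (DT or ACPI).
 *
 * Reads the generic Type-C connector properties via typec_get_fw_cap() and
 * then the TCPM specific ones: "pd-disable", "slow-charger-loop",
 * "source-pdos", "typec-power-opmode" (non-PD sources), "self-powered",
 * "sink-pdos", "op-sink-microwatt", "new-source-frs-typec-current",
 * "sink-vdos" and "sink-vdos-v1".  Returns 0 on success or a negative errno.
 */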
static int tcpm_fw_get_caps(struct tcpm_port *port,
			    struct fwnode_handle *fwnode)
{
	const char *opmode_str;
	int ret;
	u32 mw, frs_current;

	if (!fwnode)
		return -EINVAL;

	/*
	 * This fwnode has a "compatible" property, but is never populated as a
	 * struct device. Instead we simply parse it to read the properties.
	 * This breaks fw_devlink=on. To maintain backward compatibility
	 * with existing DT files, we work around this by deleting any
	 * fwnode_links to/from this fwnode.
	 */
	fw_devlink_purge_absent_suppliers(fwnode);

	ret = typec_get_fw_cap(&port->typec_caps, fwnode);
	if (ret < 0)
		return ret;

	port->port_type = port->typec_caps.type;
	port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");

	port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
	if (port->port_type == TYPEC_PORT_SNK)
		goto sink;

	/* Get Source PDOs for the PD port or Source Rp value for the non-PD port */
	if (port->pd_supported) {
		ret = fwnode_property_count_u32(fwnode, "source-pdos");
		if (ret == 0)
			return -EINVAL;
		else if (ret < 0)
			return ret;

		port->nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
		ret = fwnode_property_read_u32_array(fwnode, "source-pdos",
						     port->src_pdo, port->nr_src_pdo);
		if (ret)
			return ret;
		ret = tcpm_validate_caps(port, port->src_pdo, port->nr_src_pdo);
		if (ret)
			return ret;
	} else {
		ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
		if (ret)
			return ret;
		ret = typec_find_pwr_opmode(opmode_str);
		if (ret < 0)
			return ret;
		port->src_rp = tcpm_pwr_opmode_to_rp(ret);
	}

	if (port->port_type == TYPEC_PORT_SRC)
		return 0;

sink:
	port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");

	if (!port->pd_supported)
		return 0;

	/* Get sink pdos */
	ret = fwnode_property_count_u32(fwnode, "sink-pdos");
	if (ret <= 0)
		return -EINVAL;

	port->nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
	ret = fwnode_property_read_u32_array(fwnode, "sink-pdos",
					     port->snk_pdo, port->nr_snk_pdo);
	if ((ret < 0) || tcpm_validate_caps(port, port->snk_pdo,
					    port->nr_snk_pdo))
		return -EINVAL;

	if (fwnode_property_read_u32(fwnode, "op-sink-microwatt", &mw) < 0)
		return -EINVAL;
	port->operating_snk_mw = mw / 1000;

	/* FRS can only be supported by DRP ports */
	if (port->port_type == TYPEC_PORT_DRP) {
		ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
					       &frs_current);
		if (ret >= 0 && frs_current <= FRS_5V_3A)
			port->new_source_frs_current = frs_current;
	}

	/* sink-vdos is optional */
	ret = fwnode_property_count_u32(fwnode, "sink-vdos");
	if (ret < 0)
		ret = 0;

	port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
	if (port->nr_snk_vdo) {
		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos",
						     port->snk_vdo,
						     port->nr_snk_vdo);
		if (ret < 0)
			return ret;
	}

	/* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
	if (port->nr_snk_vdo) {
		ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
		if (ret < 0)
			return ret;
		else if (ret == 0)
			return -ENODATA;

		port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
						     port->snk_vdo_v1,
						     port->nr_snk_vdo_v1);
		if (ret < 0)
			return ret;
	}

	return 0;
}

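/*
 * Illustrative device tree fragment for the properties parsed above.  The
 * values are made up; the PDO_FIXED()/PDO_PPS_APDO() helpers are expected to
 * come from dt-bindings/usb/pd.h:
 *
 *	connector {
 *		compatible = "usb-c-connector";
 *		power-role = "dual";
 *		data-role = "dual";
 *		source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
 *		sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)
 *			     PDO_PPS_APDO(5000, 11000, 3000)>;
 *		op-sink-microwatt = <10000000>;
 *	};
 */
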
/* Power Supply access to expose source power information */
enum tcpm_psy_online_states {
	TCPM_PSY_OFFLINE = 0,
	TCPM_PSY_FIXED_ONLINE,
	TCPM_PSY_PROG_ONLINE,
};

static enum power_supply_property tcpm_psy_props[] = {
	POWER_SUPPLY_PROP_USB_TYPE,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_MIN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_MAX,
	POWER_SUPPLY_PROP_CURRENT_NOW,
};

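/*
 * The power supply core reports voltages in microvolts and currents in
 * microamps, while tcpm tracks millivolts and milliamps internally; hence
 * the "* 1000" scaling in the getters below and the "/ 1000" in the setters.
 */
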
static int tcpm_psy_get_online(struct tcpm_port *port,
			       union power_supply_propval *val)
{
	if (port->vbus_charge) {
		if (port->pps_data.active)
			val->intval = TCPM_PSY_PROG_ONLINE;
		else
			val->intval = TCPM_PSY_FIXED_ONLINE;
	} else {
		val->intval = TCPM_PSY_OFFLINE;
	}

	return 0;
}

static int tcpm_psy_get_voltage_min(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	if (port->pps_data.active)
		val->intval = port->pps_data.min_volt * 1000;
	else
		val->intval = port->supply_voltage * 1000;

	return 0;
}

static int tcpm_psy_get_voltage_max(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	if (port->pps_data.active)
		val->intval = port->pps_data.max_volt * 1000;
	else
		val->intval = port->supply_voltage * 1000;

	return 0;
}

static int tcpm_psy_get_voltage_now(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	val->intval = port->supply_voltage * 1000;

	return 0;
}

static int tcpm_psy_get_current_max(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	if (port->pps_data.active)
		val->intval = port->pps_data.max_curr * 1000;
	else
		val->intval = port->current_limit * 1000;

	return 0;
}

static int tcpm_psy_get_current_now(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	val->intval = port->current_limit * 1000;

	return 0;
}

static int tcpm_psy_get_prop(struct power_supply *psy,
			     enum power_supply_property psp,
			     union power_supply_propval *val)
{
	struct tcpm_port *port = power_supply_get_drvdata(psy);
	int ret = 0;

	switch (psp) {
	case POWER_SUPPLY_PROP_USB_TYPE:
		val->intval = port->usb_type;
		break;
	case POWER_SUPPLY_PROP_ONLINE:
		ret = tcpm_psy_get_online(port, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
		ret = tcpm_psy_get_voltage_min(port, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
		ret = tcpm_psy_get_voltage_max(port, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		ret = tcpm_psy_get_voltage_now(port, val);
		break;
	case POWER_SUPPLY_PROP_CURRENT_MAX:
		ret = tcpm_psy_get_current_max(port, val);
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		ret = tcpm_psy_get_current_now(port, val);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int tcpm_psy_set_online(struct tcpm_port *port,
			       const union power_supply_propval *val)
{
	int ret;

	switch (val->intval) {
	case TCPM_PSY_FIXED_ONLINE:
		ret = tcpm_pps_activate(port, false);
		break;
	case TCPM_PSY_PROG_ONLINE:
		ret = tcpm_pps_activate(port, true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int tcpm_psy_set_prop(struct power_supply *psy,
			     enum power_supply_property psp,
			     const union power_supply_propval *val)
{
	struct tcpm_port *port = power_supply_get_drvdata(psy);
	int ret;

	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		ret = tcpm_psy_set_online(port, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		if (val->intval < port->pps_data.min_volt * 1000 ||
		    val->intval > port->pps_data.max_volt * 1000)
			ret = -EINVAL;
		else
			ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		if (val->intval > port->pps_data.max_curr * 1000)
			ret = -EINVAL;
		else
			ret = tcpm_pps_set_op_curr(port, val->intval / 1000);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	power_supply_changed(port->psy);
	return ret;
}

static int tcpm_psy_prop_writeable(struct power_supply *psy,
				   enum power_supply_property psp)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		return 1;
	default:
		return 0;
	}
}

static enum power_supply_usb_type tcpm_psy_usb_types[] = {
	POWER_SUPPLY_USB_TYPE_C,
	POWER_SUPPLY_USB_TYPE_PD,
	POWER_SUPPLY_USB_TYPE_PD_PPS,
};

static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";

static int devm_tcpm_psy_register(struct tcpm_port *port)
{
	struct power_supply_config psy_cfg = {};
	const char *port_dev_name = dev_name(port->dev);
	size_t psy_name_len = strlen(tcpm_psy_name_prefix) +
			      strlen(port_dev_name) + 1;
	char *psy_name;

	psy_cfg.drv_data = port;
	psy_cfg.fwnode = dev_fwnode(port->dev);
	psy_name = devm_kzalloc(port->dev, psy_name_len, GFP_KERNEL);
	if (!psy_name)
		return -ENOMEM;

	snprintf(psy_name, psy_name_len, "%s%s", tcpm_psy_name_prefix,
		 port_dev_name);
	port->psy_desc.name = psy_name;
	port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
	port->psy_desc.usb_types = tcpm_psy_usb_types;
	port->psy_desc.num_usb_types = ARRAY_SIZE(tcpm_psy_usb_types);
	port->psy_desc.properties = tcpm_psy_props;
	port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props);
	port->psy_desc.get_property = tcpm_psy_get_prop;
	port->psy_desc.set_property = tcpm_psy_set_prop;
	port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable;

	port->usb_type = POWER_SUPPLY_USB_TYPE_C;

	port->psy = devm_power_supply_register(port->dev, &port->psy_desc,
					       &psy_cfg);

	return PTR_ERR_OR_ZERO(port->psy);
}

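/*
 * Illustrative sketch (not part of the driver) of how another kernel consumer
 * could drive PPS through the supply registered above.  The supply name is
 * hypothetical; it follows the "tcpm-source-psy-<device name>" pattern, and
 * user space can do the same through the matching sysfs attributes (online,
 * voltage_now, current_now), all in microvolt/microamp units:
 *
 *	union power_supply_propval val;
 *	struct power_supply *psy;
 *
 *	psy = power_supply_get_by_name("tcpm-source-psy-4-0022");
 *	if (!psy)
 *		return -ENODEV;
 *
 *	val.intval = TCPM_PSY_PROG_ONLINE;	// enter PPS mode
 *	power_supply_set_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
 *
 *	val.intval = 9000000;			// request 9 V, in uV
 *	power_supply_set_property(psy, POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
 *
 *	power_supply_put(psy);
 */
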
static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);

	if (port->registered)
		kthread_queue_work(port->wq, &port->state_machine);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);

	if (port->registered)
		kthread_queue_work(port->wq, &port->vdm_state_machine);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);

	if (port->registered)
		kthread_queue_work(port->wq, &port->enable_frs);
	return HRTIMER_NORESTART;
}

static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
{
	struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);

	if (port->registered)
		kthread_queue_work(port->wq, &port->send_discover_work);
	return HRTIMER_NORESTART;
}

struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
	struct tcpm_port *port;
	int err;

	if (!dev || !tcpc ||
	    !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
	    !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
	    !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
		return ERR_PTR(-EINVAL);

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	port->dev = dev;
	port->tcpc = tcpc;

	mutex_init(&port->lock);
	mutex_init(&port->swap_lock);

	port->wq = kthread_create_worker(0, dev_name(dev));
	if (IS_ERR(port->wq))
		return ERR_CAST(port->wq);
	sched_set_fifo(port->wq->task);

	kthread_init_work(&port->state_machine, tcpm_state_machine_work);
	kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
	kthread_init_work(&port->event_work, tcpm_pd_event_handler);
	kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
	kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
	hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->state_machine_timer.function = state_machine_timer_handler;
	hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
	hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->enable_frs_timer.function = enable_frs_timer_handler;
	hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->send_discover_timer.function = send_discover_timer_handler;

	spin_lock_init(&port->pd_event_lock);

	init_completion(&port->tx_complete);
	init_completion(&port->swap_complete);
	init_completion(&port->pps_complete);
	tcpm_debugfs_init(port);

	err = tcpm_fw_get_caps(port, tcpc->fwnode);
	if (err < 0)
		goto out_destroy_wq;

	port->try_role = port->typec_caps.prefer_role;

	port->typec_caps.fwnode = tcpc->fwnode;
	port->typec_caps.revision = 0x0120;	/* Type-C spec release 1.2 */
	port->typec_caps.pd_revision = 0x0300;	/* USB-PD spec release 3.0 */
	port->typec_caps.svdm_version = SVDM_VER_2_0;
	port->typec_caps.driver_data = port;
	port->typec_caps.ops = &tcpm_ops;
	port->typec_caps.orientation_aware = 1;

	port->partner_desc.identity = &port->partner_ident;
	port->port_type = port->typec_caps.type;

	port->role_sw = usb_role_switch_get(port->dev);
	if (IS_ERR(port->role_sw)) {
		err = PTR_ERR(port->role_sw);
		goto out_destroy_wq;
	}

	err = devm_tcpm_psy_register(port);
	if (err)
		goto out_role_sw_put;
	power_supply_changed(port->psy);

	port->typec_port = typec_register_port(port->dev, &port->typec_caps);
	if (IS_ERR(port->typec_port)) {
		err = PTR_ERR(port->typec_port);
		goto out_role_sw_put;
	}

	typec_port_register_altmodes(port->typec_port,
				     &tcpm_altmode_ops, port,
				     port->port_altmode, ALTMODE_DISCOVERY_MAX);
	port->registered = true;

	mutex_lock(&port->lock);
	tcpm_init(port);
	mutex_unlock(&port->lock);

	tcpm_log(port, "%s: registered", dev_name(dev));
	return port;

out_role_sw_put:
	usb_role_switch_put(port->role_sw);
out_destroy_wq:
	tcpm_debugfs_exit(port);
	kthread_destroy_worker(port->wq);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(tcpm_register_port);

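/*
 * Illustrative sketch (not part of this file) of how a low-level TCPC driver
 * hooks into tcpm.  All "my_*" names are hypothetical; only the callbacks
 * checked in tcpm_register_port() plus .init are shown, and a real driver
 * fills in many more.  The matching remove path calls
 * tcpm_unregister_port(chip->port).
 *
 *	static int my_tcpc_probe(struct i2c_client *client)
 *	{
 *		struct my_tcpc *chip;
 *
 *		chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
 *		if (!chip)
 *			return -ENOMEM;
 *
 *		chip->tcpc.fwnode = device_get_named_child_node(&client->dev,
 *								"connector");
 *		chip->tcpc.init = my_tcpc_init;
 *		chip->tcpc.get_vbus = my_tcpc_get_vbus;
 *		chip->tcpc.set_vbus = my_tcpc_set_vbus;
 *		chip->tcpc.set_cc = my_tcpc_set_cc;
 *		chip->tcpc.get_cc = my_tcpc_get_cc;
 *		chip->tcpc.set_polarity = my_tcpc_set_polarity;
 *		chip->tcpc.set_vconn = my_tcpc_set_vconn;
 *		chip->tcpc.set_pd_rx = my_tcpc_set_pd_rx;
 *		chip->tcpc.set_roles = my_tcpc_set_roles;
 *		chip->tcpc.pd_transmit = my_tcpc_pd_transmit;
 *
 *		chip->port = tcpm_register_port(&client->dev, &chip->tcpc);
 *		return PTR_ERR_OR_ZERO(chip->port);
 *	}
 */
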
void tcpm_unregister_port(struct tcpm_port *port)
{
	int i;

	port->registered = false;
	kthread_destroy_worker(port->wq);

	hrtimer_cancel(&port->send_discover_timer);
	hrtimer_cancel(&port->enable_frs_timer);
	hrtimer_cancel(&port->vdm_state_machine_timer);
	hrtimer_cancel(&port->state_machine_timer);

	tcpm_reset_port(port);
	for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
		typec_unregister_altmode(port->port_altmode[i]);
	typec_unregister_port(port->typec_port);
	usb_role_switch_put(port->role_sw);
	tcpm_debugfs_exit(port);
}
EXPORT_SYMBOL_GPL(tcpm_unregister_port);

MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
MODULE_DESCRIPTION("USB Type-C Port Manager");
MODULE_LICENSE("GPL");