cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

usb4.c (50831B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * USB4 specific functionality
      4 *
      5 * Copyright (C) 2019, Intel Corporation
      6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
      7 *	    Rajmohan Mani <rajmohan.mani@intel.com>
      8 */
      9
     10#include <linux/delay.h>
     11#include <linux/ktime.h>
     12
     13#include "sb_regs.h"
     14#include "tb.h"
     15
     16#define USB4_DATA_RETRIES		3
     17
     18enum usb4_sb_target {
     19	USB4_SB_TARGET_ROUTER,
     20	USB4_SB_TARGET_PARTNER,
     21	USB4_SB_TARGET_RETIMER,
     22};
     23
     24#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
     25#define USB4_NVM_READ_OFFSET_SHIFT	2
     26#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
     27#define USB4_NVM_READ_LENGTH_SHIFT	24
     28
     29#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
     30#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT
     31
     32#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
     33#define USB4_DROM_ADDRESS_SHIFT		2
     34#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
     35#define USB4_DROM_SIZE_SHIFT		15
     36
     37#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)
     38
     39#define USB4_BA_LENGTH_MASK		GENMASK(7, 0)
     40#define USB4_BA_INDEX_MASK		GENMASK(15, 0)
     41
     42enum usb4_ba_index {
     43	USB4_BA_MAX_USB3 = 0x1,
     44	USB4_BA_MIN_DP_AUX = 0x2,
     45	USB4_BA_MIN_DP_MAIN = 0x3,
     46	USB4_BA_MAX_PCIE = 0x4,
     47	USB4_BA_MAX_HI = 0x5,
     48};
     49
     50#define USB4_BA_VALUE_MASK		GENMASK(31, 16)
     51#define USB4_BA_VALUE_SHIFT		16
     52
     53static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
     54				 u32 *metadata, u8 *status,
     55				 const void *tx_data, size_t tx_dwords,
     56				 void *rx_data, size_t rx_dwords)
     57{
     58	u32 val;
     59	int ret;
     60
     61	if (metadata) {
     62		ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
     63		if (ret)
     64			return ret;
     65	}
     66	if (tx_dwords) {
     67		ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
     68				  tx_dwords);
     69		if (ret)
     70			return ret;
     71	}
     72
     73	val = opcode | ROUTER_CS_26_OV;
     74	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
     75	if (ret)
     76		return ret;
     77
     78	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
     79	if (ret)
     80		return ret;
     81
     82	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
     83	if (ret)
     84		return ret;
     85
     86	if (val & ROUTER_CS_26_ONS)
     87		return -EOPNOTSUPP;
     88
     89	if (status)
     90		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
     91			ROUTER_CS_26_STATUS_SHIFT;
     92
     93	if (metadata) {
     94		ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
     95		if (ret)
     96			return ret;
     97	}
     98	if (rx_dwords) {
     99		ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
    100				 rx_dwords);
    101		if (ret)
    102			return ret;
    103	}
    104
    105	return 0;
    106}
    107
    108static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
    109			    u8 *status, const void *tx_data, size_t tx_dwords,
    110			    void *rx_data, size_t rx_dwords)
    111{
    112	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
    113
    114	if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS)
    115		return -EINVAL;
    116
    117	/*
    118	 * If the connection manager implementation provides USB4 router
    119	 * operation proxy callback, call it here instead of running the
    120	 * operation natively.
    121	 */
    122	if (cm_ops->usb4_switch_op) {
    123		int ret;
    124
    125		ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
    126					     tx_data, tx_dwords, rx_data,
    127					     rx_dwords);
    128		if (ret != -EOPNOTSUPP)
    129			return ret;
    130
    131		/*
    132		 * If the proxy was not supported then run the native
    133		 * router operation instead.
    134		 */
    135	}
    136
    137	return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
    138				     tx_dwords, rx_data, rx_dwords);
    139}
    140
    141static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
    142				 u32 *metadata, u8 *status)
    143{
    144	return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
    145}
    146
    147static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
    148				      u32 *metadata, u8 *status,
    149				      const void *tx_data, size_t tx_dwords,
    150				      void *rx_data, size_t rx_dwords)
    151{
    152	return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
    153				tx_dwords, rx_data, rx_dwords);
    154}
    155
    156static void usb4_switch_check_wakes(struct tb_switch *sw)
    157{
    158	struct tb_port *port;
    159	bool wakeup = false;
    160	u32 val;
    161
    162	if (!device_may_wakeup(&sw->dev))
    163		return;
    164
    165	if (tb_route(sw)) {
    166		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
    167			return;
    168
    169		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
    170			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
    171			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");
    172
    173		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
    174	}
    175
    176	/* Check for any connected downstream ports for USB4 wake */
    177	tb_switch_for_each_port(sw, port) {
    178		if (!tb_port_has_remote(port))
    179			continue;
    180
    181		if (tb_port_read(port, &val, TB_CFG_PORT,
    182				 port->cap_usb4 + PORT_CS_18, 1))
    183			break;
    184
    185		tb_port_dbg(port, "USB4 wake: %s\n",
    186			    (val & PORT_CS_18_WOU4S) ? "yes" : "no");
    187
    188		if (val & PORT_CS_18_WOU4S)
    189			wakeup = true;
    190	}
    191
    192	if (wakeup)
    193		pm_wakeup_event(&sw->dev, 0);
    194}
    195
    196static bool link_is_usb4(struct tb_port *port)
    197{
    198	u32 val;
    199
    200	if (!port->cap_usb4)
    201		return false;
    202
    203	if (tb_port_read(port, &val, TB_CFG_PORT,
    204			 port->cap_usb4 + PORT_CS_18, 1))
    205		return false;
    206
    207	return !(val & PORT_CS_18_TCM);
    208}
    209
    210/**
    211 * usb4_switch_setup() - Additional setup for USB4 device
    212 * @sw: USB4 router to setup
    213 *
    214 * USB4 routers need additional settings in order to enable all the
    215 * tunneling. This function enables USB and PCIe tunneling if it can be
     216 * enabled (e.g. the parent switch also supports them). If USB tunneling
     217 * is not available for some reason (for example there is a Thunderbolt 3
     218 * switch upstream) then the internal xHCI controller is enabled
    219 * instead.
    220 */
    221int usb4_switch_setup(struct tb_switch *sw)
    222{
    223	struct tb_port *downstream_port;
    224	struct tb_switch *parent;
    225	bool tbt3, xhci;
    226	u32 val = 0;
    227	int ret;
    228
    229	usb4_switch_check_wakes(sw);
    230
    231	if (!tb_route(sw))
    232		return 0;
    233
    234	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
    235	if (ret)
    236		return ret;
    237
    238	parent = tb_switch_parent(sw);
    239	downstream_port = tb_port_at(tb_route(sw), parent);
    240	sw->link_usb4 = link_is_usb4(downstream_port);
    241	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");
    242
    243	xhci = val & ROUTER_CS_6_HCI;
    244	tbt3 = !(val & ROUTER_CS_6_TNS);
    245
    246	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
    247		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");
    248
    249	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
    250	if (ret)
    251		return ret;
    252
    253	if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
    254	    tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
    255		val |= ROUTER_CS_5_UTO;
    256		xhci = false;
    257	}
    258
    259	/*
    260	 * Only enable PCIe tunneling if the parent router supports it
    261	 * and it is not disabled.
    262	 */
    263	if (tb_acpi_may_tunnel_pcie() &&
    264	    tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
    265		val |= ROUTER_CS_5_PTO;
    266		/*
    267		 * xHCI can be enabled if PCIe tunneling is supported
     268		 * and the parent does not have any USB3 downstream
    269		 * adapters (so we cannot do USB 3.x tunneling).
    270		 */
    271		if (xhci)
    272			val |= ROUTER_CS_5_HCO;
    273	}
    274
    275	/* TBT3 supported by the CM */
    276	val |= ROUTER_CS_5_C3S;
    277	/* Tunneling configuration is ready now */
    278	val |= ROUTER_CS_5_CV;
    279
    280	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
    281	if (ret)
    282		return ret;
    283
    284	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
    285				      ROUTER_CS_6_CR, 50);
    286}
    287
    288/**
    289 * usb4_switch_read_uid() - Read UID from USB4 router
    290 * @sw: USB4 router
    291 * @uid: UID is stored here
    292 *
    293 * Reads 64-bit UID from USB4 router config space.
    294 */
    295int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
    296{
    297	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
    298}
    299
    300static int usb4_switch_drom_read_block(void *data,
    301				       unsigned int dwaddress, void *buf,
    302				       size_t dwords)
    303{
    304	struct tb_switch *sw = data;
    305	u8 status = 0;
    306	u32 metadata;
    307	int ret;
    308
    309	metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
    310	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
    311		USB4_DROM_ADDRESS_MASK;
    312
    313	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
    314				  &status, NULL, 0, buf, dwords);
    315	if (ret)
    316		return ret;
    317
    318	return status ? -EIO : 0;
    319}
    320
    321/**
    322 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
    323 * @sw: USB4 router
    324 * @address: Byte address inside DROM to start reading
    325 * @buf: Buffer where the DROM content is stored
    326 * @size: Number of bytes to read from DROM
    327 *
     328 * Uses USB4 router operations to read router DROM. For device routers
     329 * this should always work but host routers may return %-EOPNOTSUPP in
     330 * which case the host router does not have a DROM.
    331 */
    332int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
    333			  size_t size)
    334{
    335	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
    336				usb4_switch_drom_read_block, sw);
    337}
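
/*
 * Editor's illustrative sketch, not part of the original file: a
 * hypothetical caller of usb4_switch_drom_read() that treats
 * %-EOPNOTSUPP as "host router without DROM", as described in the
 * kernel-doc above. The helper name and error policy are made up.
 */
static int example_read_drom_chunk(struct tb_switch *sw, void *buf,
				   size_t size)
{
	int ret;

	ret = usb4_switch_drom_read(sw, 0, buf, size);
	if (ret == -EOPNOTSUPP) {
		/* Host router has no DROM, nothing to parse */
		return 0;
	}

	return ret;
}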
    338
    339/**
    340 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
    341 * @sw: USB4 router
    342 *
    343 * Checks whether conditions are met so that lane bonding can be
    344 * established with the upstream router. Call only for device routers.
    345 */
    346bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
    347{
    348	struct tb_port *up;
    349	int ret;
    350	u32 val;
    351
    352	up = tb_upstream_port(sw);
    353	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
    354	if (ret)
    355		return false;
    356
    357	return !!(val & PORT_CS_18_BE);
    358}
    359
    360/**
     361 * usb4_switch_set_wake() - Enable/disable wake
    362 * @sw: USB4 router
    363 * @flags: Wakeup flags (%0 to disable)
    364 *
    365 * Enables/disables router to wake up from sleep.
    366 */
    367int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
    368{
    369	struct tb_port *port;
    370	u64 route = tb_route(sw);
    371	u32 val;
    372	int ret;
    373
    374	/*
    375	 * Enable wakes coming from all USB4 downstream ports (from
    376	 * child routers). For device routers do this also for the
    377	 * upstream USB4 port.
    378	 */
    379	tb_switch_for_each_port(sw, port) {
    380		if (!tb_port_is_null(port))
    381			continue;
    382		if (!route && tb_is_upstream_port(port))
    383			continue;
    384		if (!port->cap_usb4)
    385			continue;
    386
    387		ret = tb_port_read(port, &val, TB_CFG_PORT,
    388				   port->cap_usb4 + PORT_CS_19, 1);
    389		if (ret)
    390			return ret;
    391
    392		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
    393
    394		if (tb_is_upstream_port(port)) {
    395			val |= PORT_CS_19_WOU4;
    396		} else {
    397			bool configured = val & PORT_CS_19_PC;
    398
    399			if ((flags & TB_WAKE_ON_CONNECT) && !configured)
    400				val |= PORT_CS_19_WOC;
    401			if ((flags & TB_WAKE_ON_DISCONNECT) && configured)
    402				val |= PORT_CS_19_WOD;
    403			if ((flags & TB_WAKE_ON_USB4) && configured)
    404				val |= PORT_CS_19_WOU4;
    405		}
    406
    407		ret = tb_port_write(port, &val, TB_CFG_PORT,
    408				    port->cap_usb4 + PORT_CS_19, 1);
    409		if (ret)
    410			return ret;
    411	}
    412
    413	/*
    414	 * Enable wakes from PCIe, USB 3.x and DP on this router. Only
    415	 * needed for device routers.
    416	 */
    417	if (route) {
    418		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
    419		if (ret)
    420			return ret;
    421
    422		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
    423		if (flags & TB_WAKE_ON_USB3)
    424			val |= ROUTER_CS_5_WOU;
    425		if (flags & TB_WAKE_ON_PCIE)
    426			val |= ROUTER_CS_5_WOP;
    427		if (flags & TB_WAKE_ON_DP)
    428			val |= ROUTER_CS_5_WOD;
    429
    430		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
    431		if (ret)
    432			return ret;
    433	}
    434
    435	return 0;
    436}
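
/*
 * Editor's illustrative sketch, not part of the original file: how a
 * caller might arm wakes before suspend and disarm them on resume with
 * the TB_WAKE_ON_* flags handled above. The helper name and the flag
 * selection are made up for illustration.
 */
static int example_arm_wakes(struct tb_switch *sw, bool suspend)
{
	unsigned int flags = 0;

	if (suspend)
		flags = TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;

	return usb4_switch_set_wake(sw, flags);
}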
    437
    438/**
    439 * usb4_switch_set_sleep() - Prepare the router to enter sleep
    440 * @sw: USB4 router
    441 *
    442 * Sets sleep bit for the router. Returns when the router sleep ready
    443 * bit has been asserted.
    444 */
    445int usb4_switch_set_sleep(struct tb_switch *sw)
    446{
    447	int ret;
    448	u32 val;
    449
    450	/* Set sleep bit and wait for sleep ready to be asserted */
    451	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
    452	if (ret)
    453		return ret;
    454
    455	val |= ROUTER_CS_5_SLP;
    456
    457	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
    458	if (ret)
    459		return ret;
    460
    461	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
    462				      ROUTER_CS_6_SLPR, 500);
    463}
    464
    465/**
    466 * usb4_switch_nvm_sector_size() - Return router NVM sector size
    467 * @sw: USB4 router
    468 *
    469 * If the router supports NVM operations this function returns the NVM
    470 * sector size in bytes. If NVM operations are not supported returns
    471 * %-EOPNOTSUPP.
    472 */
    473int usb4_switch_nvm_sector_size(struct tb_switch *sw)
    474{
    475	u32 metadata;
    476	u8 status;
    477	int ret;
    478
    479	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
    480			     &status);
    481	if (ret)
    482		return ret;
    483
    484	if (status)
    485		return status == 0x2 ? -EOPNOTSUPP : -EIO;
    486
    487	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
    488}
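
/*
 * Editor's illustrative sketch, not part of the original file: the
 * return value above doubles as the sector size, so a caller has to
 * separate "NVM not supported" from other errors before using it.
 * The helper name is made up for illustration.
 */
static int example_nvm_sector_size(struct tb_switch *sw, size_t *sector_size)
{
	int ret = usb4_switch_nvm_sector_size(sw);

	if (ret == -EOPNOTSUPP)
		return 0;	/* router does not support NVM operations */
	if (ret < 0)
		return ret;	/* some other failure */

	*sector_size = ret;
	return 1;		/* NVM operations available */
}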
    489
    490static int usb4_switch_nvm_read_block(void *data,
    491	unsigned int dwaddress, void *buf, size_t dwords)
    492{
    493	struct tb_switch *sw = data;
    494	u8 status = 0;
    495	u32 metadata;
    496	int ret;
    497
    498	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
    499		   USB4_NVM_READ_LENGTH_MASK;
    500	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
    501		   USB4_NVM_READ_OFFSET_MASK;
    502
    503	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
    504				  &status, NULL, 0, buf, dwords);
    505	if (ret)
    506		return ret;
    507
    508	return status ? -EIO : 0;
    509}
    510
    511/**
    512 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
    513 * @sw: USB4 router
    514 * @address: Starting address in bytes
    515 * @buf: Read data is placed here
    516 * @size: How many bytes to read
    517 *
    518 * Reads NVM contents of the router. If NVM is not supported returns
    519 * %-EOPNOTSUPP.
    520 */
    521int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
    522			 size_t size)
    523{
    524	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
    525				usb4_switch_nvm_read_block, sw);
    526}
    527
    528/**
    529 * usb4_switch_nvm_set_offset() - Set NVM write offset
    530 * @sw: USB4 router
    531 * @address: Start offset
    532 *
    533 * Explicitly sets NVM write offset. Normally when writing to NVM this
    534 * is done automatically by usb4_switch_nvm_write().
    535 *
     536 * Returns %0 in case of success and negative errno if there was a failure.
    537 */
    538int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
    539{
    540	u32 metadata, dwaddress;
    541	u8 status = 0;
    542	int ret;
    543
    544	dwaddress = address / 4;
    545	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
    546		   USB4_NVM_SET_OFFSET_MASK;
    547
    548	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
    549			     &status);
    550	if (ret)
    551		return ret;
    552
    553	return status ? -EIO : 0;
    554}
    555
    556static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
    557					    const void *buf, size_t dwords)
    558{
    559	struct tb_switch *sw = data;
    560	u8 status;
    561	int ret;
    562
    563	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
    564				  buf, dwords, NULL, 0);
    565	if (ret)
    566		return ret;
    567
    568	return status ? -EIO : 0;
    569}
    570
    571/**
    572 * usb4_switch_nvm_write() - Write to the router NVM
    573 * @sw: USB4 router
    574 * @address: Start address where to write in bytes
    575 * @buf: Pointer to the data to write
    576 * @size: Size of @buf in bytes
    577 *
    578 * Writes @buf to the router NVM using USB4 router operations. If NVM
    579 * write is not supported returns %-EOPNOTSUPP.
    580 */
    581int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
    582			  const void *buf, size_t size)
    583{
    584	int ret;
    585
    586	ret = usb4_switch_nvm_set_offset(sw, address);
    587	if (ret)
    588		return ret;
    589
    590	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
    591				 usb4_switch_nvm_write_next_block, sw);
    592}
    593
    594/**
    595 * usb4_switch_nvm_authenticate() - Authenticate new NVM
    596 * @sw: USB4 router
    597 *
    598 * After the new NVM has been written via usb4_switch_nvm_write(), this
     599 * function triggers the NVM authentication process. The router gets power
     600 * cycled, and if the authentication is successful the new NVM starts
    601 * running. In case of failure returns negative errno.
    602 *
    603 * The caller should call usb4_switch_nvm_authenticate_status() to read
    604 * the status of the authentication after power cycle. It should be the
    605 * first router operation to avoid the status being lost.
    606 */
    607int usb4_switch_nvm_authenticate(struct tb_switch *sw)
    608{
    609	int ret;
    610
    611	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
    612	switch (ret) {
    613	/*
    614	 * The router is power cycled once NVM_AUTH is started so it is
    615	 * expected to get any of the following errors back.
    616	 */
    617	case -EACCES:
    618	case -ENOTCONN:
    619	case -ETIMEDOUT:
    620		return 0;
    621
    622	default:
    623		return ret;
    624	}
    625}
    626
    627/**
    628 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
    629 * @sw: USB4 router
    630 * @status: Status code of the operation
    631 *
    632 * The function checks if there is status available from the last NVM
    633 * authenticate router operation. If there is status then %0 is returned
    634 * and the status code is placed in @status. Returns negative errno in case
    635 * of failure.
    636 *
    637 * Must be called before any other router operation.
    638 */
    639int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
    640{
    641	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
    642	u16 opcode;
    643	u32 val;
    644	int ret;
    645
    646	if (cm_ops->usb4_switch_nvm_authenticate_status) {
    647		ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
    648		if (ret != -EOPNOTSUPP)
    649			return ret;
    650	}
    651
    652	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
    653	if (ret)
    654		return ret;
    655
    656	/* Check that the opcode is correct */
    657	opcode = val & ROUTER_CS_26_OPCODE_MASK;
    658	if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
    659		if (val & ROUTER_CS_26_OV)
    660			return -EBUSY;
    661		if (val & ROUTER_CS_26_ONS)
    662			return -EOPNOTSUPP;
    663
    664		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
    665			ROUTER_CS_26_STATUS_SHIFT;
    666	} else {
    667		*status = 0;
    668	}
    669
    670	return 0;
    671}
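
/*
 * Editor's illustrative sketch, not part of the original file: the NVM
 * upgrade sequence described in the kernel-docs above. The image is
 * written first, authentication power cycles the router, and the result
 * is read back later with usb4_switch_nvm_authenticate_status() as the
 * first router operation after the power cycle. Helper name is made up.
 */
static int example_nvm_upgrade_start(struct tb_switch *sw, const void *image,
				     size_t size)
{
	int ret;

	ret = usb4_switch_nvm_write(sw, 0, image, size);
	if (ret)
		return ret;

	/* The router power cycles once authentication starts */
	return usb4_switch_nvm_authenticate(sw);
}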
    672
    673/**
    674 * usb4_switch_credits_init() - Read buffer allocation parameters
    675 * @sw: USB4 router
    676 *
    677 * Reads @sw buffer allocation parameters and initializes @sw buffer
     678 * allocation fields accordingly. Specifically @sw->credit_allocation
    679 * is set to %true if these parameters can be used in tunneling.
    680 *
    681 * Returns %0 on success and negative errno otherwise.
    682 */
    683int usb4_switch_credits_init(struct tb_switch *sw)
    684{
    685	int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
    686	int ret, length, i, nports;
    687	const struct tb_port *port;
    688	u32 data[NVM_DATA_DWORDS];
    689	u32 metadata = 0;
    690	u8 status = 0;
    691
    692	memset(data, 0, sizeof(data));
    693	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
    694				  &status, NULL, 0, data, ARRAY_SIZE(data));
    695	if (ret)
    696		return ret;
    697	if (status)
    698		return -EIO;
    699
    700	length = metadata & USB4_BA_LENGTH_MASK;
    701	if (WARN_ON(length > ARRAY_SIZE(data)))
    702		return -EMSGSIZE;
    703
    704	max_usb3 = -1;
    705	min_dp_aux = -1;
    706	min_dp_main = -1;
    707	max_pcie = -1;
    708	max_dma = -1;
    709
    710	tb_sw_dbg(sw, "credit allocation parameters:\n");
    711
    712	for (i = 0; i < length; i++) {
    713		u16 index, value;
    714
    715		index = data[i] & USB4_BA_INDEX_MASK;
    716		value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;
    717
    718		switch (index) {
    719		case USB4_BA_MAX_USB3:
    720			tb_sw_dbg(sw, " USB3: %u\n", value);
    721			max_usb3 = value;
    722			break;
    723		case USB4_BA_MIN_DP_AUX:
    724			tb_sw_dbg(sw, " DP AUX: %u\n", value);
    725			min_dp_aux = value;
    726			break;
    727		case USB4_BA_MIN_DP_MAIN:
    728			tb_sw_dbg(sw, " DP main: %u\n", value);
    729			min_dp_main = value;
    730			break;
    731		case USB4_BA_MAX_PCIE:
    732			tb_sw_dbg(sw, " PCIe: %u\n", value);
    733			max_pcie = value;
    734			break;
    735		case USB4_BA_MAX_HI:
    736			tb_sw_dbg(sw, " DMA: %u\n", value);
    737			max_dma = value;
    738			break;
    739		default:
    740			tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
    741				  index);
    742			break;
    743		}
    744	}
    745
    746	/*
    747	 * Validate the buffer allocation preferences. If we find
    748	 * issues, log a warning and fall back using the hard-coded
    749	 * values.
    750	 */
    751
    752	/* Host router must report baMaxHI */
    753	if (!tb_route(sw) && max_dma < 0) {
    754		tb_sw_warn(sw, "host router is missing baMaxHI\n");
    755		goto err_invalid;
    756	}
    757
    758	nports = 0;
    759	tb_switch_for_each_port(sw, port) {
    760		if (tb_port_is_null(port))
    761			nports++;
    762	}
    763
    764	/* Must have DP buffer allocation (multiple USB4 ports) */
    765	if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
    766		tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
    767		goto err_invalid;
    768	}
    769
    770	tb_switch_for_each_port(sw, port) {
    771		if (tb_port_is_dpout(port) && min_dp_main < 0) {
    772			tb_sw_warn(sw, "missing baMinDPmain");
    773			goto err_invalid;
    774		}
    775		if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
    776		    min_dp_aux < 0) {
    777			tb_sw_warn(sw, "missing baMinDPaux");
    778			goto err_invalid;
    779		}
    780		if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
    781		    max_usb3 < 0) {
    782			tb_sw_warn(sw, "missing baMaxUSB3");
    783			goto err_invalid;
    784		}
    785		if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
    786		    max_pcie < 0) {
    787			tb_sw_warn(sw, "missing baMaxPCIe");
    788			goto err_invalid;
    789		}
    790	}
    791
    792	/*
    793	 * Buffer allocation passed the validation so we can use it in
    794	 * path creation.
    795	 */
    796	sw->credit_allocation = true;
    797	if (max_usb3 > 0)
    798		sw->max_usb3_credits = max_usb3;
    799	if (min_dp_aux > 0)
    800		sw->min_dp_aux_credits = min_dp_aux;
    801	if (min_dp_main > 0)
    802		sw->min_dp_main_credits = min_dp_main;
    803	if (max_pcie > 0)
    804		sw->max_pcie_credits = max_pcie;
    805	if (max_dma > 0)
    806		sw->max_dma_credits = max_dma;
    807
    808	return 0;
    809
    810err_invalid:
    811	return -EINVAL;
    812}
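
/*
 * Editor's note (illustrative, not part of the original file): each
 * dword returned by USB4_SWITCH_OP_BUFFER_ALLOC packs an index into
 * bits 15:0 and a value into bits 31:16. A hypothetical entry
 * 0x00200001 therefore decodes as:
 *
 *	index = 0x00200001 & USB4_BA_INDEX_MASK;	-> 0x1 (baMaxUSB3)
 *	value = (0x00200001 & USB4_BA_VALUE_MASK)
 *			>> USB4_BA_VALUE_SHIFT;		-> 32 credits
 */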
    813
    814/**
    815 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
    816 * @sw: USB4 router
    817 * @in: DP IN adapter
    818 *
    819 * For DP tunneling this function can be used to query availability of
    820 * DP IN resource. Returns true if the resource is available for DP
    821 * tunneling, false otherwise.
    822 */
    823bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
    824{
    825	u32 metadata = in->port;
    826	u8 status;
    827	int ret;
    828
    829	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
    830			     &status);
    831	/*
    832	 * If DP resource allocation is not supported assume it is
    833	 * always available.
    834	 */
    835	if (ret == -EOPNOTSUPP)
    836		return true;
    837	else if (ret)
    838		return false;
    839
    840	return !status;
    841}
    842
    843/**
    844 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
    845 * @sw: USB4 router
    846 * @in: DP IN adapter
    847 *
    848 * Allocates DP IN resource for DP tunneling using USB4 router
    849 * operations. If the resource was allocated returns %0. Otherwise
    850 * returns negative errno, in particular %-EBUSY if the resource is
    851 * already allocated.
    852 */
    853int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
    854{
    855	u32 metadata = in->port;
    856	u8 status;
    857	int ret;
    858
    859	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
    860			     &status);
    861	if (ret == -EOPNOTSUPP)
    862		return 0;
    863	else if (ret)
    864		return ret;
    865
    866	return status ? -EBUSY : 0;
    867}
    868
    869/**
    870 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
    871 * @sw: USB4 router
    872 * @in: DP IN adapter
    873 *
    874 * Releases the previously allocated DP IN resource.
    875 */
    876int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
    877{
    878	u32 metadata = in->port;
    879	u8 status;
    880	int ret;
    881
    882	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
    883			     &status);
    884	if (ret == -EOPNOTSUPP)
    885		return 0;
    886	else if (ret)
    887		return ret;
    888
    889	return status ? -EIO : 0;
    890}
    891
    892static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
    893{
    894	struct tb_port *p;
    895	int usb4_idx = 0;
    896
    897	/* Assume port is primary */
    898	tb_switch_for_each_port(sw, p) {
    899		if (!tb_port_is_null(p))
    900			continue;
    901		if (tb_is_upstream_port(p))
    902			continue;
    903		if (!p->link_nr) {
    904			if (p == port)
    905				break;
    906			usb4_idx++;
    907		}
    908	}
    909
    910	return usb4_idx;
    911}
    912
    913/**
    914 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
    915 * @sw: USB4 router
    916 * @port: USB4 port
    917 *
     918 * USB4 routers have a direct mapping between USB4 ports and PCIe
    919 * downstream adapters where the PCIe topology is extended. This
    920 * function returns the corresponding downstream PCIe adapter or %NULL
    921 * if no such mapping was possible.
    922 */
    923struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
    924					  const struct tb_port *port)
    925{
    926	int usb4_idx = usb4_port_idx(sw, port);
    927	struct tb_port *p;
    928	int pcie_idx = 0;
    929
    930	/* Find PCIe down port matching usb4_port */
    931	tb_switch_for_each_port(sw, p) {
    932		if (!tb_port_is_pcie_down(p))
    933			continue;
    934
    935		if (pcie_idx == usb4_idx)
    936			return p;
    937
    938		pcie_idx++;
    939	}
    940
    941	return NULL;
    942}
    943
    944/**
    945 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
    946 * @sw: USB4 router
    947 * @port: USB4 port
    948 *
     949 * USB4 routers have a direct mapping between USB4 ports and USB 3.x
    950 * downstream adapters where the USB 3.x topology is extended. This
    951 * function returns the corresponding downstream USB 3.x adapter or
    952 * %NULL if no such mapping was possible.
    953 */
    954struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
    955					  const struct tb_port *port)
    956{
    957	int usb4_idx = usb4_port_idx(sw, port);
    958	struct tb_port *p;
    959	int usb_idx = 0;
    960
    961	/* Find USB3 down port matching usb4_port */
    962	tb_switch_for_each_port(sw, p) {
    963		if (!tb_port_is_usb3_down(p))
    964			continue;
    965
    966		if (usb_idx == usb4_idx)
    967			return p;
    968
    969		usb_idx++;
    970	}
    971
    972	return NULL;
    973}
    974
    975/**
    976 * usb4_switch_add_ports() - Add USB4 ports for this router
    977 * @sw: USB4 router
    978 *
     979 * For a USB4 router this finds all USB4 ports and registers devices for
     980 * each. Can be called for any router.
     981 *
     982 * Returns %0 in case of success and negative errno in case of failure.
    983 */
    984int usb4_switch_add_ports(struct tb_switch *sw)
    985{
    986	struct tb_port *port;
    987
    988	if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
    989		return 0;
    990
    991	tb_switch_for_each_port(sw, port) {
    992		struct usb4_port *usb4;
    993
    994		if (!tb_port_is_null(port))
    995			continue;
    996		if (!port->cap_usb4)
    997			continue;
    998
    999		usb4 = usb4_port_device_add(port);
   1000		if (IS_ERR(usb4)) {
   1001			usb4_switch_remove_ports(sw);
   1002			return PTR_ERR(usb4);
   1003		}
   1004
   1005		port->usb4 = usb4;
   1006	}
   1007
   1008	return 0;
   1009}
   1010
   1011/**
   1012 * usb4_switch_remove_ports() - Removes USB4 ports from this router
   1013 * @sw: USB4 router
   1014 *
   1015 * Unregisters previously registered USB4 ports.
   1016 */
   1017void usb4_switch_remove_ports(struct tb_switch *sw)
   1018{
   1019	struct tb_port *port;
   1020
   1021	tb_switch_for_each_port(sw, port) {
   1022		if (port->usb4) {
   1023			usb4_port_device_remove(port->usb4);
   1024			port->usb4 = NULL;
   1025		}
   1026	}
   1027}
   1028
   1029/**
   1030 * usb4_port_unlock() - Unlock USB4 downstream port
   1031 * @port: USB4 port to unlock
   1032 *
   1033 * Unlocks USB4 downstream port so that the connection manager can
   1034 * access the router below this port.
   1035 */
   1036int usb4_port_unlock(struct tb_port *port)
   1037{
   1038	int ret;
   1039	u32 val;
   1040
   1041	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
   1042	if (ret)
   1043		return ret;
   1044
   1045	val &= ~ADP_CS_4_LCK;
   1046	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
   1047}
   1048
   1049static int usb4_port_set_configured(struct tb_port *port, bool configured)
   1050{
   1051	int ret;
   1052	u32 val;
   1053
   1054	if (!port->cap_usb4)
   1055		return -EINVAL;
   1056
   1057	ret = tb_port_read(port, &val, TB_CFG_PORT,
   1058			   port->cap_usb4 + PORT_CS_19, 1);
   1059	if (ret)
   1060		return ret;
   1061
   1062	if (configured)
   1063		val |= PORT_CS_19_PC;
   1064	else
   1065		val &= ~PORT_CS_19_PC;
   1066
   1067	return tb_port_write(port, &val, TB_CFG_PORT,
   1068			     port->cap_usb4 + PORT_CS_19, 1);
   1069}
   1070
   1071/**
   1072 * usb4_port_configure() - Set USB4 port configured
    1073 * @port: USB4 port
   1074 *
   1075 * Sets the USB4 link to be configured for power management purposes.
   1076 */
   1077int usb4_port_configure(struct tb_port *port)
   1078{
   1079	return usb4_port_set_configured(port, true);
   1080}
   1081
   1082/**
   1083 * usb4_port_unconfigure() - Set USB4 port unconfigured
    1084 * @port: USB4 port
   1085 *
   1086 * Sets the USB4 link to be unconfigured for power management purposes.
   1087 */
   1088void usb4_port_unconfigure(struct tb_port *port)
   1089{
   1090	usb4_port_set_configured(port, false);
   1091}
   1092
   1093static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
   1094{
   1095	int ret;
   1096	u32 val;
   1097
   1098	if (!port->cap_usb4)
   1099		return -EINVAL;
   1100
   1101	ret = tb_port_read(port, &val, TB_CFG_PORT,
   1102			   port->cap_usb4 + PORT_CS_19, 1);
   1103	if (ret)
   1104		return ret;
   1105
   1106	if (configured)
   1107		val |= PORT_CS_19_PID;
   1108	else
   1109		val &= ~PORT_CS_19_PID;
   1110
   1111	return tb_port_write(port, &val, TB_CFG_PORT,
   1112			     port->cap_usb4 + PORT_CS_19, 1);
   1113}
   1114
   1115/**
   1116 * usb4_port_configure_xdomain() - Configure port for XDomain
   1117 * @port: USB4 port connected to another host
   1118 *
   1119 * Marks the USB4 port as being connected to another host. Returns %0 in
    1120 * case of success and negative errno in case of failure.
   1121 */
   1122int usb4_port_configure_xdomain(struct tb_port *port)
   1123{
   1124	return usb4_set_xdomain_configured(port, true);
   1125}
   1126
   1127/**
   1128 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
   1129 * @port: USB4 port that was connected to another host
   1130 *
   1131 * Clears USB4 port from being marked as XDomain.
   1132 */
   1133void usb4_port_unconfigure_xdomain(struct tb_port *port)
   1134{
   1135	usb4_set_xdomain_configured(port, false);
   1136}
   1137
   1138static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
   1139				  u32 value, int timeout_msec)
   1140{
   1141	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
   1142
   1143	do {
   1144		u32 val;
   1145		int ret;
   1146
   1147		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
   1148		if (ret)
   1149			return ret;
   1150
   1151		if ((val & bit) == value)
   1152			return 0;
   1153
   1154		usleep_range(50, 100);
   1155	} while (ktime_before(ktime_get(), timeout));
   1156
   1157	return -ETIMEDOUT;
   1158}
   1159
   1160static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
   1161{
   1162	if (dwords > NVM_DATA_DWORDS)
   1163		return -EINVAL;
   1164
   1165	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
   1166			    dwords);
   1167}
   1168
   1169static int usb4_port_write_data(struct tb_port *port, const void *data,
   1170				size_t dwords)
   1171{
   1172	if (dwords > NVM_DATA_DWORDS)
   1173		return -EINVAL;
   1174
   1175	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
   1176			     dwords);
   1177}
   1178
   1179static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
   1180			     u8 index, u8 reg, void *buf, u8 size)
   1181{
   1182	size_t dwords = DIV_ROUND_UP(size, 4);
   1183	int ret;
   1184	u32 val;
   1185
   1186	if (!port->cap_usb4)
   1187		return -EINVAL;
   1188
   1189	val = reg;
   1190	val |= size << PORT_CS_1_LENGTH_SHIFT;
   1191	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
   1192	if (target == USB4_SB_TARGET_RETIMER)
   1193		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
   1194	val |= PORT_CS_1_PND;
   1195
   1196	ret = tb_port_write(port, &val, TB_CFG_PORT,
   1197			    port->cap_usb4 + PORT_CS_1, 1);
   1198	if (ret)
   1199		return ret;
   1200
   1201	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
   1202				     PORT_CS_1_PND, 0, 500);
   1203	if (ret)
   1204		return ret;
   1205
   1206	ret = tb_port_read(port, &val, TB_CFG_PORT,
   1207			    port->cap_usb4 + PORT_CS_1, 1);
   1208	if (ret)
   1209		return ret;
   1210
   1211	if (val & PORT_CS_1_NR)
   1212		return -ENODEV;
   1213	if (val & PORT_CS_1_RC)
   1214		return -EIO;
   1215
   1216	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
   1217}
   1218
   1219static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
   1220			      u8 index, u8 reg, const void *buf, u8 size)
   1221{
   1222	size_t dwords = DIV_ROUND_UP(size, 4);
   1223	int ret;
   1224	u32 val;
   1225
   1226	if (!port->cap_usb4)
   1227		return -EINVAL;
   1228
   1229	if (buf) {
   1230		ret = usb4_port_write_data(port, buf, dwords);
   1231		if (ret)
   1232			return ret;
   1233	}
   1234
   1235	val = reg;
   1236	val |= size << PORT_CS_1_LENGTH_SHIFT;
   1237	val |= PORT_CS_1_WNR_WRITE;
   1238	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
   1239	if (target == USB4_SB_TARGET_RETIMER)
   1240		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
   1241	val |= PORT_CS_1_PND;
   1242
   1243	ret = tb_port_write(port, &val, TB_CFG_PORT,
   1244			    port->cap_usb4 + PORT_CS_1, 1);
   1245	if (ret)
   1246		return ret;
   1247
   1248	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
   1249				     PORT_CS_1_PND, 0, 500);
   1250	if (ret)
   1251		return ret;
   1252
   1253	ret = tb_port_read(port, &val, TB_CFG_PORT,
   1254			    port->cap_usb4 + PORT_CS_1, 1);
   1255	if (ret)
   1256		return ret;
   1257
   1258	if (val & PORT_CS_1_NR)
   1259		return -ENODEV;
   1260	if (val & PORT_CS_1_RC)
   1261		return -EIO;
   1262
   1263	return 0;
   1264}
   1265
   1266static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
   1267			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
   1268{
   1269	ktime_t timeout;
   1270	u32 val;
   1271	int ret;
   1272
   1273	val = opcode;
   1274	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
   1275				 sizeof(val));
   1276	if (ret)
   1277		return ret;
   1278
   1279	timeout = ktime_add_ms(ktime_get(), timeout_msec);
   1280
   1281	do {
   1282		/* Check results */
   1283		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
   1284					&val, sizeof(val));
   1285		if (ret)
   1286			return ret;
   1287
   1288		switch (val) {
   1289		case 0:
   1290			return 0;
   1291
   1292		case USB4_SB_OPCODE_ERR:
   1293			return -EAGAIN;
   1294
   1295		case USB4_SB_OPCODE_ONS:
   1296			return -EOPNOTSUPP;
   1297
   1298		default:
   1299			if (val != opcode)
   1300				return -EIO;
   1301			break;
   1302		}
   1303	} while (ktime_before(ktime_get(), timeout));
   1304
   1305	return -ETIMEDOUT;
   1306}
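
/*
 * Editor's illustrative sketch, not part of the original file: the
 * helpers below follow a common sideband pattern of "write
 * USB4_SB_METADATA, run an opcode with usb4_port_sb_op(), read the
 * result back". A hypothetical router-targeted query could look like
 * this (name and opcode parameter are made up for illustration):
 */
static int example_sb_query(struct tb_port *port, enum usb4_sb_opcode opcode,
			    u32 *metadata)
{
	int ret;

	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, opcode, 500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, metadata,
				 sizeof(*metadata));
}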
   1307
   1308static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
   1309{
   1310	u32 val = !offline;
   1311	int ret;
   1312
   1313	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
   1314				  USB4_SB_METADATA, &val, sizeof(val));
   1315	if (ret)
   1316		return ret;
   1317
   1318	val = USB4_SB_OPCODE_ROUTER_OFFLINE;
   1319	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
   1320				  USB4_SB_OPCODE, &val, sizeof(val));
   1321}
   1322
   1323/**
   1324 * usb4_port_router_offline() - Put the USB4 port to offline mode
   1325 * @port: USB4 port
   1326 *
   1327 * This function puts the USB4 port into offline mode. In this mode the
    1328 * port does not react to hotplug events anymore. This needs to be
    1329 * called before retimer access is done when the USB4 link is not up.
   1330 *
   1331 * Returns %0 in case of success and negative errno if there was an
   1332 * error.
   1333 */
   1334int usb4_port_router_offline(struct tb_port *port)
   1335{
   1336	return usb4_port_set_router_offline(port, true);
   1337}
   1338
   1339/**
   1340 * usb4_port_router_online() - Put the USB4 port back to online
   1341 * @port: USB4 port
   1342 *
   1343 * Makes the USB4 port functional again.
   1344 */
   1345int usb4_port_router_online(struct tb_port *port)
   1346{
   1347	return usb4_port_set_router_offline(port, false);
   1348}
   1349
   1350/**
   1351 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
   1352 * @port: USB4 port
   1353 *
    1354 * This forces the USB4 port to send a broadcast RT transaction which
    1355 * makes the retimers on the link assign an index to themselves. Returns
   1356 * %0 in case of success and negative errno if there was an error.
   1357 */
   1358int usb4_port_enumerate_retimers(struct tb_port *port)
   1359{
   1360	u32 val;
   1361
   1362	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
   1363	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
   1364				  USB4_SB_OPCODE, &val, sizeof(val));
   1365}
   1366
   1367/**
   1368 * usb4_port_clx_supported() - Check if CLx is supported by the link
   1369 * @port: Port to check for CLx support for
   1370 *
   1371 * PORT_CS_18_CPS bit reflects if the link supports CLx including
   1372 * active cables (if connected on the link).
   1373 */
   1374bool usb4_port_clx_supported(struct tb_port *port)
   1375{
   1376	int ret;
   1377	u32 val;
   1378
   1379	ret = tb_port_read(port, &val, TB_CFG_PORT,
   1380			   port->cap_usb4 + PORT_CS_18, 1);
   1381	if (ret)
   1382		return false;
   1383
   1384	return !!(val & PORT_CS_18_CPS);
   1385}
   1386
   1387static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
   1388				       enum usb4_sb_opcode opcode,
   1389				       int timeout_msec)
   1390{
   1391	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
   1392			       timeout_msec);
   1393}
   1394
   1395/**
   1396 * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
   1397 * @port: USB4 port
   1398 * @index: Retimer index
   1399 *
    1400 * Enables sideband channel transactions on SBTX. Can be used when the
    1401 * USB4 link does not go up, for example if there is no device connected.
   1402 */
   1403int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
   1404{
   1405	int ret;
   1406
   1407	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
   1408				   500);
   1409
   1410	if (ret != -ENODEV)
   1411		return ret;
   1412
   1413	/*
   1414	 * Per the USB4 retimer spec, the retimer is not required to
   1415	 * send an RT (Retimer Transaction) response for the first
   1416	 * SET_INBOUND_SBTX command
    1417	 * SET_INBOUND_SBTX command.
   1418	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
   1419				    500);
   1420}
   1421
   1422/**
   1423 * usb4_port_retimer_read() - Read from retimer sideband registers
   1424 * @port: USB4 port
   1425 * @index: Retimer index
   1426 * @reg: Sideband register to read
   1427 * @buf: Data from @reg is stored here
   1428 * @size: Number of bytes to read
   1429 *
   1430 * Function reads retimer sideband registers starting from @reg. The
   1431 * retimer is connected to @port at @index. Returns %0 in case of
   1432 * success, and read data is copied to @buf. If there is no retimer
   1433 * present at given @index returns %-ENODEV. In any other failure
   1434 * returns negative errno.
   1435 */
   1436int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
   1437			   u8 size)
   1438{
   1439	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
   1440				 size);
   1441}
   1442
   1443/**
   1444 * usb4_port_retimer_write() - Write to retimer sideband registers
   1445 * @port: USB4 port
   1446 * @index: Retimer index
   1447 * @reg: Sideband register to write
   1448 * @buf: Data that is written starting from @reg
   1449 * @size: Number of bytes to write
   1450 *
   1451 * Writes retimer sideband registers starting from @reg. The retimer is
   1452 * connected to @port at @index. Returns %0 in case of success. If there
   1453 * is no retimer present at given @index returns %-ENODEV. In any other
   1454 * failure returns negative errno.
   1455 */
   1456int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
   1457			    const void *buf, u8 size)
   1458{
   1459	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
   1460				  size);
   1461}
   1462
   1463/**
   1464 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
   1465 * @port: USB4 port
   1466 * @index: Retimer index
   1467 *
    1468 * If the retimer at @index is the last one (connected directly to the
    1469 * Type-C port) this function returns %1. If it is not, returns %0. If
   1470 * the retimer is not present returns %-ENODEV. Otherwise returns
   1471 * negative errno.
   1472 */
   1473int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
   1474{
   1475	u32 metadata;
   1476	int ret;
   1477
   1478	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
   1479				   500);
   1480	if (ret)
   1481		return ret;
   1482
   1483	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
   1484				     sizeof(metadata));
   1485	return ret ? ret : metadata & 1;
   1486}
   1487
   1488/**
   1489 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
   1490 * @port: USB4 port
   1491 * @index: Retimer index
   1492 *
   1493 * Reads NVM sector size (in bytes) of a retimer at @index. This
   1494 * operation can be used to determine whether the retimer supports NVM
   1495 * upgrade for example. Returns sector size in bytes or negative errno
   1496 * in case of error. Specifically returns %-ENODEV if there is no
   1497 * retimer at @index.
   1498 */
   1499int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
   1500{
   1501	u32 metadata;
   1502	int ret;
   1503
   1504	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
   1505				   500);
   1506	if (ret)
   1507		return ret;
   1508
   1509	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
   1510				     sizeof(metadata));
   1511	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
   1512}
   1513
   1514/**
   1515 * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
   1516 * @port: USB4 port
   1517 * @index: Retimer index
   1518 * @address: Start offset
   1519 *
    1520 * Explicitly sets NVM write offset. Normally when writing to NVM this is
   1521 * done automatically by usb4_port_retimer_nvm_write().
   1522 *
    1523 * Returns %0 in case of success and negative errno if there was a failure.
   1524 */
   1525int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
   1526				     unsigned int address)
   1527{
   1528	u32 metadata, dwaddress;
   1529	int ret;
   1530
   1531	dwaddress = address / 4;
   1532	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
   1533		  USB4_NVM_SET_OFFSET_MASK;
   1534
   1535	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
   1536				      sizeof(metadata));
   1537	if (ret)
   1538		return ret;
   1539
   1540	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
   1541				    500);
   1542}
   1543
   1544struct retimer_info {
   1545	struct tb_port *port;
   1546	u8 index;
   1547};
   1548
   1549static int usb4_port_retimer_nvm_write_next_block(void *data,
   1550	unsigned int dwaddress, const void *buf, size_t dwords)
   1551
   1552{
   1553	const struct retimer_info *info = data;
   1554	struct tb_port *port = info->port;
   1555	u8 index = info->index;
   1556	int ret;
   1557
   1558	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
   1559				      buf, dwords * 4);
   1560	if (ret)
   1561		return ret;
   1562
   1563	return usb4_port_retimer_op(port, index,
   1564			USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
   1565}
   1566
   1567/**
   1568 * usb4_port_retimer_nvm_write() - Write to retimer NVM
   1569 * @port: USB4 port
   1570 * @index: Retimer index
   1571 * @address: Byte address where to start the write
   1572 * @buf: Data to write
    1573 * @size: Number of bytes to write
   1574 *
   1575 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
   1576 * upgrade. Returns %0 if the data was written successfully and negative
   1577 * errno in case of failure. Specifically returns %-ENODEV if there is
   1578 * no retimer at @index.
   1579 */
   1580int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
   1581				const void *buf, size_t size)
   1582{
   1583	struct retimer_info info = { .port = port, .index = index };
   1584	int ret;
   1585
   1586	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
   1587	if (ret)
   1588		return ret;
   1589
   1590	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
   1591				 usb4_port_retimer_nvm_write_next_block, &info);
   1592}
   1593
   1594/**
   1595 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
   1596 * @port: USB4 port
   1597 * @index: Retimer index
   1598 *
   1599 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
   1600 * this function can be used to trigger the NVM upgrade process. If
   1601 * successful the retimer restarts with the new NVM and may not have the
    1602 * index set, so one needs to call usb4_port_enumerate_retimers() to
    1603 * force the index to be assigned.
   1604 */
   1605int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
   1606{
   1607	u32 val;
   1608
   1609	/*
   1610	 * We need to use the raw operation here because once the
   1611	 * authentication completes the retimer index is not set anymore
   1612	 * so we do not get back the status now.
   1613	 */
   1614	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
   1615	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
   1616				  USB4_SB_OPCODE, &val, sizeof(val));
   1617}
   1618
   1619/**
   1620 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
   1621 * @port: USB4 port
   1622 * @index: Retimer index
   1623 * @status: Raw status code read from metadata
   1624 *
   1625 * This can be called after usb4_port_retimer_nvm_authenticate() and
   1626 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
   1627 *
   1628 * Returns %0 if the authentication status was successfully read. The
   1629 * completion metadata (the result) is then stored into @status. If
   1630 * reading the status fails, returns negative errno.
   1631 */
   1632int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
   1633					      u32 *status)
   1634{
   1635	u32 metadata, val;
   1636	int ret;
   1637
   1638	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
   1639				     sizeof(val));
   1640	if (ret)
   1641		return ret;
   1642
   1643	switch (val) {
   1644	case 0:
   1645		*status = 0;
   1646		return 0;
   1647
   1648	case USB4_SB_OPCODE_ERR:
   1649		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
   1650					     &metadata, sizeof(metadata));
   1651		if (ret)
   1652			return ret;
   1653
   1654		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
   1655		return 0;
   1656
   1657	case USB4_SB_OPCODE_ONS:
   1658		return -EOPNOTSUPP;
   1659
   1660	default:
   1661		return -EIO;
   1662	}
   1663}
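
/*
 * Editor's illustrative sketch, not part of the original file: rough
 * shape of the retimer NVM upgrade completion described in the
 * kernel-docs above. A real caller would give the retimer time to
 * restart before reading the status; that delay is omitted here and the
 * helper name is made up for illustration.
 */
static int example_retimer_nvm_upgrade_finish(struct tb_port *port, u8 index,
					      u32 *status)
{
	int ret;

	ret = usb4_port_retimer_nvm_authenticate(port, index);
	if (ret)
		return ret;

	/* Retimer restarts and may lose its index, so re-enumerate */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	return usb4_port_retimer_nvm_authenticate_status(port, index, status);
}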
   1664
   1665static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
   1666					    void *buf, size_t dwords)
   1667{
   1668	const struct retimer_info *info = data;
   1669	struct tb_port *port = info->port;
   1670	u8 index = info->index;
   1671	u32 metadata;
   1672	int ret;
   1673
   1674	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
   1675	if (dwords < NVM_DATA_DWORDS)
   1676		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
   1677
   1678	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
   1679				      sizeof(metadata));
   1680	if (ret)
   1681		return ret;
   1682
   1683	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
   1684	if (ret)
   1685		return ret;
   1686
   1687	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
   1688				      dwords * 4);
   1689}
   1690
   1691/**
   1692 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
   1693 * @port: USB4 port
   1694 * @index: Retimer index
   1695 * @address: NVM address (in bytes) to start reading
   1696 * @buf: Data read from NVM is stored here
   1697 * @size: Number of bytes to read
   1698 *
   1699 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
   1700 * read was successful and negative errno in case of failure.
   1701 * Specifically returns %-ENODEV if there is no retimer at @index.
   1702 */
   1703int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
   1704			       unsigned int address, void *buf, size_t size)
   1705{
   1706	struct retimer_info info = { .port = port, .index = index };
   1707
   1708	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
   1709				usb4_port_retimer_nvm_read_block, &info);
   1710}
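
/*
 * Editor's illustrative sketch, not part of the original file: one
 * plausible ordering of the helpers above for touching an on-board
 * retimer while the USB4 link is down. The exact sequence used by the
 * retimer code lives elsewhere in the driver; this helper name and flow
 * are for illustration only.
 */
static int example_offline_retimer_probe(struct tb_port *port, u8 index)
{
	int ret;

	ret = usb4_port_router_offline(port);
	if (ret)
		return ret;

	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		goto out_online;

	ret = usb4_port_retimer_set_inbound_sbtx(port, index);
	if (ret)
		goto out_online;

	/* e.g. check whether the retimer supports NVM upgrade */
	ret = usb4_port_retimer_nvm_sector_size(port, index);

out_online:
	usb4_port_router_online(port);
	return ret < 0 ? ret : 0;
}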
   1711
   1712/**
    1713 * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
   1714 * @port: USB3 adapter port
   1715 *
    1716 * Returns the maximum supported link rate of a USB3 adapter in Mb/s.
   1717 * Negative errno in case of error.
   1718 */
   1719int usb4_usb3_port_max_link_rate(struct tb_port *port)
   1720{
   1721	int ret, lr;
   1722	u32 val;
   1723
   1724	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
   1725		return -EINVAL;
   1726
   1727	ret = tb_port_read(port, &val, TB_CFG_PORT,
   1728			   port->cap_adap + ADP_USB3_CS_4, 1);
   1729	if (ret)
   1730		return ret;
   1731
   1732	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
   1733	return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
   1734}
   1735
   1736/**
   1737 * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
   1738 * @port: USB3 adapter port
   1739 *
    1740 * Returns the actual established link rate of a USB3 adapter in Mb/s. If
    1741 * the link is not up returns %0, and negative errno in case of failure.
   1742 */
   1743int usb4_usb3_port_actual_link_rate(struct tb_port *port)
   1744{
   1745	int ret, lr;
   1746	u32 val;
   1747
   1748	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
   1749		return -EINVAL;
   1750
   1751	ret = tb_port_read(port, &val, TB_CFG_PORT,
   1752			   port->cap_adap + ADP_USB3_CS_4, 1);
   1753	if (ret)
   1754		return ret;
   1755
   1756	if (!(val & ADP_USB3_CS_4_ULV))
   1757		return 0;
   1758
   1759	lr = val & ADP_USB3_CS_4_ALR_MASK;
   1760	return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
   1761}
   1762
   1763static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
   1764{
   1765	int ret;
   1766	u32 val;
   1767
   1768	if (!tb_port_is_usb3_down(port))
   1769		return -EINVAL;
   1770	if (tb_route(port->sw))
   1771		return -EINVAL;
   1772
   1773	ret = tb_port_read(port, &val, TB_CFG_PORT,
   1774			   port->cap_adap + ADP_USB3_CS_2, 1);
   1775	if (ret)
   1776		return ret;
   1777
   1778	if (request)
   1779		val |= ADP_USB3_CS_2_CMR;
   1780	else
   1781		val &= ~ADP_USB3_CS_2_CMR;
   1782
   1783	ret = tb_port_write(port, &val, TB_CFG_PORT,
   1784			    port->cap_adap + ADP_USB3_CS_2, 1);
   1785	if (ret)
   1786		return ret;
   1787
   1788	/*
    1789	 * We can use val here directly as the CMR bit is in the same bit
    1790	 * position as the HCA bit. Just mask out the others.
   1791	 */
   1792	val &= ADP_USB3_CS_2_CMR;
   1793	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
   1794				      ADP_USB3_CS_1_HCA, val, 1500);
   1795}
   1796
   1797static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
   1798{
   1799	return usb4_usb3_port_cm_request(port, true);
   1800}
   1801
   1802static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
   1803{
   1804	return usb4_usb3_port_cm_request(port, false);
   1805}
   1806
   1807static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
   1808{
   1809	unsigned long uframes;
   1810
   1811	uframes = bw * 512UL << scale;
   1812	return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
   1813}
   1814
   1815static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
   1816{
   1817	unsigned long uframes;
   1818
   1819	/* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
    1820	uframes = ((unsigned long)mbps * 1000 * 1000) / 8000;
   1821	return DIV_ROUND_UP(uframes, 512UL << scale);
   1822}
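/*
 * Worked example of the conversions above, using scale 0 (a non-zero
 * scale doubles the unit size, 512UL << scale, and hence the granularity
 * per step):
 *
 *	mbps_to_usb3_bw(900, 0)
 *		uframes = 900 * 1000 * 1000 / 8000 = 112500
 *		DIV_ROUND_UP(112500, 512) = 220
 *
 *	usb3_bw_to_mbps(220, 0)
 *		uframes = 220 * 512 = 112640
 *		DIV_ROUND_CLOSEST(112640 * 8000, 1000 * 1000) = 901
 *
 * so a request of 900 Mb/s rounds up to 220 bandwidth units, which read
 * back as 901 Mb/s.
 */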
   1823
   1824static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
   1825						   int *upstream_bw,
   1826						   int *downstream_bw)
   1827{
   1828	u32 val, bw, scale;
   1829	int ret;
   1830
   1831	ret = tb_port_read(port, &val, TB_CFG_PORT,
   1832			   port->cap_adap + ADP_USB3_CS_2, 1);
   1833	if (ret)
   1834		return ret;
   1835
   1836	ret = tb_port_read(port, &scale, TB_CFG_PORT,
   1837			   port->cap_adap + ADP_USB3_CS_3, 1);
   1838	if (ret)
   1839		return ret;
   1840
   1841	scale &= ADP_USB3_CS_3_SCALE_MASK;
   1842
   1843	bw = val & ADP_USB3_CS_2_AUBW_MASK;
   1844	*upstream_bw = usb3_bw_to_mbps(bw, scale);
   1845
   1846	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
   1847	*downstream_bw = usb3_bw_to_mbps(bw, scale);
   1848
   1849	return 0;
   1850}
   1851
   1852/**
   1853 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
   1854 * @port: USB3 adapter port
   1855 * @upstream_bw: Allocated upstream bandwidth is stored here
   1856 * @downstream_bw: Allocated downstream bandwidth is stored here
   1857 *
   1858 * Stores currently allocated USB3 bandwidth into @upstream_bw and
   1859 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
   1860 * errno in failure.
   1861 */
   1862int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
   1863				       int *downstream_bw)
   1864{
   1865	int ret;
   1866
   1867	ret = usb4_usb3_port_set_cm_request(port);
   1868	if (ret)
   1869		return ret;
   1870
   1871	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
   1872						      downstream_bw);
   1873	usb4_usb3_port_clear_cm_request(port);
   1874
   1875	return ret;
   1876}
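/*
 * Illustrative sketch (assumption): querying the current USB3 allocation
 * for a downstream USB3 adapter of the host router. The helper performs
 * the CM request handshake internally.
 *
 *	int up_bw, down_bw;
 *	int ret;
 *
 *	ret = usb4_usb3_port_allocated_bandwidth(port, &up_bw, &down_bw);
 *	if (ret)
 *		return ret;
 *	tb_port_dbg(port, "allocated %d/%d Mb/s\n", up_bw, down_bw);
 */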
   1877
   1878static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
   1879						  int *upstream_bw,
   1880						  int *downstream_bw)
   1881{
   1882	u32 val, bw, scale;
   1883	int ret;
   1884
   1885	ret = tb_port_read(port, &val, TB_CFG_PORT,
   1886			   port->cap_adap + ADP_USB3_CS_1, 1);
   1887	if (ret)
   1888		return ret;
   1889
   1890	ret = tb_port_read(port, &scale, TB_CFG_PORT,
   1891			   port->cap_adap + ADP_USB3_CS_3, 1);
   1892	if (ret)
   1893		return ret;
   1894
   1895	scale &= ADP_USB3_CS_3_SCALE_MASK;
   1896
   1897	bw = val & ADP_USB3_CS_1_CUBW_MASK;
   1898	*upstream_bw = usb3_bw_to_mbps(bw, scale);
   1899
   1900	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
   1901	*downstream_bw = usb3_bw_to_mbps(bw, scale);
   1902
   1903	return 0;
   1904}
   1905
   1906static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
   1907						    int upstream_bw,
   1908						    int downstream_bw)
   1909{
   1910	u32 val, ubw, dbw, scale;
   1911	int ret;
   1912
   1913	/* Read the used scale, hardware default is 0 */
   1914	ret = tb_port_read(port, &scale, TB_CFG_PORT,
   1915			   port->cap_adap + ADP_USB3_CS_3, 1);
   1916	if (ret)
   1917		return ret;
   1918
   1919	scale &= ADP_USB3_CS_3_SCALE_MASK;
   1920	ubw = mbps_to_usb3_bw(upstream_bw, scale);
   1921	dbw = mbps_to_usb3_bw(downstream_bw, scale);
   1922
   1923	ret = tb_port_read(port, &val, TB_CFG_PORT,
   1924			   port->cap_adap + ADP_USB3_CS_2, 1);
   1925	if (ret)
   1926		return ret;
   1927
   1928	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
   1929	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
   1930	val |= ubw;
   1931
   1932	return tb_port_write(port, &val, TB_CFG_PORT,
   1933			     port->cap_adap + ADP_USB3_CS_2, 1);
   1934}
   1935
   1936/**
   1937 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
   1938 * @port: USB3 adapter port
   1939 * @upstream_bw: New upstream bandwidth
   1940 * @downstream_bw: New downstream bandwidth
   1941 *
   1942 * This can be used to set how much bandwidth is allocated for the USB3
   1943 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
   1944 * new values programmed to the USB3 adapter allocation registers. If
   1945 * the values are lower than what is currently consumed the allocation
   1946 * is set to what is currently consumed instead (consumed bandwidth
   1947 * cannot be taken away by CM). The actual new values are returned in
   1948 * @upstream_bw and @downstream_bw.
   1949 *
   1950 * Returns %0 in case of success and negative errno if there was a
   1951 * failure.
   1952 */
   1953int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
   1954				      int *downstream_bw)
   1955{
   1956	int ret, consumed_up, consumed_down, allocate_up, allocate_down;
   1957
   1958	ret = usb4_usb3_port_set_cm_request(port);
   1959	if (ret)
   1960		return ret;
   1961
   1962	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
   1963						     &consumed_down);
   1964	if (ret)
   1965		goto err_request;
   1966
    1967	/* Don't allow it to go lower than what is consumed */
   1968	allocate_up = max(*upstream_bw, consumed_up);
   1969	allocate_down = max(*downstream_bw, consumed_down);
   1970
   1971	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
   1972						       allocate_down);
   1973	if (ret)
   1974		goto err_request;
   1975
   1976	*upstream_bw = allocate_up;
   1977	*downstream_bw = allocate_down;
   1978
   1979err_request:
   1980	usb4_usb3_port_clear_cm_request(port);
   1981	return ret;
   1982}
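/*
 * Illustrative sketch (assumption): requesting 2000 Mb/s in both
 * directions for the tunneled USB3 traffic. If more is already being
 * consumed, the allocation is bumped up accordingly and the values that
 * were actually programmed are returned in the arguments.
 *
 *	int up_bw = 2000, down_bw = 2000;
 *	int ret;
 *
 *	ret = usb4_usb3_port_allocate_bandwidth(port, &up_bw, &down_bw);
 *	if (ret)
 *		return ret;
 *	// up_bw and down_bw now hold the programmed allocation
 */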
   1983
   1984/**
   1985 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
   1986 * @port: USB3 adapter port
   1987 * @upstream_bw: New allocated upstream bandwidth
   1988 * @downstream_bw: New allocated downstream bandwidth
   1989 *
   1990 * Releases USB3 allocated bandwidth down to what is actually consumed.
   1991 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
   1992 *
    1993 * Returns %0 in case of success and negative errno in case of failure.
   1994 */
   1995int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
   1996				     int *downstream_bw)
   1997{
   1998	int ret, consumed_up, consumed_down;
   1999
   2000	ret = usb4_usb3_port_set_cm_request(port);
   2001	if (ret)
   2002		return ret;
   2003
   2004	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
   2005						     &consumed_down);
   2006	if (ret)
   2007		goto err_request;
   2008
   2009	/*
   2010	 * Always keep 1000 Mb/s to make sure xHCI has at least some
   2011	 * bandwidth available for isochronous traffic.
   2012	 */
   2013	if (consumed_up < 1000)
   2014		consumed_up = 1000;
   2015	if (consumed_down < 1000)
   2016		consumed_down = 1000;
   2017
   2018	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
   2019						       consumed_down);
   2020	if (ret)
   2021		goto err_request;
   2022
   2023	*upstream_bw = consumed_up;
   2024	*downstream_bw = consumed_down;
   2025
   2026err_request:
   2027	usb4_usb3_port_clear_cm_request(port);
   2028	return ret;
   2029}
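/*
 * Illustrative sketch (assumption): shrinking the allocation back down
 * once the extra bandwidth is no longer needed. At least 1000 Mb/s is
 * kept in each direction for the xHCI, and the resulting allocation is
 * returned in the arguments.
 *
 *	int up_bw, down_bw;
 *	int ret;
 *
 *	ret = usb4_usb3_port_release_bandwidth(port, &up_bw, &down_bw);
 *	if (ret)
 *		return ret;
 */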