cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

switch.c (92228B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Thunderbolt driver - switch/port utility functions
      4 *
      5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
      6 * Copyright (C) 2018, Intel Corporation
      7 */
      8
      9#include <linux/delay.h>
     10#include <linux/idr.h>
     11#include <linux/nvmem-provider.h>
     12#include <linux/pm_runtime.h>
     13#include <linux/sched/signal.h>
     14#include <linux/sizes.h>
     15#include <linux/slab.h>
     16#include <linux/module.h>
     17
     18#include "tb.h"
     19
     20/* Switch NVM support */
     21
     22#define NVM_CSS			0x10
     23
     24struct nvm_auth_status {
     25	struct list_head list;
     26	uuid_t uuid;
     27	u32 status;
     28};
     29
     30static bool clx_enabled = true;
     31module_param_named(clx, clx_enabled, bool, 0444);
     32MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
     33
     34/*
      35 * Hold NVM authentication failure status per switch. This information
     36 * needs to stay around even when the switch gets power cycled so we
     37 * keep it separately.
     38 */
     39static LIST_HEAD(nvm_auth_status_cache);
     40static DEFINE_MUTEX(nvm_auth_status_lock);
     41
     42static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
     43{
     44	struct nvm_auth_status *st;
     45
     46	list_for_each_entry(st, &nvm_auth_status_cache, list) {
     47		if (uuid_equal(&st->uuid, sw->uuid))
     48			return st;
     49	}
     50
     51	return NULL;
     52}
     53
     54static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
     55{
     56	struct nvm_auth_status *st;
     57
     58	mutex_lock(&nvm_auth_status_lock);
     59	st = __nvm_get_auth_status(sw);
     60	mutex_unlock(&nvm_auth_status_lock);
     61
     62	*status = st ? st->status : 0;
     63}
     64
     65static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
     66{
     67	struct nvm_auth_status *st;
     68
     69	if (WARN_ON(!sw->uuid))
     70		return;
     71
     72	mutex_lock(&nvm_auth_status_lock);
     73	st = __nvm_get_auth_status(sw);
     74
     75	if (!st) {
     76		st = kzalloc(sizeof(*st), GFP_KERNEL);
     77		if (!st)
     78			goto unlock;
     79
     80		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
     81		INIT_LIST_HEAD(&st->list);
     82		list_add_tail(&st->list, &nvm_auth_status_cache);
     83	}
     84
     85	st->status = status;
     86unlock:
     87	mutex_unlock(&nvm_auth_status_lock);
     88}
     89
     90static void nvm_clear_auth_status(const struct tb_switch *sw)
     91{
     92	struct nvm_auth_status *st;
     93
     94	mutex_lock(&nvm_auth_status_lock);
     95	st = __nvm_get_auth_status(sw);
     96	if (st) {
     97		list_del(&st->list);
     98		kfree(st);
     99	}
    100	mutex_unlock(&nvm_auth_status_lock);
    101}
    102
    103static int nvm_validate_and_write(struct tb_switch *sw)
    104{
    105	unsigned int image_size, hdr_size;
    106	const u8 *buf = sw->nvm->buf;
    107	u16 ds_size;
    108	int ret;
    109
    110	if (!buf)
    111		return -EINVAL;
    112
    113	image_size = sw->nvm->buf_data_size;
    114	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
    115		return -EINVAL;
    116
    117	/*
    118	 * FARB pointer must point inside the image and must at least
    119	 * contain parts of the digital section we will be reading here.
    120	 */
    121	hdr_size = (*(u32 *)buf) & 0xffffff;
    122	if (hdr_size + NVM_DEVID + 2 >= image_size)
    123		return -EINVAL;
    124
    125	/* Digital section start should be aligned to 4k page */
    126	if (!IS_ALIGNED(hdr_size, SZ_4K))
    127		return -EINVAL;
    128
    129	/*
    130	 * Read digital section size and check that it also fits inside
    131	 * the image.
    132	 */
    133	ds_size = *(u16 *)(buf + hdr_size);
    134	if (ds_size >= image_size)
    135		return -EINVAL;
    136
    137	if (!sw->safe_mode) {
    138		u16 device_id;
    139
    140		/*
    141		 * Make sure the device ID in the image matches the one
    142		 * we read from the switch config space.
    143		 */
    144		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
    145		if (device_id != sw->config.device_id)
    146			return -EINVAL;
    147
    148		if (sw->generation < 3) {
    149			/* Write CSS headers first */
    150			ret = dma_port_flash_write(sw->dma_port,
    151				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
    152				DMA_PORT_CSS_MAX_SIZE);
    153			if (ret)
    154				return ret;
    155		}
    156
    157		/* Skip headers in the image */
    158		buf += hdr_size;
    159		image_size -= hdr_size;
    160	}
    161
    162	if (tb_switch_is_usb4(sw))
    163		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
    164	else
    165		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
    166	if (!ret)
    167		sw->nvm->flushed = true;
    168	return ret;
    169}
    170
    171static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
    172{
    173	int ret = 0;
    174
    175	/*
    176	 * Root switch NVM upgrade requires that we disconnect the
    177	 * existing paths first (in case it is not in safe mode
    178	 * already).
    179	 */
    180	if (!sw->safe_mode) {
    181		u32 status;
    182
    183		ret = tb_domain_disconnect_all_paths(sw->tb);
    184		if (ret)
    185			return ret;
    186		/*
    187		 * The host controller goes away pretty soon after this if
     188		 * everything goes well, so getting a timeout is expected.
    189		 */
    190		ret = dma_port_flash_update_auth(sw->dma_port);
    191		if (!ret || ret == -ETIMEDOUT)
    192			return 0;
    193
    194		/*
    195		 * Any error from update auth operation requires power
    196		 * cycling of the host router.
    197		 */
    198		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
    199		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
    200			nvm_set_auth_status(sw, status);
    201	}
    202
    203	/*
    204	 * From safe mode we can get out by just power cycling the
    205	 * switch.
    206	 */
    207	dma_port_power_cycle(sw->dma_port);
    208	return ret;
    209}
    210
    211static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
    212{
    213	int ret, retries = 10;
    214
    215	ret = dma_port_flash_update_auth(sw->dma_port);
    216	switch (ret) {
    217	case 0:
    218	case -ETIMEDOUT:
    219	case -EACCES:
    220	case -EINVAL:
    221		/* Power cycle is required */
    222		break;
    223	default:
    224		return ret;
    225	}
    226
    227	/*
    228	 * Poll here for the authentication status. It takes some time
     229	 * for the device to respond (we get a timeout for a while). Once
     230	 * we get a response the device needs to be power cycled in order
     231	 * for the new NVM to be taken into use.
    232	 */
    233	do {
    234		u32 status;
    235
    236		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
    237		if (ret < 0 && ret != -ETIMEDOUT)
    238			return ret;
    239		if (ret > 0) {
    240			if (status) {
    241				tb_sw_warn(sw, "failed to authenticate NVM\n");
    242				nvm_set_auth_status(sw, status);
    243			}
    244
    245			tb_sw_info(sw, "power cycling the switch now\n");
    246			dma_port_power_cycle(sw->dma_port);
    247			return 0;
    248		}
    249
    250		msleep(500);
    251	} while (--retries);
    252
    253	return -ETIMEDOUT;
    254}
    255
    256static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
    257{
    258	struct pci_dev *root_port;
    259
    260	/*
    261	 * During host router NVM upgrade we should not allow root port to
    262	 * go into D3cold because some root ports cannot trigger PME
     263	 * themselves. To be on the safe side keep the root port in D0 during
    264	 * the whole upgrade process.
    265	 */
    266	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
    267	if (root_port)
    268		pm_runtime_get_noresume(&root_port->dev);
    269}
    270
    271static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
    272{
    273	struct pci_dev *root_port;
    274
    275	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
    276	if (root_port)
    277		pm_runtime_put(&root_port->dev);
    278}
    279
    280static inline bool nvm_readable(struct tb_switch *sw)
    281{
    282	if (tb_switch_is_usb4(sw)) {
    283		/*
    284		 * USB4 devices must support NVM operations but it is
    285		 * optional for hosts. Therefore we query the NVM sector
    286		 * size here and if it is supported assume NVM
    287		 * operations are implemented.
    288		 */
    289		return usb4_switch_nvm_sector_size(sw) > 0;
    290	}
    291
    292	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
    293	return !!sw->dma_port;
    294}
    295
    296static inline bool nvm_upgradeable(struct tb_switch *sw)
    297{
    298	if (sw->no_nvm_upgrade)
    299		return false;
    300	return nvm_readable(sw);
    301}
    302
    303static inline int nvm_read(struct tb_switch *sw, unsigned int address,
    304			   void *buf, size_t size)
    305{
    306	if (tb_switch_is_usb4(sw))
    307		return usb4_switch_nvm_read(sw, address, buf, size);
    308	return dma_port_flash_read(sw->dma_port, address, buf, size);
    309}
    310
    311static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
    312{
    313	int ret;
    314
    315	if (tb_switch_is_usb4(sw)) {
    316		if (auth_only) {
    317			ret = usb4_switch_nvm_set_offset(sw, 0);
    318			if (ret)
    319				return ret;
    320		}
    321		sw->nvm->authenticating = true;
    322		return usb4_switch_nvm_authenticate(sw);
    323	} else if (auth_only) {
    324		return -EOPNOTSUPP;
    325	}
    326
    327	sw->nvm->authenticating = true;
    328	if (!tb_route(sw)) {
    329		nvm_authenticate_start_dma_port(sw);
    330		ret = nvm_authenticate_host_dma_port(sw);
    331	} else {
    332		ret = nvm_authenticate_device_dma_port(sw);
    333	}
    334
    335	return ret;
    336}
    337
    338static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
    339			      size_t bytes)
    340{
    341	struct tb_nvm *nvm = priv;
    342	struct tb_switch *sw = tb_to_switch(nvm->dev);
    343	int ret;
    344
    345	pm_runtime_get_sync(&sw->dev);
    346
    347	if (!mutex_trylock(&sw->tb->lock)) {
    348		ret = restart_syscall();
    349		goto out;
    350	}
    351
    352	ret = nvm_read(sw, offset, val, bytes);
    353	mutex_unlock(&sw->tb->lock);
    354
    355out:
    356	pm_runtime_mark_last_busy(&sw->dev);
    357	pm_runtime_put_autosuspend(&sw->dev);
    358
    359	return ret;
    360}
    361
    362static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
    363			       size_t bytes)
    364{
    365	struct tb_nvm *nvm = priv;
    366	struct tb_switch *sw = tb_to_switch(nvm->dev);
    367	int ret;
    368
    369	if (!mutex_trylock(&sw->tb->lock))
    370		return restart_syscall();
    371
    372	/*
    373	 * Since writing the NVM image might require some special steps,
    374	 * for example when CSS headers are written, we cache the image
    375	 * locally here and handle the special cases when the user asks
    376	 * us to authenticate the image.
    377	 */
    378	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
    379	mutex_unlock(&sw->tb->lock);
    380
    381	return ret;
    382}
    383
    384static int tb_switch_nvm_add(struct tb_switch *sw)
    385{
    386	struct tb_nvm *nvm;
    387	u32 val;
    388	int ret;
    389
    390	if (!nvm_readable(sw))
    391		return 0;
    392
    393	/*
     394	 * The NVM format of non-Intel hardware is not known so we
     395	 * currently restrict NVM upgrade to Intel hardware. We may
    396	 * relax this in the future when we learn other NVM formats.
    397	 */
    398	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
    399	    sw->config.vendor_id != 0x8087) {
    400		dev_info(&sw->dev,
    401			 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
    402			 sw->config.vendor_id);
    403		return 0;
    404	}
    405
    406	nvm = tb_nvm_alloc(&sw->dev);
    407	if (IS_ERR(nvm))
    408		return PTR_ERR(nvm);
    409
    410	/*
    411	 * If the switch is in safe-mode the only accessible portion of
    412	 * the NVM is the non-active one where userspace is expected to
    413	 * write new functional NVM.
    414	 */
    415	if (!sw->safe_mode) {
    416		u32 nvm_size, hdr_size;
    417
    418		ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
    419		if (ret)
    420			goto err_nvm;
    421
    422		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
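		/*
		 * The low bits of NVM_FLASH_SIZE encode the flash size as a
		 * power-of-two number of Mbit. Beyond the header the flash is
		 * assumed to hold two equally sized regions (active and
		 * inactive NVM images), hence the division by two.
		 */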
    423		nvm_size = (SZ_1M << (val & 7)) / 8;
    424		nvm_size = (nvm_size - hdr_size) / 2;
    425
    426		ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
    427		if (ret)
    428			goto err_nvm;
    429
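		/* NVM_VERSION packs the version as bytes: major in byte 2, minor in byte 1 */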
    430		nvm->major = val >> 16;
    431		nvm->minor = val >> 8;
    432
    433		ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
    434		if (ret)
    435			goto err_nvm;
    436	}
    437
    438	if (!sw->no_nvm_upgrade) {
    439		ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
    440					    tb_switch_nvm_write);
    441		if (ret)
    442			goto err_nvm;
    443	}
    444
    445	sw->nvm = nvm;
    446	return 0;
    447
    448err_nvm:
    449	tb_nvm_free(nvm);
    450	return ret;
    451}
    452
    453static void tb_switch_nvm_remove(struct tb_switch *sw)
    454{
    455	struct tb_nvm *nvm;
    456
    457	nvm = sw->nvm;
    458	sw->nvm = NULL;
    459
    460	if (!nvm)
    461		return;
    462
    463	/* Remove authentication status in case the switch is unplugged */
    464	if (!nvm->authenticating)
    465		nvm_clear_auth_status(sw);
    466
    467	tb_nvm_free(nvm);
    468}
    469
    470/* port utility functions */
    471
    472static const char *tb_port_type(const struct tb_regs_port_header *port)
    473{
    474	switch (port->type >> 16) {
    475	case 0:
    476		switch ((u8) port->type) {
    477		case 0:
    478			return "Inactive";
    479		case 1:
    480			return "Port";
    481		case 2:
    482			return "NHI";
    483		default:
    484			return "unknown";
    485		}
    486	case 0x2:
    487		return "Ethernet";
    488	case 0x8:
    489		return "SATA";
    490	case 0xe:
    491		return "DP/HDMI";
    492	case 0x10:
    493		return "PCIe";
    494	case 0x20:
    495		return "USB";
    496	default:
    497		return "unknown";
    498	}
    499}
    500
    501static void tb_dump_port(struct tb *tb, const struct tb_port *port)
    502{
    503	const struct tb_regs_port_header *regs = &port->config;
    504
    505	tb_dbg(tb,
    506	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
    507	       regs->port_number, regs->vendor_id, regs->device_id,
    508	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
    509	       regs->type);
    510	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
    511	       regs->max_in_hop_id, regs->max_out_hop_id);
    512	tb_dbg(tb, "  Max counters: %d\n", regs->max_counters);
    513	tb_dbg(tb, "  NFC Credits: %#x\n", regs->nfc_credits);
    514	tb_dbg(tb, "  Credits (total/control): %u/%u\n", port->total_credits,
    515	       port->ctl_credits);
    516}
    517
    518/**
    519 * tb_port_state() - get connectedness state of a port
    520 * @port: the port to check
    521 *
    522 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
    523 *
    524 * Return: Returns an enum tb_port_state on success or an error code on failure.
    525 */
    526int tb_port_state(struct tb_port *port)
    527{
    528	struct tb_cap_phy phy;
    529	int res;
    530	if (port->cap_phy == 0) {
    531		tb_port_WARN(port, "does not have a PHY\n");
    532		return -EINVAL;
    533	}
    534	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
    535	if (res)
    536		return res;
    537	return phy.state;
    538}
    539
    540/**
    541 * tb_wait_for_port() - wait for a port to become ready
    542 * @port: Port to wait
    543 * @wait_if_unplugged: Wait also when port is unplugged
    544 *
    545 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
    546 * wait_if_unplugged is set then we also wait if the port is in state
    547 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
    548 * switch resume). Otherwise we only wait if a device is registered but the link
    549 * has not yet been established.
    550 *
    551 * Return: Returns an error code on failure. Returns 0 if the port is not
    552 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
    553 * if the port is connected and in state TB_PORT_UP.
    554 */
    555int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
    556{
    557	int retries = 10;
    558	int state;
    559	if (!port->cap_phy) {
    560		tb_port_WARN(port, "does not have PHY\n");
    561		return -EINVAL;
    562	}
    563	if (tb_is_upstream_port(port)) {
    564		tb_port_WARN(port, "is the upstream port\n");
    565		return -EINVAL;
    566	}
    567
    568	while (retries--) {
    569		state = tb_port_state(port);
    570		if (state < 0)
    571			return state;
    572		if (state == TB_PORT_DISABLED) {
    573			tb_port_dbg(port, "is disabled (state: 0)\n");
    574			return 0;
    575		}
    576		if (state == TB_PORT_UNPLUGGED) {
    577			if (wait_if_unplugged) {
    578				/* used during resume */
    579				tb_port_dbg(port,
    580					    "is unplugged (state: 7), retrying...\n");
    581				msleep(100);
    582				continue;
    583			}
    584			tb_port_dbg(port, "is unplugged (state: 7)\n");
    585			return 0;
    586		}
    587		if (state == TB_PORT_UP) {
    588			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
    589			return 1;
    590		}
    591
    592		/*
    593		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
    594		 * time.
    595		 */
    596		tb_port_dbg(port,
    597			    "is connected, link is not up (state: %d), retrying...\n",
    598			    state);
    599		msleep(100);
    600	}
    601	tb_port_warn(port,
    602		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
    603	return 0;
    604}
    605
    606/**
    607 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
    608 * @port: Port to add/remove NFC credits
    609 * @credits: Credits to add/remove
    610 *
    611 * Change the number of NFC credits allocated to @port by @credits. To remove
    612 * NFC credits pass a negative amount of credits.
    613 *
    614 * Return: Returns 0 on success or an error code on failure.
    615 */
    616int tb_port_add_nfc_credits(struct tb_port *port, int credits)
    617{
    618	u32 nfc_credits;
    619
    620	if (credits == 0 || port->sw->is_unplugged)
    621		return 0;
    622
    623	/*
    624	 * USB4 restricts programming NFC buffers to lane adapters only
    625	 * so skip other ports.
    626	 */
    627	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
    628		return 0;
    629
    630	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
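	/* When removing credits, do not remove more than are currently allocated */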
    631	if (credits < 0)
    632		credits = max_t(int, -nfc_credits, credits);
    633
    634	nfc_credits += credits;
    635
    636	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
    637		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
    638
    639	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
    640	port->config.nfc_credits |= nfc_credits;
    641
    642	return tb_port_write(port, &port->config.nfc_credits,
    643			     TB_CFG_PORT, ADP_CS_4, 1);
    644}
    645
    646/**
    647 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
    648 * @port: Port whose counters to clear
    649 * @counter: Counter index to clear
    650 *
    651 * Return: Returns 0 on success or an error code on failure.
    652 */
    653int tb_port_clear_counter(struct tb_port *port, int counter)
    654{
    655	u32 zero[3] = { 0, 0, 0 };
    656	tb_port_dbg(port, "clearing counter %d\n", counter);
    657	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
    658}
    659
    660/**
    661 * tb_port_unlock() - Unlock downstream port
    662 * @port: Port to unlock
    663 *
    664 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
    665 * downstream router accessible for CM.
    666 */
    667int tb_port_unlock(struct tb_port *port)
    668{
    669	if (tb_switch_is_icm(port->sw))
    670		return 0;
    671	if (!tb_port_is_null(port))
    672		return -EINVAL;
    673	if (tb_switch_is_usb4(port->sw))
    674		return usb4_port_unlock(port);
    675	return 0;
    676}
    677
    678static int __tb_port_enable(struct tb_port *port, bool enable)
    679{
    680	int ret;
    681	u32 phy;
    682
    683	if (!tb_port_is_null(port))
    684		return -EINVAL;
    685
    686	ret = tb_port_read(port, &phy, TB_CFG_PORT,
    687			   port->cap_phy + LANE_ADP_CS_1, 1);
    688	if (ret)
    689		return ret;
    690
    691	if (enable)
    692		phy &= ~LANE_ADP_CS_1_LD;
    693	else
    694		phy |= LANE_ADP_CS_1_LD;
    695
    696
    697	ret = tb_port_write(port, &phy, TB_CFG_PORT,
    698			    port->cap_phy + LANE_ADP_CS_1, 1);
    699	if (ret)
    700		return ret;
    701
    702	tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis");
    703	return 0;
    704}
    705
    706/**
    707 * tb_port_enable() - Enable lane adapter
    708 * @port: Port to enable (can be %NULL)
    709 *
    710 * This is used for lane 0 and 1 adapters to enable it.
    711 */
    712int tb_port_enable(struct tb_port *port)
    713{
    714	return __tb_port_enable(port, true);
    715}
    716
    717/**
    718 * tb_port_disable() - Disable lane adapter
    719 * @port: Port to disable (can be %NULL)
    720 *
    721 * This is used for lane 0 and 1 adapters to disable it.
    722 */
    723int tb_port_disable(struct tb_port *port)
    724{
    725	return __tb_port_enable(port, false);
    726}
    727
    728/*
    729 * tb_init_port() - initialize a port
    730 *
    731 * This is a helper method for tb_switch_alloc. Does not check or initialize
    732 * any downstream switches.
    733 *
    734 * Return: Returns 0 on success or an error code on failure.
    735 */
    736static int tb_init_port(struct tb_port *port)
    737{
    738	int res;
    739	int cap;
    740
    741	INIT_LIST_HEAD(&port->list);
    742
    743	/* Control adapter does not have configuration space */
    744	if (!port->port)
    745		return 0;
    746
    747	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
    748	if (res) {
    749		if (res == -ENODEV) {
    750			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
    751			       port->port);
    752			port->disabled = true;
    753			return 0;
    754		}
    755		return res;
    756	}
    757
    758	/* Port 0 is the switch itself and has no PHY. */
    759	if (port->config.type == TB_TYPE_PORT) {
    760		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
    761
    762		if (cap > 0)
    763			port->cap_phy = cap;
    764		else
    765			tb_port_WARN(port, "non switch port without a PHY\n");
    766
    767		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
    768		if (cap > 0)
    769			port->cap_usb4 = cap;
    770
    771		/*
     772		 * For USB4 ports the buffers allocated for the control path
     773		 * can be read from the path config space. For legacy
     774		 * devices we use a hard-coded value.
    775		 */
    776		if (tb_switch_is_usb4(port->sw)) {
    777			struct tb_regs_hop hop;
    778
    779			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
    780				port->ctl_credits = hop.initial_credits;
    781		}
    782		if (!port->ctl_credits)
    783			port->ctl_credits = 2;
    784
    785	} else {
    786		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
    787		if (cap > 0)
    788			port->cap_adap = cap;
    789	}
    790
    791	port->total_credits =
    792		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
    793		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
    794
    795	tb_dump_port(port->sw->tb, port);
    796	return 0;
    797}
    798
    799static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
    800			       int max_hopid)
    801{
    802	int port_max_hopid;
    803	struct ida *ida;
    804
    805	if (in) {
    806		port_max_hopid = port->config.max_in_hop_id;
    807		ida = &port->in_hopids;
    808	} else {
    809		port_max_hopid = port->config.max_out_hop_id;
    810		ida = &port->out_hopids;
    811	}
    812
    813	/*
     814	 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
     815	 * reserved.
    816	 */
    817	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
    818		min_hopid = TB_PATH_MIN_HOPID;
    819
    820	if (max_hopid < 0 || max_hopid > port_max_hopid)
    821		max_hopid = port_max_hopid;
    822
    823	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
    824}
    825
    826/**
    827 * tb_port_alloc_in_hopid() - Allocate input HopID from port
    828 * @port: Port to allocate HopID for
    829 * @min_hopid: Minimum acceptable input HopID
    830 * @max_hopid: Maximum acceptable input HopID
    831 *
    832 * Return: HopID between @min_hopid and @max_hopid or negative errno in
    833 * case of error.
    834 */
    835int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
    836{
    837	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
    838}
    839
    840/**
    841 * tb_port_alloc_out_hopid() - Allocate output HopID from port
    842 * @port: Port to allocate HopID for
    843 * @min_hopid: Minimum acceptable output HopID
    844 * @max_hopid: Maximum acceptable output HopID
    845 *
    846 * Return: HopID between @min_hopid and @max_hopid or negative errno in
    847 * case of error.
    848 */
    849int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
    850{
    851	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
    852}
    853
    854/**
    855 * tb_port_release_in_hopid() - Release allocated input HopID from port
    856 * @port: Port whose HopID to release
    857 * @hopid: HopID to release
    858 */
    859void tb_port_release_in_hopid(struct tb_port *port, int hopid)
    860{
    861	ida_simple_remove(&port->in_hopids, hopid);
    862}
    863
    864/**
    865 * tb_port_release_out_hopid() - Release allocated output HopID from port
    866 * @port: Port whose HopID to release
    867 * @hopid: HopID to release
    868 */
    869void tb_port_release_out_hopid(struct tb_port *port, int hopid)
    870{
    871	ida_simple_remove(&port->out_hopids, hopid);
    872}
    873
    874static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
    875					  const struct tb_switch *sw)
    876{
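	/*
	 * The route string encodes one 8-bit port number per depth level,
	 * so two routers share a branch of the topology when their routes
	 * match up to the parent's depth.
	 */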
    877	u64 mask = (1ULL << parent->config.depth * 8) - 1;
    878	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
    879}
    880
    881/**
    882 * tb_next_port_on_path() - Return next port for given port on a path
    883 * @start: Start port of the walk
    884 * @end: End port of the walk
    885 * @prev: Previous port (%NULL if this is the first)
    886 *
    887 * This function can be used to walk from one port to another if they
     888 * are connected through zero or more switches. If @prev is a dual
     889 * link port, the function follows that link and returns the other end
     890 * of that same link.
    891 *
    892 * If the @end port has been reached, return %NULL.
    893 *
    894 * Domain tb->lock must be held when this function is called.
    895 */
    896struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
    897				     struct tb_port *prev)
    898{
    899	struct tb_port *next;
    900
    901	if (!prev)
    902		return start;
    903
    904	if (prev->sw == end->sw) {
    905		if (prev == end)
    906			return NULL;
    907		return end;
    908	}
    909
    910	if (tb_switch_is_reachable(prev->sw, end->sw)) {
    911		next = tb_port_at(tb_route(end->sw), prev->sw);
    912		/* Walk down the topology if next == prev */
    913		if (prev->remote &&
    914		    (next == prev || next->dual_link_port == prev))
    915			next = prev->remote;
    916	} else {
    917		if (tb_is_upstream_port(prev)) {
    918			next = prev->remote;
    919		} else {
    920			next = tb_upstream_port(prev->sw);
    921			/*
    922			 * Keep the same link if prev and next are both
    923			 * dual link ports.
    924			 */
    925			if (next->dual_link_port &&
    926			    next->link_nr != prev->link_nr) {
    927				next = next->dual_link_port;
    928			}
    929		}
    930	}
    931
    932	return next != prev ? next : NULL;
    933}
    934
    935/**
    936 * tb_port_get_link_speed() - Get current link speed
    937 * @port: Port to check (USB4 or CIO)
    938 *
    939 * Returns link speed in Gb/s or negative errno in case of failure.
    940 */
    941int tb_port_get_link_speed(struct tb_port *port)
    942{
    943	u32 val, speed;
    944	int ret;
    945
    946	if (!port->cap_phy)
    947		return -EINVAL;
    948
    949	ret = tb_port_read(port, &val, TB_CFG_PORT,
    950			   port->cap_phy + LANE_ADP_CS_1, 1);
    951	if (ret)
    952		return ret;
    953
    954	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
    955		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
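	/* Gen 3 lanes run at 20 Gb/s, Gen 2 lanes at 10 Gb/s */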
    956	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
    957}
    958
    959/**
    960 * tb_port_get_link_width() - Get current link width
    961 * @port: Port to check (USB4 or CIO)
    962 *
    963 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
    964 * or negative errno in case of failure.
    965 */
    966int tb_port_get_link_width(struct tb_port *port)
    967{
    968	u32 val;
    969	int ret;
    970
    971	if (!port->cap_phy)
    972		return -EINVAL;
    973
    974	ret = tb_port_read(port, &val, TB_CFG_PORT,
    975			   port->cap_phy + LANE_ADP_CS_1, 1);
    976	if (ret)
    977		return ret;
    978
    979	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
    980		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
    981}
    982
    983static bool tb_port_is_width_supported(struct tb_port *port, int width)
    984{
    985	u32 phy, widths;
    986	int ret;
    987
    988	if (!port->cap_phy)
    989		return false;
    990
    991	ret = tb_port_read(port, &phy, TB_CFG_PORT,
    992			   port->cap_phy + LANE_ADP_CS_0, 1);
    993	if (ret)
    994		return false;
    995
    996	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
    997		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
    998
    999	return !!(widths & width);
   1000}
   1001
   1002/**
   1003 * tb_port_set_link_width() - Set target link width of the lane adapter
   1004 * @port: Lane adapter
   1005 * @width: Target link width (%1 or %2)
   1006 *
   1007 * Sets the target link width of the lane adapter to @width. Does not
   1008 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
   1009 *
   1010 * Return: %0 in case of success and negative errno in case of error
   1011 */
   1012int tb_port_set_link_width(struct tb_port *port, unsigned int width)
   1013{
   1014	u32 val;
   1015	int ret;
   1016
   1017	if (!port->cap_phy)
   1018		return -EINVAL;
   1019
   1020	ret = tb_port_read(port, &val, TB_CFG_PORT,
   1021			   port->cap_phy + LANE_ADP_CS_1, 1);
   1022	if (ret)
   1023		return ret;
   1024
   1025	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
   1026	switch (width) {
   1027	case 1:
   1028		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
   1029			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
   1030		break;
   1031	case 2:
   1032		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
   1033			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
   1034		break;
   1035	default:
   1036		return -EINVAL;
   1037	}
   1038
   1039	return tb_port_write(port, &val, TB_CFG_PORT,
   1040			     port->cap_phy + LANE_ADP_CS_1, 1);
   1041}
   1042
   1043/**
   1044 * tb_port_set_lane_bonding() - Enable/disable lane bonding
   1045 * @port: Lane adapter
   1046 * @bonding: enable/disable bonding
   1047 *
   1048 * Enables or disables lane bonding. This should be called after target
   1049 * link width has been set (tb_port_set_link_width()). Note in most
   1050 * cases one should use tb_port_lane_bonding_enable() instead to enable
   1051 * lane bonding.
   1052 *
   1053 * As a side effect sets @port->bonding accordingly (and does the same
   1054 * for lane 1 too).
   1055 *
   1056 * Return: %0 in case of success and negative errno in case of error
   1057 */
   1058int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
   1059{
   1060	u32 val;
   1061	int ret;
   1062
   1063	if (!port->cap_phy)
   1064		return -EINVAL;
   1065
   1066	ret = tb_port_read(port, &val, TB_CFG_PORT,
   1067			   port->cap_phy + LANE_ADP_CS_1, 1);
   1068	if (ret)
   1069		return ret;
   1070
   1071	if (bonding)
   1072		val |= LANE_ADP_CS_1_LB;
   1073	else
   1074		val &= ~LANE_ADP_CS_1_LB;
   1075
   1076	ret = tb_port_write(port, &val, TB_CFG_PORT,
   1077			    port->cap_phy + LANE_ADP_CS_1, 1);
   1078	if (ret)
   1079		return ret;
   1080
   1081	/*
   1082	 * When lane 0 bonding is set it will affect lane 1 too so
   1083	 * update both.
   1084	 */
   1085	port->bonded = bonding;
   1086	port->dual_link_port->bonded = bonding;
   1087
   1088	return 0;
   1089}
   1090
   1091/**
   1092 * tb_port_lane_bonding_enable() - Enable bonding on port
   1093 * @port: port to enable
   1094 *
   1095 * Enable bonding by setting the link width of the port and the other
   1096 * port in case of dual link port. Does not wait for the link to
   1097 * actually reach the bonded state so caller needs to call
   1098 * tb_port_wait_for_link_width() before enabling any paths through the
   1099 * link to make sure the link is in expected state.
   1100 *
   1101 * Return: %0 in case of success and negative errno in case of error
   1102 */
   1103int tb_port_lane_bonding_enable(struct tb_port *port)
   1104{
   1105	int ret;
   1106
   1107	/*
    1108	 * Enable lane bonding for both links if not already enabled by,
    1109	 * for example, the boot firmware.
   1110	 */
   1111	ret = tb_port_get_link_width(port);
   1112	if (ret == 1) {
   1113		ret = tb_port_set_link_width(port, 2);
   1114		if (ret)
   1115			goto err_lane0;
   1116	}
   1117
   1118	ret = tb_port_get_link_width(port->dual_link_port);
   1119	if (ret == 1) {
   1120		ret = tb_port_set_link_width(port->dual_link_port, 2);
   1121		if (ret)
   1122			goto err_lane0;
   1123	}
   1124
   1125	ret = tb_port_set_lane_bonding(port, true);
   1126	if (ret)
   1127		goto err_lane1;
   1128
   1129	return 0;
   1130
   1131err_lane1:
   1132	tb_port_set_link_width(port->dual_link_port, 1);
   1133err_lane0:
   1134	tb_port_set_link_width(port, 1);
   1135	return ret;
   1136}
   1137
   1138/**
   1139 * tb_port_lane_bonding_disable() - Disable bonding on port
   1140 * @port: port to disable
   1141 *
   1142 * Disable bonding by setting the link width of the port and the
   1143 * other port in case of dual link port.
   1144 */
   1145void tb_port_lane_bonding_disable(struct tb_port *port)
   1146{
   1147	tb_port_set_lane_bonding(port, false);
   1148	tb_port_set_link_width(port->dual_link_port, 1);
   1149	tb_port_set_link_width(port, 1);
   1150}
   1151
   1152/**
   1153 * tb_port_wait_for_link_width() - Wait until link reaches specific width
   1154 * @port: Port to wait for
   1155 * @width: Expected link width (%1 or %2)
   1156 * @timeout_msec: Timeout in ms how long to wait
   1157 *
   1158 * Should be used after both ends of the link have been bonded (or
   1159 * bonding has been disabled) to wait until the link actually reaches
   1160 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
   1161 * within the given timeout, %0 if it did.
   1162 */
   1163int tb_port_wait_for_link_width(struct tb_port *port, int width,
   1164				int timeout_msec)
   1165{
   1166	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
   1167	int ret;
   1168
   1169	do {
   1170		ret = tb_port_get_link_width(port);
   1171		if (ret < 0) {
   1172			/*
    1173			 * Sometimes we get a port locked error when
   1174			 * polling the lanes so we can ignore it and
   1175			 * retry.
   1176			 */
   1177			if (ret != -EACCES)
   1178				return ret;
   1179		} else if (ret == width) {
   1180			return 0;
   1181		}
   1182
   1183		usleep_range(1000, 2000);
   1184	} while (ktime_before(ktime_get(), timeout));
   1185
   1186	return -ETIMEDOUT;
   1187}
   1188
   1189static int tb_port_do_update_credits(struct tb_port *port)
   1190{
   1191	u32 nfc_credits;
   1192	int ret;
   1193
   1194	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
   1195	if (ret)
   1196		return ret;
   1197
   1198	if (nfc_credits != port->config.nfc_credits) {
   1199		u32 total;
   1200
   1201		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
   1202			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
   1203
   1204		tb_port_dbg(port, "total credits changed %u -> %u\n",
   1205			    port->total_credits, total);
   1206
   1207		port->config.nfc_credits = nfc_credits;
   1208		port->total_credits = total;
   1209	}
   1210
   1211	return 0;
   1212}
   1213
   1214/**
   1215 * tb_port_update_credits() - Re-read port total credits
   1216 * @port: Port to update
   1217 *
   1218 * After the link is bonded (or bonding was disabled) the port total
   1219 * credits may change, so this function needs to be called to re-read
   1220 * the credits. Updates also the second lane adapter.
   1221 */
   1222int tb_port_update_credits(struct tb_port *port)
   1223{
   1224	int ret;
   1225
   1226	ret = tb_port_do_update_credits(port);
   1227	if (ret)
   1228		return ret;
   1229	return tb_port_do_update_credits(port->dual_link_port);
   1230}
   1231
   1232static int tb_port_start_lane_initialization(struct tb_port *port)
   1233{
   1234	int ret;
   1235
   1236	if (tb_switch_is_usb4(port->sw))
   1237		return 0;
   1238
   1239	ret = tb_lc_start_lane_initialization(port);
   1240	return ret == -EINVAL ? 0 : ret;
   1241}
   1242
   1243/*
   1244 * Returns true if the port had something (router, XDomain) connected
   1245 * before suspend.
   1246 */
   1247static bool tb_port_resume(struct tb_port *port)
   1248{
   1249	bool has_remote = tb_port_has_remote(port);
   1250
   1251	if (port->usb4) {
   1252		usb4_port_device_resume(port->usb4);
   1253	} else if (!has_remote) {
   1254		/*
   1255		 * For disconnected downstream lane adapters start lane
   1256		 * initialization now so we detect future connects.
   1257		 *
    1258		 * For XDomain start the lane initialization now so the
   1259		 * link gets re-established.
   1260		 *
   1261		 * This is only needed for non-USB4 ports.
   1262		 */
   1263		if (!tb_is_upstream_port(port) || port->xdomain)
   1264			tb_port_start_lane_initialization(port);
   1265	}
   1266
   1267	return has_remote || port->xdomain;
   1268}
   1269
   1270/**
   1271 * tb_port_is_enabled() - Is the adapter port enabled
   1272 * @port: Port to check
   1273 */
   1274bool tb_port_is_enabled(struct tb_port *port)
   1275{
   1276	switch (port->config.type) {
   1277	case TB_TYPE_PCIE_UP:
   1278	case TB_TYPE_PCIE_DOWN:
   1279		return tb_pci_port_is_enabled(port);
   1280
   1281	case TB_TYPE_DP_HDMI_IN:
   1282	case TB_TYPE_DP_HDMI_OUT:
   1283		return tb_dp_port_is_enabled(port);
   1284
   1285	case TB_TYPE_USB3_UP:
   1286	case TB_TYPE_USB3_DOWN:
   1287		return tb_usb3_port_is_enabled(port);
   1288
   1289	default:
   1290		return false;
   1291	}
   1292}
   1293
   1294/**
   1295 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
   1296 * @port: USB3 adapter port to check
   1297 */
   1298bool tb_usb3_port_is_enabled(struct tb_port *port)
   1299{
   1300	u32 data;
   1301
   1302	if (tb_port_read(port, &data, TB_CFG_PORT,
   1303			 port->cap_adap + ADP_USB3_CS_0, 1))
   1304		return false;
   1305
   1306	return !!(data & ADP_USB3_CS_0_PE);
   1307}
   1308
   1309/**
   1310 * tb_usb3_port_enable() - Enable USB3 adapter port
   1311 * @port: USB3 adapter port to enable
   1312 * @enable: Enable/disable the USB3 adapter
   1313 */
   1314int tb_usb3_port_enable(struct tb_port *port, bool enable)
   1315{
   1316	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
   1317			  : ADP_USB3_CS_0_V;
   1318
   1319	if (!port->cap_adap)
   1320		return -ENXIO;
   1321	return tb_port_write(port, &word, TB_CFG_PORT,
   1322			     port->cap_adap + ADP_USB3_CS_0, 1);
   1323}
   1324
   1325/**
   1326 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
   1327 * @port: PCIe port to check
   1328 */
   1329bool tb_pci_port_is_enabled(struct tb_port *port)
   1330{
   1331	u32 data;
   1332
   1333	if (tb_port_read(port, &data, TB_CFG_PORT,
   1334			 port->cap_adap + ADP_PCIE_CS_0, 1))
   1335		return false;
   1336
   1337	return !!(data & ADP_PCIE_CS_0_PE);
   1338}
   1339
   1340/**
   1341 * tb_pci_port_enable() - Enable PCIe adapter port
   1342 * @port: PCIe port to enable
   1343 * @enable: Enable/disable the PCIe adapter
   1344 */
   1345int tb_pci_port_enable(struct tb_port *port, bool enable)
   1346{
   1347	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
   1348	if (!port->cap_adap)
   1349		return -ENXIO;
   1350	return tb_port_write(port, &word, TB_CFG_PORT,
   1351			     port->cap_adap + ADP_PCIE_CS_0, 1);
   1352}
   1353
   1354/**
   1355 * tb_dp_port_hpd_is_active() - Is HPD already active
   1356 * @port: DP out port to check
   1357 *
    1358 * Checks if the DP OUT adapter port has the HDP bit already set.
   1359 */
   1360int tb_dp_port_hpd_is_active(struct tb_port *port)
   1361{
   1362	u32 data;
   1363	int ret;
   1364
   1365	ret = tb_port_read(port, &data, TB_CFG_PORT,
   1366			   port->cap_adap + ADP_DP_CS_2, 1);
   1367	if (ret)
   1368		return ret;
   1369
   1370	return !!(data & ADP_DP_CS_2_HDP);
   1371}
   1372
   1373/**
   1374 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
   1375 * @port: Port to clear HPD
   1376 *
   1377 * If the DP IN port has HDP set, this function can be used to clear it.
   1378 */
   1379int tb_dp_port_hpd_clear(struct tb_port *port)
   1380{
   1381	u32 data;
   1382	int ret;
   1383
   1384	ret = tb_port_read(port, &data, TB_CFG_PORT,
   1385			   port->cap_adap + ADP_DP_CS_3, 1);
   1386	if (ret)
   1387		return ret;
   1388
   1389	data |= ADP_DP_CS_3_HDPC;
   1390	return tb_port_write(port, &data, TB_CFG_PORT,
   1391			     port->cap_adap + ADP_DP_CS_3, 1);
   1392}
   1393
   1394/**
   1395 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
   1396 * @port: DP IN/OUT port to set hops
   1397 * @video: Video Hop ID
   1398 * @aux_tx: AUX TX Hop ID
   1399 * @aux_rx: AUX RX Hop ID
   1400 *
   1401 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
   1402 * router DP adapters too but does not program the values as the fields
   1403 * are read-only.
   1404 */
   1405int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
   1406			unsigned int aux_tx, unsigned int aux_rx)
   1407{
   1408	u32 data[2];
   1409	int ret;
   1410
   1411	if (tb_switch_is_usb4(port->sw))
   1412		return 0;
   1413
   1414	ret = tb_port_read(port, data, TB_CFG_PORT,
   1415			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
   1416	if (ret)
   1417		return ret;
   1418
   1419	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
    1420	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
   1421	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
   1422
   1423	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
   1424		ADP_DP_CS_0_VIDEO_HOPID_MASK;
   1425	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
   1426	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
   1427		ADP_DP_CS_1_AUX_RX_HOPID_MASK;
   1428
   1429	return tb_port_write(port, data, TB_CFG_PORT,
   1430			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
   1431}
   1432
   1433/**
   1434 * tb_dp_port_is_enabled() - Is DP adapter port enabled
   1435 * @port: DP adapter port to check
   1436 */
   1437bool tb_dp_port_is_enabled(struct tb_port *port)
   1438{
   1439	u32 data[2];
   1440
   1441	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
   1442			 ARRAY_SIZE(data)))
   1443		return false;
   1444
   1445	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
   1446}
   1447
   1448/**
   1449 * tb_dp_port_enable() - Enables/disables DP paths of a port
   1450 * @port: DP IN/OUT port
   1451 * @enable: Enable/disable DP path
   1452 *
   1453 * Once Hop IDs are programmed DP paths can be enabled or disabled by
   1454 * calling this function.
   1455 */
   1456int tb_dp_port_enable(struct tb_port *port, bool enable)
   1457{
   1458	u32 data[2];
   1459	int ret;
   1460
   1461	ret = tb_port_read(port, data, TB_CFG_PORT,
   1462			  port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
   1463	if (ret)
   1464		return ret;
   1465
   1466	if (enable)
   1467		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
   1468	else
   1469		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
   1470
   1471	return tb_port_write(port, data, TB_CFG_PORT,
   1472			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
   1473}
   1474
   1475/* switch utility functions */
   1476
   1477static const char *tb_switch_generation_name(const struct tb_switch *sw)
   1478{
   1479	switch (sw->generation) {
   1480	case 1:
   1481		return "Thunderbolt 1";
   1482	case 2:
   1483		return "Thunderbolt 2";
   1484	case 3:
   1485		return "Thunderbolt 3";
   1486	case 4:
   1487		return "USB4";
   1488	default:
   1489		return "Unknown";
   1490	}
   1491}
   1492
   1493static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
   1494{
   1495	const struct tb_regs_switch_header *regs = &sw->config;
   1496
   1497	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
   1498	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
   1499	       regs->revision, regs->thunderbolt_version);
   1500	tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
   1501	tb_dbg(tb, "  Config:\n");
   1502	tb_dbg(tb,
   1503		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
   1504	       regs->upstream_port_number, regs->depth,
   1505	       (((u64) regs->route_hi) << 32) | regs->route_lo,
   1506	       regs->enabled, regs->plug_events_delay);
   1507	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
   1508	       regs->__unknown1, regs->__unknown4);
   1509}
   1510
   1511/**
   1512 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
   1513 * @sw: Switch to reset
   1514 *
   1515 * Return: Returns 0 on success or an error code on failure.
   1516 */
   1517int tb_switch_reset(struct tb_switch *sw)
   1518{
   1519	struct tb_cfg_result res;
   1520
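	/* Only generation 1 (legacy Thunderbolt) switches are reset this way */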
   1521	if (sw->generation > 1)
   1522		return 0;
   1523
   1524	tb_sw_dbg(sw, "resetting switch\n");
   1525
   1526	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
   1527			      TB_CFG_SWITCH, 2, 2);
   1528	if (res.err)
   1529		return res.err;
   1530	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
   1531	if (res.err > 0)
   1532		return -EIO;
   1533	return res.err;
   1534}
   1535
   1536/**
   1537 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
   1538 * @sw: Router to read the offset value from
   1539 * @offset: Offset in the router config space to read from
   1540 * @bit: Bit mask in the offset to wait for
   1541 * @value: Value of the bits to wait for
   1542 * @timeout_msec: Timeout in ms how long to wait
   1543 *
    1544 * Wait until the specified bits in the specified offset reach the specified value.
   1545 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
   1546 * within the given timeout or a negative errno in case of failure.
   1547 */
   1548int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
   1549			   u32 value, int timeout_msec)
   1550{
   1551	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
   1552
   1553	do {
   1554		u32 val;
   1555		int ret;
   1556
   1557		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
   1558		if (ret)
   1559			return ret;
   1560
   1561		if ((val & bit) == value)
   1562			return 0;
   1563
   1564		usleep_range(50, 100);
   1565	} while (ktime_before(ktime_get(), timeout));
   1566
   1567	return -ETIMEDOUT;
   1568}
   1569
   1570/*
   1571 * tb_plug_events_active() - enable/disable plug events on a switch
   1572 *
   1573 * Also configures a sane plug_events_delay of 255ms.
   1574 *
   1575 * Return: Returns 0 on success or an error code on failure.
   1576 */
   1577static int tb_plug_events_active(struct tb_switch *sw, bool active)
   1578{
   1579	u32 data;
   1580	int res;
   1581
   1582	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
   1583		return 0;
   1584
   1585	sw->config.plug_events_delay = 0xff;
   1586	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
   1587	if (res)
   1588		return res;
   1589
   1590	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
   1591	if (res)
   1592		return res;
   1593
   1594	if (active) {
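		/* Clear the event disable bits (2..6, cf. the else branch below) to unmask plug events */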
   1595		data = data & 0xFFFFFF83;
   1596		switch (sw->config.device_id) {
   1597		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
   1598		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
   1599		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
   1600			break;
   1601		default:
   1602			/*
   1603			 * Skip Alpine Ridge, it needs to have vendor
   1604			 * specific USB hotplug event enabled for the
   1605			 * internal xHCI to work.
   1606			 */
   1607			if (!tb_switch_is_alpine_ridge(sw))
   1608				data |= TB_PLUG_EVENTS_USB_DISABLE;
   1609		}
   1610	} else {
   1611		data = data | 0x7c;
   1612	}
   1613	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
   1614			   sw->cap_plug_events + 1, 1);
   1615}
   1616
   1617static ssize_t authorized_show(struct device *dev,
   1618			       struct device_attribute *attr,
   1619			       char *buf)
   1620{
   1621	struct tb_switch *sw = tb_to_switch(dev);
   1622
   1623	return sprintf(buf, "%u\n", sw->authorized);
   1624}
   1625
   1626static int disapprove_switch(struct device *dev, void *not_used)
   1627{
   1628	char *envp[] = { "AUTHORIZED=0", NULL };
   1629	struct tb_switch *sw;
   1630
   1631	sw = tb_to_switch(dev);
   1632	if (sw && sw->authorized) {
   1633		int ret;
   1634
   1635		/* First children */
   1636		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
   1637		if (ret)
   1638			return ret;
   1639
   1640		ret = tb_domain_disapprove_switch(sw->tb, sw);
   1641		if (ret)
   1642			return ret;
   1643
   1644		sw->authorized = 0;
   1645		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
   1646	}
   1647
   1648	return 0;
   1649}
   1650
   1651static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
   1652{
   1653	char envp_string[13];
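	/* Fits "AUTHORIZED=N" for a single-digit N plus the terminating NUL */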
   1654	int ret = -EINVAL;
   1655	char *envp[] = { envp_string, NULL };
   1656
   1657	if (!mutex_trylock(&sw->tb->lock))
   1658		return restart_syscall();
   1659
   1660	if (!!sw->authorized == !!val)
   1661		goto unlock;
   1662
   1663	switch (val) {
   1664	/* Disapprove switch */
   1665	case 0:
   1666		if (tb_route(sw)) {
   1667			ret = disapprove_switch(&sw->dev, NULL);
   1668			goto unlock;
   1669		}
   1670		break;
   1671
   1672	/* Approve switch */
   1673	case 1:
   1674		if (sw->key)
   1675			ret = tb_domain_approve_switch_key(sw->tb, sw);
   1676		else
   1677			ret = tb_domain_approve_switch(sw->tb, sw);
   1678		break;
   1679
   1680	/* Challenge switch */
   1681	case 2:
   1682		if (sw->key)
   1683			ret = tb_domain_challenge_switch_key(sw->tb, sw);
   1684		break;
   1685
   1686	default:
   1687		break;
   1688	}
   1689
   1690	if (!ret) {
   1691		sw->authorized = val;
   1692		/*
   1693		 * Notify status change to the userspace, informing the new
   1694		 * value of /sys/bus/thunderbolt/devices/.../authorized.
   1695		 */
   1696		sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
   1697		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
   1698	}
   1699
   1700unlock:
   1701	mutex_unlock(&sw->tb->lock);
   1702	return ret;
   1703}
   1704
   1705static ssize_t authorized_store(struct device *dev,
   1706				struct device_attribute *attr,
   1707				const char *buf, size_t count)
   1708{
   1709	struct tb_switch *sw = tb_to_switch(dev);
   1710	unsigned int val;
   1711	ssize_t ret;
   1712
   1713	ret = kstrtouint(buf, 0, &val);
   1714	if (ret)
   1715		return ret;
   1716	if (val > 2)
   1717		return -EINVAL;
   1718
   1719	pm_runtime_get_sync(&sw->dev);
   1720	ret = tb_switch_set_authorized(sw, val);
   1721	pm_runtime_mark_last_busy(&sw->dev);
   1722	pm_runtime_put_autosuspend(&sw->dev);
   1723
   1724	return ret ? ret : count;
   1725}
   1726static DEVICE_ATTR_RW(authorized);
   1727
   1728static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
   1729			 char *buf)
   1730{
   1731	struct tb_switch *sw = tb_to_switch(dev);
   1732
   1733	return sprintf(buf, "%u\n", sw->boot);
   1734}
   1735static DEVICE_ATTR_RO(boot);
   1736
   1737static ssize_t device_show(struct device *dev, struct device_attribute *attr,
   1738			   char *buf)
   1739{
   1740	struct tb_switch *sw = tb_to_switch(dev);
   1741
   1742	return sprintf(buf, "%#x\n", sw->device);
   1743}
   1744static DEVICE_ATTR_RO(device);
   1745
   1746static ssize_t
   1747device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
   1748{
   1749	struct tb_switch *sw = tb_to_switch(dev);
   1750
   1751	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
   1752}
   1753static DEVICE_ATTR_RO(device_name);
   1754
   1755static ssize_t
   1756generation_show(struct device *dev, struct device_attribute *attr, char *buf)
   1757{
   1758	struct tb_switch *sw = tb_to_switch(dev);
   1759
   1760	return sprintf(buf, "%u\n", sw->generation);
   1761}
   1762static DEVICE_ATTR_RO(generation);
   1763
   1764static ssize_t key_show(struct device *dev, struct device_attribute *attr,
   1765			char *buf)
   1766{
   1767	struct tb_switch *sw = tb_to_switch(dev);
   1768	ssize_t ret;
   1769
   1770	if (!mutex_trylock(&sw->tb->lock))
   1771		return restart_syscall();
   1772
   1773	if (sw->key)
   1774		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
   1775	else
   1776		ret = sprintf(buf, "\n");
   1777
   1778	mutex_unlock(&sw->tb->lock);
   1779	return ret;
   1780}
   1781
   1782static ssize_t key_store(struct device *dev, struct device_attribute *attr,
   1783			 const char *buf, size_t count)
   1784{
   1785	struct tb_switch *sw = tb_to_switch(dev);
   1786	u8 key[TB_SWITCH_KEY_SIZE];
   1787	ssize_t ret = count;
   1788	bool clear = false;
   1789
   1790	if (!strcmp(buf, "\n"))
   1791		clear = true;
   1792	else if (hex2bin(key, buf, sizeof(key)))
   1793		return -EINVAL;
   1794
   1795	if (!mutex_trylock(&sw->tb->lock))
   1796		return restart_syscall();
   1797
   1798	if (sw->authorized) {
   1799		ret = -EBUSY;
   1800	} else {
   1801		kfree(sw->key);
   1802		if (clear) {
   1803			sw->key = NULL;
   1804		} else {
   1805			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
   1806			if (!sw->key)
   1807				ret = -ENOMEM;
   1808		}
   1809	}
   1810
   1811	mutex_unlock(&sw->tb->lock);
   1812	return ret;
   1813}
   1814static DEVICE_ATTR(key, 0600, key_show, key_store);
   1815
   1816static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
   1817			  char *buf)
   1818{
   1819	struct tb_switch *sw = tb_to_switch(dev);
   1820
   1821	return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
   1822}
   1823
   1824/*
   1825 * Currently all lanes must run at the same speed but we expose here
   1826 * both directions to allow possible asymmetric links in the future.
   1827 */
   1828static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
   1829static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
   1830
   1831static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
   1832			  char *buf)
   1833{
   1834	struct tb_switch *sw = tb_to_switch(dev);
   1835
   1836	return sprintf(buf, "%u\n", sw->link_width);
   1837}
   1838
   1839/*
   1840	 * Currently the link has the same number of lanes in both directions (1 or 2),
   1841	 * but we expose them separately to allow possible asymmetric links in the future.
   1842 */
   1843static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
   1844static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
   1845
   1846static ssize_t nvm_authenticate_show(struct device *dev,
   1847	struct device_attribute *attr, char *buf)
   1848{
   1849	struct tb_switch *sw = tb_to_switch(dev);
   1850	u32 status;
   1851
   1852	nvm_get_auth_status(sw, &status);
   1853	return sprintf(buf, "%#x\n", status);
   1854}
   1855
   1856static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
   1857				      bool disconnect)
   1858{
   1859	struct tb_switch *sw = tb_to_switch(dev);
   1860	int val, ret;
   1861
   1862	pm_runtime_get_sync(&sw->dev);
   1863
   1864	if (!mutex_trylock(&sw->tb->lock)) {
   1865		ret = restart_syscall();
   1866		goto exit_rpm;
   1867	}
   1868
   1869	/* If NVMem devices are not yet added */
   1870	if (!sw->nvm) {
   1871		ret = -EAGAIN;
   1872		goto exit_unlock;
   1873	}
   1874
   1875	ret = kstrtoint(buf, 10, &val);
   1876	if (ret)
   1877		goto exit_unlock;
   1878
   1879	/* Always clear the authentication status */
   1880	nvm_clear_auth_status(sw);
   1881
   1882	if (val > 0) {
   1883		if (val == AUTHENTICATE_ONLY) {
   1884			if (disconnect)
   1885				ret = -EINVAL;
   1886			else
   1887				ret = nvm_authenticate(sw, true);
   1888		} else {
   1889			if (!sw->nvm->flushed) {
   1890				if (!sw->nvm->buf) {
   1891					ret = -EINVAL;
   1892					goto exit_unlock;
   1893				}
   1894
   1895				ret = nvm_validate_and_write(sw);
   1896				if (ret || val == WRITE_ONLY)
   1897					goto exit_unlock;
   1898			}
   1899			if (val == WRITE_AND_AUTHENTICATE) {
   1900				if (disconnect)
   1901					ret = tb_lc_force_power(sw);
   1902				else
   1903					ret = nvm_authenticate(sw, false);
   1904			}
   1905		}
   1906	}
   1907
   1908exit_unlock:
   1909	mutex_unlock(&sw->tb->lock);
   1910exit_rpm:
   1911	pm_runtime_mark_last_busy(&sw->dev);
   1912	pm_runtime_put_autosuspend(&sw->dev);
   1913
   1914	return ret;
   1915}
   1916
   1917static ssize_t nvm_authenticate_store(struct device *dev,
   1918	struct device_attribute *attr, const char *buf, size_t count)
   1919{
   1920	int ret = nvm_authenticate_sysfs(dev, buf, false);
   1921	if (ret)
   1922		return ret;
   1923	return count;
   1924}
   1925static DEVICE_ATTR_RW(nvm_authenticate);
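/*
 * Editor's note (illustrative, not part of the driver): userspace is expected
 * to first write the new image to the router's non-active NVMem device
 * (typically nvm_non_active<id>/nvmem under the device's sysfs directory) and
 * then write a value to this attribute selecting the operation. The accepted
 * values correspond to the WRITE_ONLY, WRITE_AND_AUTHENTICATE and
 * AUTHENTICATE_ONLY constants handled in nvm_authenticate_sysfs() above; the
 * numeric values are defined elsewhere in the driver. A rough (assumed) flow
 * from a shell, per the Thunderbolt admin guide where 1 selects write and
 * authenticate:
 *
 *	# dd if=firmware.nvm of=.../nvm_non_active0/nvmem
 *	# echo 1 > .../nvm_authenticate
 */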
   1926
   1927static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
   1928	struct device_attribute *attr, char *buf)
   1929{
   1930	return nvm_authenticate_show(dev, attr, buf);
   1931}
   1932
   1933static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
   1934	struct device_attribute *attr, const char *buf, size_t count)
   1935{
   1936	int ret;
   1937
   1938	ret = nvm_authenticate_sysfs(dev, buf, true);
   1939	return ret ? ret : count;
   1940}
   1941static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
   1942
   1943static ssize_t nvm_version_show(struct device *dev,
   1944				struct device_attribute *attr, char *buf)
   1945{
   1946	struct tb_switch *sw = tb_to_switch(dev);
   1947	int ret;
   1948
   1949	if (!mutex_trylock(&sw->tb->lock))
   1950		return restart_syscall();
   1951
   1952	if (sw->safe_mode)
   1953		ret = -ENODATA;
   1954	else if (!sw->nvm)
   1955		ret = -EAGAIN;
   1956	else
   1957		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
   1958
   1959	mutex_unlock(&sw->tb->lock);
   1960
   1961	return ret;
   1962}
   1963static DEVICE_ATTR_RO(nvm_version);
   1964
   1965static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
   1966			   char *buf)
   1967{
   1968	struct tb_switch *sw = tb_to_switch(dev);
   1969
   1970	return sprintf(buf, "%#x\n", sw->vendor);
   1971}
   1972static DEVICE_ATTR_RO(vendor);
   1973
   1974static ssize_t
   1975vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
   1976{
   1977	struct tb_switch *sw = tb_to_switch(dev);
   1978
   1979	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
   1980}
   1981static DEVICE_ATTR_RO(vendor_name);
   1982
   1983static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
   1984			      char *buf)
   1985{
   1986	struct tb_switch *sw = tb_to_switch(dev);
   1987
   1988	return sprintf(buf, "%pUb\n", sw->uuid);
   1989}
   1990static DEVICE_ATTR_RO(unique_id);
   1991
   1992static struct attribute *switch_attrs[] = {
   1993	&dev_attr_authorized.attr,
   1994	&dev_attr_boot.attr,
   1995	&dev_attr_device.attr,
   1996	&dev_attr_device_name.attr,
   1997	&dev_attr_generation.attr,
   1998	&dev_attr_key.attr,
   1999	&dev_attr_nvm_authenticate.attr,
   2000	&dev_attr_nvm_authenticate_on_disconnect.attr,
   2001	&dev_attr_nvm_version.attr,
   2002	&dev_attr_rx_speed.attr,
   2003	&dev_attr_rx_lanes.attr,
   2004	&dev_attr_tx_speed.attr,
   2005	&dev_attr_tx_lanes.attr,
   2006	&dev_attr_vendor.attr,
   2007	&dev_attr_vendor_name.attr,
   2008	&dev_attr_unique_id.attr,
   2009	NULL,
   2010};
   2011
   2012static umode_t switch_attr_is_visible(struct kobject *kobj,
   2013				      struct attribute *attr, int n)
   2014{
   2015	struct device *dev = kobj_to_dev(kobj);
   2016	struct tb_switch *sw = tb_to_switch(dev);
   2017
   2018	if (attr == &dev_attr_authorized.attr) {
   2019		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
   2020		    sw->tb->security_level == TB_SECURITY_DPONLY)
   2021			return 0;
   2022	} else if (attr == &dev_attr_device.attr) {
   2023		if (!sw->device)
   2024			return 0;
   2025	} else if (attr == &dev_attr_device_name.attr) {
   2026		if (!sw->device_name)
   2027			return 0;
   2028	} else if (attr == &dev_attr_vendor.attr)  {
   2029		if (!sw->vendor)
   2030			return 0;
   2031	} else if (attr == &dev_attr_vendor_name.attr)  {
   2032		if (!sw->vendor_name)
   2033			return 0;
   2034	} else if (attr == &dev_attr_key.attr) {
   2035		if (tb_route(sw) &&
   2036		    sw->tb->security_level == TB_SECURITY_SECURE &&
   2037		    sw->security_level == TB_SECURITY_SECURE)
   2038			return attr->mode;
   2039		return 0;
   2040	} else if (attr == &dev_attr_rx_speed.attr ||
   2041		   attr == &dev_attr_rx_lanes.attr ||
   2042		   attr == &dev_attr_tx_speed.attr ||
   2043		   attr == &dev_attr_tx_lanes.attr) {
   2044		if (tb_route(sw))
   2045			return attr->mode;
   2046		return 0;
   2047	} else if (attr == &dev_attr_nvm_authenticate.attr) {
   2048		if (nvm_upgradeable(sw))
   2049			return attr->mode;
   2050		return 0;
   2051	} else if (attr == &dev_attr_nvm_version.attr) {
   2052		if (nvm_readable(sw))
   2053			return attr->mode;
   2054		return 0;
   2055	} else if (attr == &dev_attr_boot.attr) {
   2056		if (tb_route(sw))
   2057			return attr->mode;
   2058		return 0;
   2059	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
   2060		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
   2061			return attr->mode;
   2062		return 0;
   2063	}
   2064
   2065	return sw->safe_mode ? 0 : attr->mode;
   2066}
   2067
   2068static const struct attribute_group switch_group = {
   2069	.is_visible = switch_attr_is_visible,
   2070	.attrs = switch_attrs,
   2071};
   2072
   2073static const struct attribute_group *switch_groups[] = {
   2074	&switch_group,
   2075	NULL,
   2076};
   2077
   2078static void tb_switch_release(struct device *dev)
   2079{
   2080	struct tb_switch *sw = tb_to_switch(dev);
   2081	struct tb_port *port;
   2082
   2083	dma_port_free(sw->dma_port);
   2084
   2085	tb_switch_for_each_port(sw, port) {
   2086		ida_destroy(&port->in_hopids);
   2087		ida_destroy(&port->out_hopids);
   2088	}
   2089
   2090	kfree(sw->uuid);
   2091	kfree(sw->device_name);
   2092	kfree(sw->vendor_name);
   2093	kfree(sw->ports);
   2094	kfree(sw->drom);
   2095	kfree(sw->key);
   2096	kfree(sw);
   2097}
   2098
   2099static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
   2100{
   2101	struct tb_switch *sw = tb_to_switch(dev);
   2102	const char *type;
   2103
   2104	if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
   2105		if (add_uevent_var(env, "USB4_VERSION=1.0"))
   2106			return -ENOMEM;
   2107	}
   2108
   2109	if (!tb_route(sw)) {
   2110		type = "host";
   2111	} else {
   2112		const struct tb_port *port;
   2113		bool hub = false;
   2114
   2115		/* Device is a hub if it has any downstream ports */
   2116		tb_switch_for_each_port(sw, port) {
   2117			if (!port->disabled && !tb_is_upstream_port(port) &&
   2118			     tb_port_is_null(port)) {
   2119				hub = true;
   2120				break;
   2121			}
   2122		}
   2123
   2124		type = hub ? "hub" : "device";
   2125	}
   2126
   2127	if (add_uevent_var(env, "USB4_TYPE=%s", type))
   2128		return -ENOMEM;
   2129	return 0;
   2130}
   2131
   2132/*
   2133	 * Currently we only need to provide the callbacks. Everything else is
   2134	 * handled in the connection manager.
   2135 */
   2136static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
   2137{
   2138	struct tb_switch *sw = tb_to_switch(dev);
   2139	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
   2140
   2141	if (cm_ops->runtime_suspend_switch)
   2142		return cm_ops->runtime_suspend_switch(sw);
   2143
   2144	return 0;
   2145}
   2146
   2147static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
   2148{
   2149	struct tb_switch *sw = tb_to_switch(dev);
   2150	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
   2151
   2152	if (cm_ops->runtime_resume_switch)
   2153		return cm_ops->runtime_resume_switch(sw);
   2154	return 0;
   2155}
   2156
   2157static const struct dev_pm_ops tb_switch_pm_ops = {
   2158	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
   2159			   NULL)
   2160};
   2161
   2162struct device_type tb_switch_type = {
   2163	.name = "thunderbolt_device",
   2164	.release = tb_switch_release,
   2165	.uevent = tb_switch_uevent,
   2166	.pm = &tb_switch_pm_ops,
   2167};
   2168
   2169static int tb_switch_get_generation(struct tb_switch *sw)
   2170{
   2171	switch (sw->config.device_id) {
   2172	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
   2173	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
   2174	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
   2175	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
   2176	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
   2177	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
   2178	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
   2179	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
   2180		return 1;
   2181
   2182	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
   2183	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
   2184	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
   2185		return 2;
   2186
   2187	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
   2188	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
   2189	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
   2190	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
   2191	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
   2192	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
   2193	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
   2194	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
   2195	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
   2196	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
   2197		return 3;
   2198
   2199	default:
   2200		if (tb_switch_is_usb4(sw))
   2201			return 4;
   2202
   2203		/*
   2204		 * For unknown switches assume generation 1 to be on the
   2205		 * safe side.
   2206		 */
   2207		tb_sw_warn(sw, "unsupported switch device id %#x\n",
   2208			   sw->config.device_id);
   2209		return 1;
   2210	}
   2211}
   2212
   2213static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
   2214{
   2215	int max_depth;
   2216
   2217	if (tb_switch_is_usb4(sw) ||
   2218	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
   2219		max_depth = USB4_SWITCH_MAX_DEPTH;
   2220	else
   2221		max_depth = TB_SWITCH_MAX_DEPTH;
   2222
   2223	return depth > max_depth;
   2224}
   2225
   2226/**
   2227 * tb_switch_alloc() - allocate a switch
   2228 * @tb: Pointer to the owning domain
   2229 * @parent: Parent device for this switch
   2230 * @route: Route string for this switch
   2231 *
   2232 * Allocates and initializes a switch. Will not upload configuration to
   2233 * the switch. For that you need to call tb_switch_configure()
   2234 * separately. The returned switch should be released by calling
   2235 * tb_switch_put().
   2236 *
   2237 * Return: Pointer to the allocated switch or ERR_PTR() in case of
   2238 * failure.
   2239 */
   2240struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
   2241				  u64 route)
   2242{
   2243	struct tb_switch *sw;
   2244	int upstream_port;
   2245	int i, ret, depth;
   2246
   2247	/* Unlock the downstream port so we can access the switch below */
   2248	if (route) {
   2249		struct tb_switch *parent_sw = tb_to_switch(parent);
   2250		struct tb_port *down;
   2251
   2252		down = tb_port_at(route, parent_sw);
   2253		tb_port_unlock(down);
   2254	}
   2255
   2256	depth = tb_route_length(route);
   2257
   2258	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
   2259	if (upstream_port < 0)
   2260		return ERR_PTR(upstream_port);
   2261
   2262	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
   2263	if (!sw)
   2264		return ERR_PTR(-ENOMEM);
   2265
   2266	sw->tb = tb;
   2267	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
   2268	if (ret)
   2269		goto err_free_sw_ports;
   2270
   2271	sw->generation = tb_switch_get_generation(sw);
   2272
   2273	tb_dbg(tb, "current switch config:\n");
   2274	tb_dump_switch(tb, sw);
   2275
   2276	/* configure switch */
   2277	sw->config.upstream_port_number = upstream_port;
   2278	sw->config.depth = depth;
   2279	sw->config.route_hi = upper_32_bits(route);
   2280	sw->config.route_lo = lower_32_bits(route);
   2281	sw->config.enabled = 0;
   2282
   2283	/* Make sure we do not exceed maximum topology limit */
   2284	if (tb_switch_exceeds_max_depth(sw, depth)) {
   2285		ret = -EADDRNOTAVAIL;
   2286		goto err_free_sw_ports;
   2287	}
   2288
   2289	/* initialize ports */
   2290	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
   2291				GFP_KERNEL);
   2292	if (!sw->ports) {
   2293		ret = -ENOMEM;
   2294		goto err_free_sw_ports;
   2295	}
   2296
   2297	for (i = 0; i <= sw->config.max_port_number; i++) {
   2298		/* minimum setup for tb_find_cap and tb_drom_read to work */
   2299		sw->ports[i].sw = sw;
   2300		sw->ports[i].port = i;
   2301
   2302		/* Control port does not need HopID allocation */
   2303		if (i) {
   2304			ida_init(&sw->ports[i].in_hopids);
   2305			ida_init(&sw->ports[i].out_hopids);
   2306		}
   2307	}
   2308
   2309	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
   2310	if (ret > 0)
   2311		sw->cap_plug_events = ret;
   2312
   2313	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
   2314	if (ret > 0)
   2315		sw->cap_vsec_tmu = ret;
   2316
   2317	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
   2318	if (ret > 0)
   2319		sw->cap_lc = ret;
   2320
   2321	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
   2322	if (ret > 0)
   2323		sw->cap_lp = ret;
   2324
   2325	/* Root switch is always authorized */
   2326	if (!route)
   2327		sw->authorized = true;
   2328
   2329	device_initialize(&sw->dev);
   2330	sw->dev.parent = parent;
   2331	sw->dev.bus = &tb_bus_type;
   2332	sw->dev.type = &tb_switch_type;
   2333	sw->dev.groups = switch_groups;
   2334	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
   2335
   2336	return sw;
   2337
   2338err_free_sw_ports:
   2339	kfree(sw->ports);
   2340	kfree(sw);
   2341
   2342	return ERR_PTR(ret);
   2343}
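/*
 * Usage sketch (editor's illustration, not part of the driver): a connection
 * manager typically pairs tb_switch_alloc() with tb_switch_configure(),
 * tb_switch_add() and tb_switch_put(). The variables below (tb, parent,
 * route) are assumed to exist in the caller and the error value is only
 * illustrative:
 *
 *	sw = tb_switch_alloc(tb, &parent->dev, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	if (tb_switch_configure(sw)) {
 *		tb_switch_put(sw);
 *		return -EIO;
 *	}
 */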
   2344
   2345/**
   2346 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
   2347 * @tb: Pointer to the owning domain
   2348 * @parent: Parent device for this switch
   2349 * @route: Route string for this switch
   2350 *
   2351 * This creates a switch in safe mode. This means the switch pretty much
   2352	 * lacks all capabilities except the DMA configuration port until it has
   2353	 * been flashed with valid NVM firmware.
   2354 *
   2355 * The returned switch must be released by calling tb_switch_put().
   2356 *
   2357 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
   2358 */
   2359struct tb_switch *
   2360tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
   2361{
   2362	struct tb_switch *sw;
   2363
   2364	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
   2365	if (!sw)
   2366		return ERR_PTR(-ENOMEM);
   2367
   2368	sw->tb = tb;
   2369	sw->config.depth = tb_route_length(route);
   2370	sw->config.route_hi = upper_32_bits(route);
   2371	sw->config.route_lo = lower_32_bits(route);
   2372	sw->safe_mode = true;
   2373
   2374	device_initialize(&sw->dev);
   2375	sw->dev.parent = parent;
   2376	sw->dev.bus = &tb_bus_type;
   2377	sw->dev.type = &tb_switch_type;
   2378	sw->dev.groups = switch_groups;
   2379	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
   2380
   2381	return sw;
   2382}
   2383
   2384/**
   2385 * tb_switch_configure() - Uploads configuration to the switch
   2386 * @sw: Switch to configure
   2387 *
   2388 * Call this function before the switch is added to the system. It will
   2389	 * upload the configuration to the switch and make it available for the
   2390	 * connection manager to use. Can be called for the switch again after
   2391 * resume from low power states to re-initialize it.
   2392 *
   2393 * Return: %0 in case of success and negative errno in case of failure
   2394 */
   2395int tb_switch_configure(struct tb_switch *sw)
   2396{
   2397	struct tb *tb = sw->tb;
   2398	u64 route;
   2399	int ret;
   2400
   2401	route = tb_route(sw);
   2402
   2403	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
   2404	       sw->config.enabled ? "restoring" : "initializing", route,
   2405	       tb_route_length(route), sw->config.upstream_port_number);
   2406
   2407	sw->config.enabled = 1;
   2408
   2409	if (tb_switch_is_usb4(sw)) {
   2410		/*
   2411		 * For USB4 devices, we need to program the CM version
   2412		 * accordingly so that it knows to expose all the
   2413		 * additional capabilities.
   2414		 */
   2415		sw->config.cmuv = USB4_VERSION_1_0;
   2416
   2417		/* Enumerate the switch */
   2418		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
   2419				  ROUTER_CS_1, 4);
   2420		if (ret)
   2421			return ret;
   2422
   2423		ret = usb4_switch_setup(sw);
   2424	} else {
   2425		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
   2426			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
   2427				   sw->config.vendor_id);
   2428
   2429		if (!sw->cap_plug_events) {
   2430			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
   2431			return -ENODEV;
   2432		}
   2433
   2434		/* Enumerate the switch */
   2435		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
   2436				  ROUTER_CS_1, 3);
   2437	}
   2438	if (ret)
   2439		return ret;
   2440
   2441	return tb_plug_events_active(sw, true);
   2442}
   2443
   2444static int tb_switch_set_uuid(struct tb_switch *sw)
   2445{
   2446	bool uid = false;
   2447	u32 uuid[4];
   2448	int ret;
   2449
   2450	if (sw->uuid)
   2451		return 0;
   2452
   2453	if (tb_switch_is_usb4(sw)) {
   2454		ret = usb4_switch_read_uid(sw, &sw->uid);
   2455		if (ret)
   2456			return ret;
   2457		uid = true;
   2458	} else {
   2459		/*
   2460		 * The newer controllers include fused UUID as part of
   2461		 * link controller specific registers
   2462		 */
   2463		ret = tb_lc_read_uuid(sw, uuid);
   2464		if (ret) {
   2465			if (ret != -EINVAL)
   2466				return ret;
   2467			uid = true;
   2468		}
   2469	}
   2470
   2471	if (uid) {
   2472		/*
   2473		 * ICM generates UUID based on UID and fills the upper
   2474		 * two words with ones. This is not strictly following
   2475		 * UUID format but we want to be compatible with it so
   2476		 * we do the same here.
   2477		 */
   2478		uuid[0] = sw->uid & 0xffffffff;
   2479		uuid[1] = (sw->uid >> 32) & 0xffffffff;
   2480		uuid[2] = 0xffffffff;
   2481		uuid[3] = 0xffffffff;
   2482	}
   2483
   2484	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
   2485	if (!sw->uuid)
   2486		return -ENOMEM;
   2487	return 0;
   2488}
   2489
   2490static int tb_switch_add_dma_port(struct tb_switch *sw)
   2491{
   2492	u32 status;
   2493	int ret;
   2494
   2495	switch (sw->generation) {
   2496	case 2:
   2497		/* Only root switch can be upgraded */
   2498		if (tb_route(sw))
   2499			return 0;
   2500
   2501		fallthrough;
   2502	case 3:
   2503	case 4:
   2504		ret = tb_switch_set_uuid(sw);
   2505		if (ret)
   2506			return ret;
   2507		break;
   2508
   2509	default:
   2510		/*
   2511		 * DMA port is the only thing available when the switch
   2512		 * is in safe mode.
   2513		 */
   2514		if (!sw->safe_mode)
   2515			return 0;
   2516		break;
   2517	}
   2518
   2519	if (sw->no_nvm_upgrade)
   2520		return 0;
   2521
   2522	if (tb_switch_is_usb4(sw)) {
   2523		ret = usb4_switch_nvm_authenticate_status(sw, &status);
   2524		if (ret)
   2525			return ret;
   2526
   2527		if (status) {
   2528			tb_sw_info(sw, "switch flash authentication failed\n");
   2529			nvm_set_auth_status(sw, status);
   2530		}
   2531
   2532		return 0;
   2533	}
   2534
   2535	/* Root switch DMA port requires running firmware */
   2536	if (!tb_route(sw) && !tb_switch_is_icm(sw))
   2537		return 0;
   2538
   2539	sw->dma_port = dma_port_alloc(sw);
   2540	if (!sw->dma_port)
   2541		return 0;
   2542
   2543	/*
   2544	 * If there is a status already set then authentication failed
   2545	 * when dma_port_flash_update_auth() returned. Power cycling
   2546	 * is not needed (it was done already) so the only thing we do here
   2547	 * is unblock runtime PM of the root port.
   2548	 */
   2549	nvm_get_auth_status(sw, &status);
   2550	if (status) {
   2551		if (!tb_route(sw))
   2552			nvm_authenticate_complete_dma_port(sw);
   2553		return 0;
   2554	}
   2555
   2556	/*
   2557	 * Check status of the previous flash authentication. If there
   2558	 * is one we need to power cycle the switch in any case to make
   2559	 * it functional again.
   2560	 */
   2561	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
   2562	if (ret <= 0)
   2563		return ret;
   2564
   2565	/* Now we can allow root port to suspend again */
   2566	if (!tb_route(sw))
   2567		nvm_authenticate_complete_dma_port(sw);
   2568
   2569	if (status) {
   2570		tb_sw_info(sw, "switch flash authentication failed\n");
   2571		nvm_set_auth_status(sw, status);
   2572	}
   2573
   2574	tb_sw_info(sw, "power cycling the switch now\n");
   2575	dma_port_power_cycle(sw->dma_port);
   2576
   2577	/*
   2578	 * We return an error here, which causes adding the switch to fail.
   2579	 * It should appear back once the power cycle is complete.
   2580	 */
   2581	return -ESHUTDOWN;
   2582}
   2583
   2584static void tb_switch_default_link_ports(struct tb_switch *sw)
   2585{
   2586	int i;
   2587
   2588	for (i = 1; i <= sw->config.max_port_number; i++) {
   2589		struct tb_port *port = &sw->ports[i];
   2590		struct tb_port *subordinate;
   2591
   2592		if (!tb_port_is_null(port))
   2593			continue;
   2594
   2595		/* Check for the subordinate port */
   2596		if (i == sw->config.max_port_number ||
   2597		    !tb_port_is_null(&sw->ports[i + 1]))
   2598			continue;
   2599
   2600		/* Link them if that was not already done (by the DROM) */
   2601		subordinate = &sw->ports[i + 1];
   2602		if (!port->dual_link_port && !subordinate->dual_link_port) {
   2603			port->link_nr = 0;
   2604			port->dual_link_port = subordinate;
   2605			subordinate->link_nr = 1;
   2606			subordinate->dual_link_port = port;
   2607
   2608			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
   2609				  port->port, subordinate->port);
   2610		}
   2611	}
   2612}
   2613
   2614static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
   2615{
   2616	const struct tb_port *up = tb_upstream_port(sw);
   2617
   2618	if (!up->dual_link_port || !up->dual_link_port->remote)
   2619		return false;
   2620
   2621	if (tb_switch_is_usb4(sw))
   2622		return usb4_switch_lane_bonding_possible(sw);
   2623	return tb_lc_lane_bonding_possible(sw);
   2624}
   2625
   2626static int tb_switch_update_link_attributes(struct tb_switch *sw)
   2627{
   2628	struct tb_port *up;
   2629	bool change = false;
   2630	int ret;
   2631
   2632	if (!tb_route(sw) || tb_switch_is_icm(sw))
   2633		return 0;
   2634
   2635	up = tb_upstream_port(sw);
   2636
   2637	ret = tb_port_get_link_speed(up);
   2638	if (ret < 0)
   2639		return ret;
   2640	if (sw->link_speed != ret)
   2641		change = true;
   2642	sw->link_speed = ret;
   2643
   2644	ret = tb_port_get_link_width(up);
   2645	if (ret < 0)
   2646		return ret;
   2647	if (sw->link_width != ret)
   2648		change = true;
   2649	sw->link_width = ret;
   2650
   2651	/* Notify userspace that there is possible link attribute change */
   2652	if (device_is_registered(&sw->dev) && change)
   2653		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
   2654
   2655	return 0;
   2656}
   2657
   2658/**
   2659 * tb_switch_lane_bonding_enable() - Enable lane bonding
   2660 * @sw: Switch to enable lane bonding
   2661 *
   2662 * Connection manager can call this function to enable lane bonding of a
   2663 * switch. If conditions are correct and both switches support the feature,
   2664	 * lanes are bonded. It is safe to call this for any switch.
   2665 */
   2666int tb_switch_lane_bonding_enable(struct tb_switch *sw)
   2667{
   2668	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
   2669	struct tb_port *up, *down;
   2670	u64 route = tb_route(sw);
   2671	int ret;
   2672
   2673	if (!route)
   2674		return 0;
   2675
   2676	if (!tb_switch_lane_bonding_possible(sw))
   2677		return 0;
   2678
   2679	up = tb_upstream_port(sw);
   2680	down = tb_port_at(route, parent);
   2681
   2682	if (!tb_port_is_width_supported(up, 2) ||
   2683	    !tb_port_is_width_supported(down, 2))
   2684		return 0;
   2685
   2686	ret = tb_port_lane_bonding_enable(up);
   2687	if (ret) {
   2688		tb_port_warn(up, "failed to enable lane bonding\n");
   2689		return ret;
   2690	}
   2691
   2692	ret = tb_port_lane_bonding_enable(down);
   2693	if (ret) {
   2694		tb_port_warn(down, "failed to enable lane bonding\n");
   2695		tb_port_lane_bonding_disable(up);
   2696		return ret;
   2697	}
   2698
   2699	ret = tb_port_wait_for_link_width(down, 2, 100);
   2700	if (ret) {
   2701		tb_port_warn(down, "timeout enabling lane bonding\n");
   2702		return ret;
   2703	}
   2704
   2705	tb_port_update_credits(down);
   2706	tb_port_update_credits(up);
   2707	tb_switch_update_link_attributes(sw);
   2708
   2709	tb_sw_dbg(sw, "lane bonding enabled\n");
   2710	return ret;
   2711}
   2712
   2713/**
   2714 * tb_switch_lane_bonding_disable() - Disable lane bonding
   2715 * @sw: Switch whose lane bonding to disable
   2716 *
   2717 * Disables lane bonding between @sw and parent. This can be called even
   2718 * if lanes were not bonded originally.
   2719 */
   2720void tb_switch_lane_bonding_disable(struct tb_switch *sw)
   2721{
   2722	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
   2723	struct tb_port *up, *down;
   2724
   2725	if (!tb_route(sw))
   2726		return;
   2727
   2728	up = tb_upstream_port(sw);
   2729	if (!up->bonded)
   2730		return;
   2731
   2732	down = tb_port_at(tb_route(sw), parent);
   2733
   2734	tb_port_lane_bonding_disable(up);
   2735	tb_port_lane_bonding_disable(down);
   2736
   2737	/*
   2738	 * It is fine if we get other errors as the router might have
   2739	 * been unplugged.
   2740	 */
   2741	if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
   2742		tb_sw_warn(sw, "timeout disabling lane bonding\n");
   2743
   2744	tb_port_update_credits(down);
   2745	tb_port_update_credits(up);
   2746	tb_switch_update_link_attributes(sw);
   2747
   2748	tb_sw_dbg(sw, "lane bonding disabled\n");
   2749}
   2750
   2751/**
   2752 * tb_switch_configure_link() - Set link configured
   2753 * @sw: Switch whose link is configured
   2754 *
   2755 * Sets the link upstream from @sw configured (from both ends) so that
   2756 * it will not be disconnected when the domain exits sleep. Can be
   2757 * called for any switch.
   2758 *
   2759 * It is recommended that this is called after lane bonding is enabled.
   2760 *
   2761 * Returns %0 on success and negative errno in case of error.
   2762 */
   2763int tb_switch_configure_link(struct tb_switch *sw)
   2764{
   2765	struct tb_port *up, *down;
   2766	int ret;
   2767
   2768	if (!tb_route(sw) || tb_switch_is_icm(sw))
   2769		return 0;
   2770
   2771	up = tb_upstream_port(sw);
   2772	if (tb_switch_is_usb4(up->sw))
   2773		ret = usb4_port_configure(up);
   2774	else
   2775		ret = tb_lc_configure_port(up);
   2776	if (ret)
   2777		return ret;
   2778
   2779	down = up->remote;
   2780	if (tb_switch_is_usb4(down->sw))
   2781		return usb4_port_configure(down);
   2782	return tb_lc_configure_port(down);
   2783}
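/*
 * Editor's note (illustrative, not part of the driver): as recommended in the
 * kernel-doc above, a software connection manager typically enables lane
 * bonding before marking the link configured, roughly:
 *
 *	tb_switch_lane_bonding_enable(sw);
 *	if (tb_switch_configure_link(sw))
 *		tb_sw_warn(sw, "failed to configure link\n");
 *
 * The corresponding teardown uses tb_switch_unconfigure_link() and
 * tb_switch_lane_bonding_disable().
 */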
   2784
   2785/**
   2786 * tb_switch_unconfigure_link() - Unconfigure link
   2787 * @sw: Switch whose link is unconfigured
   2788 *
   2789 * Sets the link unconfigured so the @sw will be disconnected if the
   2790	 * domain exits sleep.
   2791 */
   2792void tb_switch_unconfigure_link(struct tb_switch *sw)
   2793{
   2794	struct tb_port *up, *down;
   2795
   2796	if (sw->is_unplugged)
   2797		return;
   2798	if (!tb_route(sw) || tb_switch_is_icm(sw))
   2799		return;
   2800
   2801	up = tb_upstream_port(sw);
   2802	if (tb_switch_is_usb4(up->sw))
   2803		usb4_port_unconfigure(up);
   2804	else
   2805		tb_lc_unconfigure_port(up);
   2806
   2807	down = up->remote;
   2808	if (tb_switch_is_usb4(down->sw))
   2809		usb4_port_unconfigure(down);
   2810	else
   2811		tb_lc_unconfigure_port(down);
   2812}
   2813
   2814static void tb_switch_credits_init(struct tb_switch *sw)
   2815{
   2816	if (tb_switch_is_icm(sw))
   2817		return;
   2818	if (!tb_switch_is_usb4(sw))
   2819		return;
   2820	if (usb4_switch_credits_init(sw))
   2821		tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
   2822}
   2823
   2824/**
   2825 * tb_switch_add() - Add a switch to the domain
   2826 * @sw: Switch to add
   2827 *
   2828	 * This is the last step in adding a switch to the domain. It will read
   2829	 * identification information from the DROM and initialize the ports so that
   2830 * they can be used to connect other switches. The switch will be
   2831 * exposed to the userspace when this function successfully returns. To
   2832 * remove and release the switch, call tb_switch_remove().
   2833 *
   2834 * Return: %0 in case of success and negative errno in case of failure
   2835 */
   2836int tb_switch_add(struct tb_switch *sw)
   2837{
   2838	int i, ret;
   2839
   2840	/*
   2841	 * Initialize the DMA control port now before we read the DROM. Recent
   2842	 * host controllers have a more complete DROM in NVM that includes
   2843	 * vendor and model identification strings which we then expose
   2844	 * to the userspace. The NVM can be accessed through the DMA
   2845	 * configuration based mailbox.
   2846	 */
   2847	ret = tb_switch_add_dma_port(sw);
   2848	if (ret) {
   2849		dev_err(&sw->dev, "failed to add DMA port\n");
   2850		return ret;
   2851	}
   2852
   2853	if (!sw->safe_mode) {
   2854		tb_switch_credits_init(sw);
   2855
   2856		/* read drom */
   2857		ret = tb_drom_read(sw);
   2858		if (ret)
   2859			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
   2860		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
   2861
   2862		tb_check_quirks(sw);
   2863
   2864		ret = tb_switch_set_uuid(sw);
   2865		if (ret) {
   2866			dev_err(&sw->dev, "failed to set UUID\n");
   2867			return ret;
   2868		}
   2869
   2870		for (i = 0; i <= sw->config.max_port_number; i++) {
   2871			if (sw->ports[i].disabled) {
   2872				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
   2873				continue;
   2874			}
   2875			ret = tb_init_port(&sw->ports[i]);
   2876			if (ret) {
   2877				dev_err(&sw->dev, "failed to initialize port %d\n", i);
   2878				return ret;
   2879			}
   2880		}
   2881
   2882		tb_switch_default_link_ports(sw);
   2883
   2884		ret = tb_switch_update_link_attributes(sw);
   2885		if (ret)
   2886			return ret;
   2887
   2888		ret = tb_switch_tmu_init(sw);
   2889		if (ret)
   2890			return ret;
   2891	}
   2892
   2893	ret = device_add(&sw->dev);
   2894	if (ret) {
   2895		dev_err(&sw->dev, "failed to add device: %d\n", ret);
   2896		return ret;
   2897	}
   2898
   2899	if (tb_route(sw)) {
   2900		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
   2901			 sw->vendor, sw->device);
   2902		if (sw->vendor_name && sw->device_name)
   2903			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
   2904				 sw->device_name);
   2905	}
   2906
   2907	ret = usb4_switch_add_ports(sw);
   2908	if (ret) {
   2909		dev_err(&sw->dev, "failed to add USB4 ports\n");
   2910		goto err_del;
   2911	}
   2912
   2913	ret = tb_switch_nvm_add(sw);
   2914	if (ret) {
   2915		dev_err(&sw->dev, "failed to add NVM devices\n");
   2916		goto err_ports;
   2917	}
   2918
   2919	/*
   2920	 * Thunderbolt routers do not generate wakeups themselves but
   2921	 * they forward wakeups from the tunneled protocols, so enable
   2922	 * wakeup here.
   2923	 */
   2924	device_init_wakeup(&sw->dev, true);
   2925
   2926	pm_runtime_set_active(&sw->dev);
   2927	if (sw->rpm) {
   2928		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
   2929		pm_runtime_use_autosuspend(&sw->dev);
   2930		pm_runtime_mark_last_busy(&sw->dev);
   2931		pm_runtime_enable(&sw->dev);
   2932		pm_request_autosuspend(&sw->dev);
   2933	}
   2934
   2935	tb_switch_debugfs_init(sw);
   2936	return 0;
   2937
   2938err_ports:
   2939	usb4_switch_remove_ports(sw);
   2940err_del:
   2941	device_del(&sw->dev);
   2942
   2943	return ret;
   2944}
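/*
 * Usage sketch (editor's illustration, not part of the driver): after a
 * successful tb_switch_configure(), enumeration is typically finished with
 * tb_switch_add(); on failure the caller is expected to drop its reference:
 *
 *	if (tb_switch_add(sw)) {
 *		tb_switch_put(sw);
 *		return;
 *	}
 *
 * The switch is later torn down with tb_switch_remove() (see below).
 */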
   2945
   2946/**
   2947 * tb_switch_remove() - Remove and release a switch
   2948 * @sw: Switch to remove
   2949 *
   2950	 * This will remove the switch from the domain and release it once the last
   2951	 * reference is dropped. If there are switches connected below
   2952 * this switch, they will be removed as well.
   2953 */
   2954void tb_switch_remove(struct tb_switch *sw)
   2955{
   2956	struct tb_port *port;
   2957
   2958	tb_switch_debugfs_remove(sw);
   2959
   2960	if (sw->rpm) {
   2961		pm_runtime_get_sync(&sw->dev);
   2962		pm_runtime_disable(&sw->dev);
   2963	}
   2964
   2965	/* port 0 is the switch itself and never has a remote */
   2966	tb_switch_for_each_port(sw, port) {
   2967		if (tb_port_has_remote(port)) {
   2968			tb_switch_remove(port->remote->sw);
   2969			port->remote = NULL;
   2970		} else if (port->xdomain) {
   2971			tb_xdomain_remove(port->xdomain);
   2972			port->xdomain = NULL;
   2973		}
   2974
   2975		/* Remove any downstream retimers */
   2976		tb_retimer_remove_all(port);
   2977	}
   2978
   2979	if (!sw->is_unplugged)
   2980		tb_plug_events_active(sw, false);
   2981
   2982	tb_switch_nvm_remove(sw);
   2983	usb4_switch_remove_ports(sw);
   2984
   2985	if (tb_route(sw))
   2986		dev_info(&sw->dev, "device disconnected\n");
   2987	device_unregister(&sw->dev);
   2988}
   2989
   2990/**
   2991 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
   2992 * @sw: Router to mark unplugged
   2993 */
   2994void tb_sw_set_unplugged(struct tb_switch *sw)
   2995{
   2996	struct tb_port *port;
   2997
   2998	if (sw == sw->tb->root_switch) {
   2999		tb_sw_WARN(sw, "cannot unplug root switch\n");
   3000		return;
   3001	}
   3002	if (sw->is_unplugged) {
   3003		tb_sw_WARN(sw, "is_unplugged already set\n");
   3004		return;
   3005	}
   3006	sw->is_unplugged = true;
   3007	tb_switch_for_each_port(sw, port) {
   3008		if (tb_port_has_remote(port))
   3009			tb_sw_set_unplugged(port->remote->sw);
   3010		else if (port->xdomain)
   3011			port->xdomain->is_unplugged = true;
   3012	}
   3013}
   3014
   3015static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
   3016{
   3017	if (flags)
   3018		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
   3019	else
   3020		tb_sw_dbg(sw, "disabling wakeup\n");
   3021
   3022	if (tb_switch_is_usb4(sw))
   3023		return usb4_switch_set_wake(sw, flags);
   3024	return tb_lc_set_wake(sw, flags);
   3025}
   3026
   3027int tb_switch_resume(struct tb_switch *sw)
   3028{
   3029	struct tb_port *port;
   3030	int err;
   3031
   3032	tb_sw_dbg(sw, "resuming switch\n");
   3033
   3034	/*
   3035	 * Check the UID of the connected switches, except for the root
   3036	 * switch, which we assume cannot be removed.
   3037	 */
   3038	if (tb_route(sw)) {
   3039		u64 uid;
   3040
   3041		/*
   3042		 * Check first that we can still read the switch config
   3043		 * space. It may be that there is now another domain
   3044		 * connected.
   3045		 */
   3046		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
   3047		if (err < 0) {
   3048			tb_sw_info(sw, "switch not present anymore\n");
   3049			return err;
   3050		}
   3051
   3052		/* We don't have any way to confirm this was the same device */
   3053		if (!sw->uid)
   3054			return -ENODEV;
   3055
   3056		if (tb_switch_is_usb4(sw))
   3057			err = usb4_switch_read_uid(sw, &uid);
   3058		else
   3059			err = tb_drom_read_uid_only(sw, &uid);
   3060		if (err) {
   3061			tb_sw_warn(sw, "uid read failed\n");
   3062			return err;
   3063		}
   3064		if (sw->uid != uid) {
   3065			tb_sw_info(sw,
   3066				"changed while suspended (uid %#llx -> %#llx)\n",
   3067				sw->uid, uid);
   3068			return -ENODEV;
   3069		}
   3070	}
   3071
   3072	err = tb_switch_configure(sw);
   3073	if (err)
   3074		return err;
   3075
   3076	/* Disable wakes */
   3077	tb_switch_set_wake(sw, 0);
   3078
   3079	err = tb_switch_tmu_init(sw);
   3080	if (err)
   3081		return err;
   3082
   3083	/* check for surviving downstream switches */
   3084	tb_switch_for_each_port(sw, port) {
   3085		if (!tb_port_is_null(port))
   3086			continue;
   3087
   3088		if (!tb_port_resume(port))
   3089			continue;
   3090
   3091		if (tb_wait_for_port(port, true) <= 0) {
   3092			tb_port_warn(port,
   3093				     "lost during suspend, disconnecting\n");
   3094			if (tb_port_has_remote(port))
   3095				tb_sw_set_unplugged(port->remote->sw);
   3096			else if (port->xdomain)
   3097				port->xdomain->is_unplugged = true;
   3098		} else {
   3099			/*
   3100			 * Always unlock the port so the downstream
   3101			 * switch/domain is accessible.
   3102			 */
   3103			if (tb_port_unlock(port))
   3104				tb_port_warn(port, "failed to unlock port\n");
   3105			if (port->remote && tb_switch_resume(port->remote->sw)) {
   3106				tb_port_warn(port,
   3107					     "lost during suspend, disconnecting\n");
   3108				tb_sw_set_unplugged(port->remote->sw);
   3109			}
   3110		}
   3111	}
   3112	return 0;
   3113}
   3114
   3115/**
   3116 * tb_switch_suspend() - Put a switch to sleep
   3117 * @sw: Switch to suspend
   3118 * @runtime: Is this runtime suspend or system sleep
   3119 *
   3120	 * Suspends the router and all its children. Enables wakes according to
   3121	 * the value of @runtime and then sets the sleep bit for the router. If @sw
   3122	 * is the host router, the domain is ready to go to sleep once this function
   3123 * returns.
   3124 */
   3125void tb_switch_suspend(struct tb_switch *sw, bool runtime)
   3126{
   3127	unsigned int flags = 0;
   3128	struct tb_port *port;
   3129	int err;
   3130
   3131	tb_sw_dbg(sw, "suspending switch\n");
   3132
   3133	/*
   3134	 * Actually only needed for Titan Ridge but for simplicity this can be
   3135	 * done for USB4 devices too as CLx is re-enabled at resume.
   3136	 */
   3137	if (tb_switch_disable_clx(sw, TB_CL0S))
   3138		tb_sw_warn(sw, "failed to disable CLx on upstream port\n");
   3139
   3140	err = tb_plug_events_active(sw, false);
   3141	if (err)
   3142		return;
   3143
   3144	tb_switch_for_each_port(sw, port) {
   3145		if (tb_port_has_remote(port))
   3146			tb_switch_suspend(port->remote->sw, runtime);
   3147	}
   3148
   3149	if (runtime) {
   3150		/* Trigger wake when something is plugged in/out */
   3151		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
   3152		flags |= TB_WAKE_ON_USB4;
   3153		flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
   3154	} else if (device_may_wakeup(&sw->dev)) {
   3155		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
   3156	}
   3157
   3158	tb_switch_set_wake(sw, flags);
   3159
   3160	if (tb_switch_is_usb4(sw))
   3161		usb4_switch_set_sleep(sw);
   3162	else
   3163		tb_lc_set_sleep(sw);
   3164}
   3165
   3166/**
   3167 * tb_switch_query_dp_resource() - Query availability of DP resource
   3168 * @sw: Switch whose DP resource is queried
   3169 * @in: DP IN port
   3170 *
   3171 * Queries availability of DP resource for DP tunneling using switch
   3172 * specific means. Returns %true if resource is available.
   3173 */
   3174bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
   3175{
   3176	if (tb_switch_is_usb4(sw))
   3177		return usb4_switch_query_dp_resource(sw, in);
   3178	return tb_lc_dp_sink_query(sw, in);
   3179}
   3180
   3181/**
   3182 * tb_switch_alloc_dp_resource() - Allocate available DP resource
   3183 * @sw: Switch whose DP resource is allocated
   3184 * @in: DP IN port
   3185 *
   3186 * Allocates DP resource for DP tunneling. The resource must be
   3187 * available for this to succeed (see tb_switch_query_dp_resource()).
   3188	 * Returns %0 on success and negative errno otherwise.
   3189 */
   3190int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
   3191{
   3192	int ret;
   3193
   3194	if (tb_switch_is_usb4(sw))
   3195		ret = usb4_switch_alloc_dp_resource(sw, in);
   3196	else
   3197		ret = tb_lc_dp_sink_alloc(sw, in);
   3198
   3199	if (ret)
   3200		tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
   3201			   in->port);
   3202	else
   3203		tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
   3204
   3205	return ret;
   3206}
   3207
   3208/**
   3209 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
   3210 * @sw: Switch whose DP resource is de-allocated
   3211 * @in: DP IN port
   3212 *
   3213 * De-allocates DP resource that was previously allocated for DP
   3214 * tunneling.
   3215 */
   3216void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
   3217{
   3218	int ret;
   3219
   3220	if (tb_switch_is_usb4(sw))
   3221		ret = usb4_switch_dealloc_dp_resource(sw, in);
   3222	else
   3223		ret = tb_lc_dp_sink_dealloc(sw, in);
   3224
   3225	if (ret)
   3226		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
   3227			   in->port);
   3228	else
   3229		tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
   3230}
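/*
 * Usage sketch (editor's illustration, not part of the driver): DP tunneling
 * pairs these helpers, releasing the resource again if the tunnel cannot be
 * set up. establish_dp_tunnel() below is a hypothetical placeholder for the
 * caller's tunnel setup:
 *
 *	if (!tb_switch_query_dp_resource(sw, in))
 *		return;
 *	if (tb_switch_alloc_dp_resource(sw, in))
 *		return;
 *	if (establish_dp_tunnel(in))
 *		tb_switch_dealloc_dp_resource(sw, in);
 */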
   3231
   3232struct tb_sw_lookup {
   3233	struct tb *tb;
   3234	u8 link;
   3235	u8 depth;
   3236	const uuid_t *uuid;
   3237	u64 route;
   3238};
   3239
   3240static int tb_switch_match(struct device *dev, const void *data)
   3241{
   3242	struct tb_switch *sw = tb_to_switch(dev);
   3243	const struct tb_sw_lookup *lookup = data;
   3244
   3245	if (!sw)
   3246		return 0;
   3247	if (sw->tb != lookup->tb)
   3248		return 0;
   3249
   3250	if (lookup->uuid)
   3251		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
   3252
   3253	if (lookup->route) {
   3254		return sw->config.route_lo == lower_32_bits(lookup->route) &&
   3255		       sw->config.route_hi == upper_32_bits(lookup->route);
   3256	}
   3257
   3258	/* Root switch is matched only by depth */
   3259	if (!lookup->depth)
   3260		return !sw->depth;
   3261
   3262	return sw->link == lookup->link && sw->depth == lookup->depth;
   3263}
   3264
   3265/**
   3266 * tb_switch_find_by_link_depth() - Find switch by link and depth
   3267	 * @tb: Domain the switch belongs to
   3268	 * @link: Link number the switch is connected to
   3269	 * @depth: Depth of the switch in the link
   3270 *
   3271 * Returned switch has reference count increased so the caller needs to
   3272 * call tb_switch_put() when done with the switch.
   3273 */
   3274struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
   3275{
   3276	struct tb_sw_lookup lookup;
   3277	struct device *dev;
   3278
   3279	memset(&lookup, 0, sizeof(lookup));
   3280	lookup.tb = tb;
   3281	lookup.link = link;
   3282	lookup.depth = depth;
   3283
   3284	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
   3285	if (dev)
   3286		return tb_to_switch(dev);
   3287
   3288	return NULL;
   3289}
   3290
   3291/**
   3292 * tb_switch_find_by_uuid() - Find switch by UUID
   3293	 * @tb: Domain the switch belongs to
   3294 * @uuid: UUID to look for
   3295 *
   3296 * Returned switch has reference count increased so the caller needs to
   3297 * call tb_switch_put() when done with the switch.
   3298 */
   3299struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
   3300{
   3301	struct tb_sw_lookup lookup;
   3302	struct device *dev;
   3303
   3304	memset(&lookup, 0, sizeof(lookup));
   3305	lookup.tb = tb;
   3306	lookup.uuid = uuid;
   3307
   3308	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
   3309	if (dev)
   3310		return tb_to_switch(dev);
   3311
   3312	return NULL;
   3313}
   3314
   3315/**
   3316 * tb_switch_find_by_route() - Find switch by route string
   3317	 * @tb: Domain the switch belongs to
   3318 * @route: Route string to look for
   3319 *
   3320 * Returned switch has reference count increased so the caller needs to
   3321 * call tb_switch_put() when done with the switch.
   3322 */
   3323struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
   3324{
   3325	struct tb_sw_lookup lookup;
   3326	struct device *dev;
   3327
   3328	if (!route)
   3329		return tb_switch_get(tb->root_switch);
   3330
   3331	memset(&lookup, 0, sizeof(lookup));
   3332	lookup.tb = tb;
   3333	lookup.route = route;
   3334
   3335	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
   3336	if (dev)
   3337		return tb_to_switch(dev);
   3338
   3339	return NULL;
   3340}
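/*
 * Editor's note (illustrative, not part of the driver): all three find
 * helpers above take a reference on the returned switch, so callers pair
 * them with tb_switch_put(), e.g.
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		// ... use sw ...
 *		tb_switch_put(sw);
 *	}
 */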
   3341
   3342/**
   3343 * tb_switch_find_port() - return the first port of @type on @sw or NULL
   3344 * @sw: Switch to find the port from
   3345 * @type: Port type to look for
   3346 */
   3347struct tb_port *tb_switch_find_port(struct tb_switch *sw,
   3348				    enum tb_port_type type)
   3349{
   3350	struct tb_port *port;
   3351
   3352	tb_switch_for_each_port(sw, port) {
   3353		if (port->config.type == type)
   3354			return port;
   3355	}
   3356
   3357	return NULL;
   3358}
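/*
 * Usage sketch (editor's illustration, not part of the driver): finding the
 * first PCIe downstream adapter of a router; TB_TYPE_PCIE_DOWN is assumed
 * from the port type enumeration in tb.h:
 *
 *	struct tb_port *down;
 *
 *	down = tb_switch_find_port(sw, TB_TYPE_PCIE_DOWN);
 *	if (!down)
 *		return -ENODEV;
 */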
   3359
   3360static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
   3361{
   3362	u32 phy;
   3363	int ret;
   3364
   3365	ret = tb_port_read(port, &phy, TB_CFG_PORT,
   3366			   port->cap_phy + LANE_ADP_CS_1, 1);
   3367	if (ret)
   3368		return ret;
   3369
   3370	if (secondary)
   3371		phy |= LANE_ADP_CS_1_PMS;
   3372	else
   3373		phy &= ~LANE_ADP_CS_1_PMS;
   3374
   3375	return tb_port_write(port, &phy, TB_CFG_PORT,
   3376			     port->cap_phy + LANE_ADP_CS_1, 1);
   3377}
   3378
   3379static int tb_port_pm_secondary_enable(struct tb_port *port)
   3380{
   3381	return __tb_port_pm_secondary_set(port, true);
   3382}
   3383
   3384static int tb_port_pm_secondary_disable(struct tb_port *port)
   3385{
   3386	return __tb_port_pm_secondary_set(port, false);
   3387}
   3388
   3389static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
   3390{
   3391	struct tb_switch *parent = tb_switch_parent(sw);
   3392	struct tb_port *up, *down;
   3393	int ret;
   3394
   3395	if (!tb_route(sw))
   3396		return 0;
   3397
   3398	up = tb_upstream_port(sw);
   3399	down = tb_port_at(tb_route(sw), parent);
   3400	ret = tb_port_pm_secondary_enable(up);
   3401	if (ret)
   3402		return ret;
   3403
   3404	return tb_port_pm_secondary_disable(down);
   3405}
   3406
   3407/* Called for USB4 or Titan Ridge routers only */
   3408static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
   3409{
   3410	u32 mask, val;
   3411	bool ret;
   3412
   3413	/* Don't enable CLx in case of two single-lane links */
   3414	if (!port->bonded && port->dual_link_port)
   3415		return false;
   3416
   3417	/* Don't enable CLx in case of inter-domain link */
   3418	if (port->xdomain)
   3419		return false;
   3420
   3421	if (tb_switch_is_usb4(port->sw)) {
   3422		if (!usb4_port_clx_supported(port))
   3423			return false;
   3424	} else if (!tb_lc_is_clx_supported(port)) {
   3425		return false;
   3426	}
   3427
   3428	switch (clx) {
   3429	case TB_CL0S:
   3430		/* CL0s support also requires CL1 support */
   3431		mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
   3432		break;
   3433
   3434	/* For now we support only CL0s. Not CL1, CL2 */
   3435	case TB_CL1:
   3436	case TB_CL2:
   3437	default:
   3438		return false;
   3439	}
   3440
   3441	ret = tb_port_read(port, &val, TB_CFG_PORT,
   3442			   port->cap_phy + LANE_ADP_CS_0, 1);
   3443	if (ret)
   3444		return false;
   3445
   3446	return !!(val & mask);
   3447}
   3448
   3449static inline bool tb_port_cl0s_supported(struct tb_port *port)
   3450{
   3451	return tb_port_clx_supported(port, TB_CL0S);
   3452}
   3453
   3454static int __tb_port_cl0s_set(struct tb_port *port, bool enable)
   3455{
   3456	u32 phy, mask;
   3457	int ret;
   3458
   3459	/* Enabling CL0s also requires enabling CL1 */
   3460	mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
   3461	ret = tb_port_read(port, &phy, TB_CFG_PORT,
   3462			   port->cap_phy + LANE_ADP_CS_1, 1);
   3463	if (ret)
   3464		return ret;
   3465
   3466	if (enable)
   3467		phy |= mask;
   3468	else
   3469		phy &= ~mask;
   3470
   3471	return tb_port_write(port, &phy, TB_CFG_PORT,
   3472			     port->cap_phy + LANE_ADP_CS_1, 1);
   3473}
   3474
   3475static int tb_port_cl0s_disable(struct tb_port *port)
   3476{
   3477	return __tb_port_cl0s_set(port, false);
   3478}
   3479
   3480static int tb_port_cl0s_enable(struct tb_port *port)
   3481{
   3482	return __tb_port_cl0s_set(port, true);
   3483}
   3484
   3485static int tb_switch_enable_cl0s(struct tb_switch *sw)
   3486{
   3487	struct tb_switch *parent = tb_switch_parent(sw);
   3488	bool up_cl0s_support, down_cl0s_support;
   3489	struct tb_port *up, *down;
   3490	int ret;
   3491
   3492	if (!tb_switch_is_clx_supported(sw))
   3493		return 0;
   3494
   3495	/*
   3496	 * CLx for the host router's downstream port is enabled as part of
   3497	 * the downstream router enabling procedure.
   3498	 */
   3499	if (!tb_route(sw))
   3500		return 0;
   3501
   3502	/* Enable CLx only for first hop router (depth = 1) */
   3503	if (tb_route(parent))
   3504		return 0;
   3505
   3506	ret = tb_switch_pm_secondary_resolve(sw);
   3507	if (ret)
   3508		return ret;
   3509
   3510	up = tb_upstream_port(sw);
   3511	down = tb_port_at(tb_route(sw), parent);
   3512
   3513	up_cl0s_support = tb_port_cl0s_supported(up);
   3514	down_cl0s_support = tb_port_cl0s_supported(down);
   3515
   3516	tb_port_dbg(up, "CL0s %ssupported\n",
   3517		    up_cl0s_support ? "" : "not ");
   3518	tb_port_dbg(down, "CL0s %ssupported\n",
   3519		    down_cl0s_support ? "" : "not ");
   3520
   3521	if (!up_cl0s_support || !down_cl0s_support)
   3522		return -EOPNOTSUPP;
   3523
   3524	ret = tb_port_cl0s_enable(up);
   3525	if (ret)
   3526		return ret;
   3527
   3528	ret = tb_port_cl0s_enable(down);
   3529	if (ret) {
   3530		tb_port_cl0s_disable(up);
   3531		return ret;
   3532	}
   3533
   3534	ret = tb_switch_mask_clx_objections(sw);
   3535	if (ret) {
   3536		tb_port_cl0s_disable(up);
   3537		tb_port_cl0s_disable(down);
   3538		return ret;
   3539	}
   3540
   3541	sw->clx = TB_CL0S;
   3542
   3543	tb_port_dbg(up, "CL0s enabled\n");
   3544	return 0;
   3545}
   3546
   3547/**
   3548 * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
   3549 * @sw: Router to enable CLx for
   3550 * @clx: The CLx state to enable
   3551 *
   3552	 * Enable the CLx state only for the first hop router. That is the most
   3553	 * common use case; it is intended for better thermal management and so
   3554	 * helps to improve performance. CLx is enabled only if both sides of the
   3555	 * link support CLx, if both sides of the link are not configured as two
   3556	 * single-lane links, and only if the link is not an inter-domain link. The
   3557	 * complete set of conditions is described in CM Guide 1.0 section 8.1.
   3558 *
   3559 * Return: Returns 0 on success or an error code on failure.
   3560 */
   3561int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
   3562{
   3563	struct tb_switch *root_sw = sw->tb->root_switch;
   3564
   3565	if (!clx_enabled)
   3566		return 0;
   3567
   3568	/*
   3569	 * CLx is not enabled and validated on Intel USB4 platforms before
   3570	 * Alder Lake.
   3571	 */
   3572	if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
   3573		return 0;
   3574
   3575	switch (clx) {
   3576	case TB_CL0S:
   3577		return tb_switch_enable_cl0s(sw);
   3578
   3579	default:
   3580		return -EOPNOTSUPP;
   3581	}
   3582}
   3583
   3584static int tb_switch_disable_cl0s(struct tb_switch *sw)
   3585{
   3586	struct tb_switch *parent = tb_switch_parent(sw);
   3587	struct tb_port *up, *down;
   3588	int ret;
   3589
   3590	if (!tb_switch_is_clx_supported(sw))
   3591		return 0;
   3592
   3593	/*
   3594	 * CLx for the host router's downstream port is disabled as part of
   3595	 * the downstream router disabling procedure.
   3596	 */
   3597	if (!tb_route(sw))
   3598		return 0;
   3599
   3600	/* Disable CLx only for first hop router (depth = 1) */
   3601	if (tb_route(parent))
   3602		return 0;
   3603
   3604	up = tb_upstream_port(sw);
   3605	down = tb_port_at(tb_route(sw), parent);
   3606	ret = tb_port_cl0s_disable(up);
   3607	if (ret)
   3608		return ret;
   3609
   3610	ret = tb_port_cl0s_disable(down);
   3611	if (ret)
   3612		return ret;
   3613
   3614	sw->clx = TB_CLX_DISABLE;
   3615
   3616	tb_port_dbg(up, "CL0s disabled\n");
   3617	return 0;
   3618}
   3619
   3620/**
   3621 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
   3622 * @sw: Router to disable CLx for
   3623 * @clx: The CLx state to disable
   3624 *
   3625 * Return: Returns 0 on success or an error code on failure.
   3626 */
   3627int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
   3628{
   3629	if (!clx_enabled)
   3630		return 0;
   3631
   3632	switch (clx) {
   3633	case TB_CL0S:
   3634		return tb_switch_disable_cl0s(sw);
   3635
   3636	default:
   3637		return -EOPNOTSUPP;
   3638	}
   3639}
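/*
 * Usage sketch (editor's illustration, not part of the driver): a connection
 * manager typically attempts CL0s right after enumerating a first-depth
 * router and treats missing support as non-fatal, roughly:
 *
 *	int ret;
 *
 *	ret = tb_switch_enable_clx(sw, TB_CL0S);
 *	if (ret && ret != -EOPNOTSUPP)
 *		tb_sw_warn(sw, "failed to enable CLx: %d\n", ret);
 */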
   3640
   3641/**
   3642 * tb_switch_mask_clx_objections() - Mask CLx objections for a router
   3643 * @sw: Router to mask objections for
   3644 *
   3645 * Mask the objections coming from the second depth routers in order to
   3646 * stop these objections from interfering with the CLx states of the first
   3647 * depth link.
   3648 */
   3649int tb_switch_mask_clx_objections(struct tb_switch *sw)
   3650{
   3651	int up_port = sw->config.upstream_port_number;
   3652	u32 offset, val[2], mask_obj, unmask_obj;
   3653	int ret, i;
   3654
   3655	/* Only Titan Ridge of the pre-USB4 devices supports CLx states */
   3656	if (!tb_switch_is_titan_ridge(sw))
   3657		return 0;
   3658
   3659	if (!tb_route(sw))
   3660		return 0;
   3661
   3662	/*
   3663	 * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
   3664	 * Port A consists of lane adapters 1,2 and
   3665	 * Port B consists of lane adapters 3,4
   3666	 * If upstream port is A, (lanes are 1,2), we mask objections from
   3667	 * port B (lanes 3,4) and unmask objections from Port A and vice-versa.
   3668	 */
   3669	if (up_port == 1) {
   3670		mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
   3671		unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
   3672		offset = TB_LOW_PWR_C1_CL1;
   3673	} else {
   3674		mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
   3675		unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
   3676		offset = TB_LOW_PWR_C3_CL1;
   3677	}
   3678
   3679	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
   3680			 sw->cap_lp + offset, ARRAY_SIZE(val));
   3681	if (ret)
   3682		return ret;
   3683
   3684	for (i = 0; i < ARRAY_SIZE(val); i++) {
   3685		val[i] |= mask_obj;
   3686		val[i] &= ~unmask_obj;
   3687	}
   3688
   3689	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
   3690			   sw->cap_lp + offset, ARRAY_SIZE(val));
   3691}
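
/*
 * Illustrative sketch, not part of the original driver: for a Titan
 * Ridge device router the objection masking documented above logically
 * follows a successful CL0s enable on the first-depth link. The wrapper
 * below is hypothetical and only shows one plausible call order.
 */
static int __maybe_unused example_enable_cl0s_titan_ridge(struct tb_switch *sw)
{
	int ret;

	ret = tb_switch_enable_clx(sw, TB_CL0S);
	if (ret)
		return ret;

	/* Returns 0 without touching anything but Titan Ridge device routers */
	return tb_switch_mask_clx_objections(sw);
}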
   3692
   3693/*
   3694 * Can be used to read/write a specified PCIe bridge of any Thunderbolt 3
   3695 * device. For now used only for Titan Ridge.
   3696 */
   3697static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
   3698				       unsigned int pcie_offset, u32 value)
   3699{
   3700	u32 offset, command, val;
   3701	int ret;
   3702
   3703	if (sw->generation != 3)
   3704		return -EOPNOTSUPP;
   3705
   3706	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
   3707	ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
   3708	if (ret)
   3709		return ret;
   3710
   3711	command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
   3712	command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
   3713	command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
   3714	command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
   3715			<< TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
   3716	command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;
   3717
   3718	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
   3719
   3720	ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
   3721	if (ret)
   3722		return ret;
   3723
   3724	ret = tb_switch_wait_for_bit(sw, offset,
   3725				     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
   3726	if (ret)
   3727		return ret;
   3728
   3729	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
   3730	if (ret)
   3731		return ret;
   3732
   3733	if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
   3734		return -ETIMEDOUT;
   3735
   3736	return 0;
   3737}
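
/*
 * Illustrative sketch, not part of the original driver: the helper
 * above hides the plug events mailbox handshake, so a hypothetical
 * caller only picks the bridge number, PCIe config offset and value,
 * and handles -EOPNOTSUPP (not a Thunderbolt 3 router) or -ETIMEDOUT
 * (the command did not complete in time). The wrapper name is made up
 * for illustration only.
 */
static int __maybe_unused example_pcie_bridge_poke(struct tb_switch *sw,
						   unsigned int bridge,
						   unsigned int offset, u32 val)
{
	int ret;

	ret = tb_switch_pcie_bridge_write(sw, bridge, offset, val);
	if (ret == -ETIMEDOUT)
		tb_sw_warn(sw, "PCIe bridge write timed out\n");
	return ret;
}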
   3738
   3739/**
   3740 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
   3741 * @sw: Router to enable PCIe L1
   3742 *
   3743 * For a Titan Ridge switch to enter a CLx state, its PCIe bridges must
   3744 * enable entry to the PCIe L1 state. Shall be called after the upstream
   3745 * PCIe tunnel has been configured. Due to an Intel platform limitation,
   3746 * shall be called only for the first hop switch.
   3747 */
   3748int tb_switch_pcie_l1_enable(struct tb_switch *sw)
   3749{
   3750	struct tb_switch *parent = tb_switch_parent(sw);
   3751	int ret;
   3752
   3753	if (!tb_route(sw))
   3754		return 0;
   3755
   3756	if (!tb_switch_is_titan_ridge(sw))
   3757		return 0;
   3758
   3759	/* Enable PCIe L1 only for the first hop router (depth = 1) */
   3760	if (tb_route(parent))
   3761		return 0;
   3762
   3763	/* Write to downstream PCIe bridge #5 aka Dn4 */
   3764	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
   3765	if (ret)
   3766		return ret;
   3767
   3768	/* Write to Upstream PCIe bridge #0 aka Up0 */
   3769	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
   3770}
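
/*
 * Illustrative sketch, not part of the original driver: per the
 * kernel-doc above, a hypothetical connection manager path would call
 * this once the upstream PCIe tunnel of a router is up; the function
 * itself returns 0 and does nothing for anything but a first-depth
 * Titan Ridge router.
 */
static void __maybe_unused example_after_pcie_tunnel_up(struct tb_switch *sw)
{
	if (tb_switch_pcie_l1_enable(sw))
		tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
}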
   3771
   3772/**
   3773 * tb_switch_xhci_connect() - Connect internal xHCI
   3774 * @sw: Router whose xHCI to connect
   3775 *
   3776 * Can be called for any router. For Alpine Ridge and Titan Ridge this
   3777 * performs special flows that make the xHCI functional for any device
   3778 * connected to the Type-C port. Call only after the PCIe tunnel has been
   3779 * established. The function only performs the connect if not done already,
   3780 * so it can be called several times for the same router.
   3781 */
   3782int tb_switch_xhci_connect(struct tb_switch *sw)
   3783{
   3784	bool usb_port1, usb_port3, xhci_port1, xhci_port3;
   3785	struct tb_port *port1, *port3;
   3786	int ret;
   3787
   3788	port1 = &sw->ports[1];
   3789	port3 = &sw->ports[3];
   3790
   3791	if (tb_switch_is_alpine_ridge(sw)) {
   3792		usb_port1 = tb_lc_is_usb_plugged(port1);
   3793		usb_port3 = tb_lc_is_usb_plugged(port3);
   3794		xhci_port1 = tb_lc_is_xhci_connected(port1);
   3795		xhci_port3 = tb_lc_is_xhci_connected(port3);
   3796
   3797		/* Figure out correct USB port to connect */
   3798		if (usb_port1 && !xhci_port1) {
   3799			ret = tb_lc_xhci_connect(port1);
   3800			if (ret)
   3801				return ret;
   3802		}
   3803		if (usb_port3 && !xhci_port3)
   3804			return tb_lc_xhci_connect(port3);
   3805	} else if (tb_switch_is_titan_ridge(sw)) {
   3806		ret = tb_lc_xhci_connect(port1);
   3807		if (ret)
   3808			return ret;
   3809		return tb_lc_xhci_connect(port3);
   3810	}
   3811
   3812	return 0;
   3813}
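
/*
 * Illustrative sketch, not part of the original driver: because the
 * connect is idempotent, a hypothetical caller can invoke it both when
 * the PCIe tunnel is first established and again on resume without
 * tracking state. The helper name is made up for illustration only.
 */
static void __maybe_unused example_bring_up_xhci(struct tb_switch *sw)
{
	if (tb_switch_xhci_connect(sw))
		tb_sw_info(sw, "failed to connect internal xHCI\n");
}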
   3814
   3815/**
   3816 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
   3817 * @sw: Router whose xHCI to disconnect
   3818 *
   3819 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
   3820 * ports.
   3821 */
   3822void tb_switch_xhci_disconnect(struct tb_switch *sw)
   3823{
   3824	if (sw->generation == 3) {
   3825		struct tb_port *port1 = &sw->ports[1];
   3826		struct tb_port *port3 = &sw->ports[3];
   3827
   3828		tb_lc_xhci_disconnect(port1);
   3829		tb_port_dbg(port1, "disconnected xHCI\n");
   3830		tb_lc_xhci_disconnect(port3);
   3831		tb_port_dbg(port3, "disconnected xHCI\n");
   3832	}
   3833}
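
/*
 * Illustrative sketch, not part of the original driver: the disconnect
 * has no return value and only acts on generation 3 routers (Alpine
 * Ridge / Titan Ridge), so a hypothetical teardown path can call it
 * unconditionally before the PCIe tunnel is removed. The helper name
 * is made up for illustration only.
 */
static void __maybe_unused example_tear_down_xhci(struct tb_switch *sw)
{
	tb_switch_xhci_disconnect(sw);
}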