cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tunnel.c (45313B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Thunderbolt driver - Tunneling support
      4 *
      5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
      6 * Copyright (C) 2019, Intel Corporation
      7 */
      8
      9#include <linux/delay.h>
     10#include <linux/slab.h>
     11#include <linux/list.h>
     12
     13#include "tunnel.h"
     14#include "tb.h"
     15
     16/* PCIe adapters always use HopID 8 for both directions */
     17#define TB_PCI_HOPID			8
     18
     19#define TB_PCI_PATH_DOWN		0
     20#define TB_PCI_PATH_UP			1
     21
     22/* USB3 adapters always use HopID 8 for both directions */
     23#define TB_USB3_HOPID			8
     24
     25#define TB_USB3_PATH_DOWN		0
     26#define TB_USB3_PATH_UP			1
     27
     28/* DP adapters use HopID 8 for AUX and 9 for Video */
     29#define TB_DP_AUX_TX_HOPID		8
     30#define TB_DP_AUX_RX_HOPID		8
     31#define TB_DP_VIDEO_HOPID		9
     32
     33#define TB_DP_VIDEO_PATH_OUT		0
     34#define TB_DP_AUX_PATH_OUT		1
     35#define TB_DP_AUX_PATH_IN		2
     36
     37/* Minimum number of credits needed for PCIe path */
     38#define TB_MIN_PCIE_CREDITS		6U
     39/*
     40 * Number of credits we try to allocate for each DMA path if not limited
     41 * by the host router baMaxHI.
     42 */
     43#define TB_DMA_CREDITS			14U
     44/* Minimum number of credits for DMA path */
     45#define TB_MIN_DMA_CREDITS		1U
     46
     47static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
     48
     49#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
     50	do {                                                            \
     51		struct tb_tunnel *__tunnel = (tunnel);                  \
     52		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
     53		      tb_route(__tunnel->src_port->sw),                 \
     54		      __tunnel->src_port->port,                         \
     55		      tb_route(__tunnel->dst_port->sw),                 \
     56		      __tunnel->dst_port->port,                         \
     57		      tb_tunnel_names[__tunnel->type],			\
     58		      ## arg);                                          \
     59	} while (0)
     60
     61#define tb_tunnel_WARN(tunnel, fmt, arg...) \
     62	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
     63#define tb_tunnel_warn(tunnel, fmt, arg...) \
     64	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
     65#define tb_tunnel_info(tunnel, fmt, arg...) \
     66	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
     67#define tb_tunnel_dbg(tunnel, fmt, arg...) \
     68	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
     69
     70static inline unsigned int tb_usable_credits(const struct tb_port *port)
     71{
     72	return port->total_credits - port->ctl_credits;
     73}
     74
     75/**
     76 * tb_available_credits() - Available credits for PCIe and DMA
     77 * @port: Lane adapter to check
     78 * @max_dp_streams: If non-%NULL, stores the maximum number of simultaneous DP
     79 *		    streams possible through this lane adapter
     80 */
     81static unsigned int tb_available_credits(const struct tb_port *port,
     82					 size_t *max_dp_streams)
     83{
     84	const struct tb_switch *sw = port->sw;
     85	int credits, usb3, pcie, spare;
     86	size_t ndp;
     87
     88	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
     89	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
     90
     91	if (tb_acpi_is_xdomain_allowed()) {
     92		spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
     93		/* Add some credits for potential second DMA tunnel */
     94		spare += TB_MIN_DMA_CREDITS;
     95	} else {
     96		spare = 0;
     97	}
     98
     99	credits = tb_usable_credits(port);
    100	if (tb_acpi_may_tunnel_dp()) {
    101		/*
    102		 * Maximum number of DP streams possible through the
    103		 * lane adapter.
    104		 */
    105		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
    106			ndp = (credits - (usb3 + pcie + spare)) /
    107			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
    108		else
    109			ndp = 0;
    110	} else {
    111		ndp = 0;
    112	}
    113	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
    114	credits -= usb3;
    115
    116	if (max_dp_streams)
    117		*max_dp_streams = ndp;
    118
    119	return credits > 0 ? credits : 0;
    120}
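       /*
        * Illustrative example with hypothetical values: with 60 usable
        * credits, usb3 = 32, pcie = 6 and spare = 15, 7 credits remain
        * for DP; if min_dp_aux_credits + min_dp_main_credits = 19 then
        * ndp = 0, and the function returns 60 - 0 - 32 = 28.
        */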
    121
    122static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
    123					 enum tb_tunnel_type type)
    124{
    125	struct tb_tunnel *tunnel;
    126
    127	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
    128	if (!tunnel)
    129		return NULL;
    130
    131	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
    132	if (!tunnel->paths) {
    133		tb_tunnel_free(tunnel);
    134		return NULL;
    135	}
    136
    137	INIT_LIST_HEAD(&tunnel->list);
    138	tunnel->tb = tb;
    139	tunnel->npaths = npaths;
    140	tunnel->type = type;
    141
    142	return tunnel;
    143}
    144
    145static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
    146{
    147	int res;
    148
    149	res = tb_pci_port_enable(tunnel->src_port, activate);
    150	if (res)
    151		return res;
    152
    153	if (tb_port_is_pcie_up(tunnel->dst_port))
    154		return tb_pci_port_enable(tunnel->dst_port, activate);
    155
    156	return 0;
    157}
    158
    159static int tb_pci_init_credits(struct tb_path_hop *hop)
    160{
    161	struct tb_port *port = hop->in_port;
    162	struct tb_switch *sw = port->sw;
    163	unsigned int credits;
    164
    165	if (tb_port_use_credit_allocation(port)) {
    166		unsigned int available;
    167
    168		available = tb_available_credits(port, NULL);
    169		credits = min(sw->max_pcie_credits, available);
    170
    171		if (credits < TB_MIN_PCIE_CREDITS)
    172			return -ENOSPC;
    173
    174		credits = max(TB_MIN_PCIE_CREDITS, credits);
    175	} else {
    176		if (tb_port_is_null(port))
    177			credits = port->bonded ? 32 : 16;
    178		else
    179			credits = 7;
    180	}
    181
    182	hop->initial_credits = credits;
    183	return 0;
    184}
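       /*
        * For example, with 20 credits available and max_pcie_credits of 32,
        * the hop gets min(32, 20) = 20 credits; anything below
        * TB_MIN_PCIE_CREDITS (6) makes the path setup fail with -ENOSPC.
        */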
    185
    186static int tb_pci_init_path(struct tb_path *path)
    187{
    188	struct tb_path_hop *hop;
    189
    190	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
    191	path->egress_shared_buffer = TB_PATH_NONE;
    192	path->ingress_fc_enable = TB_PATH_ALL;
    193	path->ingress_shared_buffer = TB_PATH_NONE;
    194	path->priority = 3;
    195	path->weight = 1;
    196	path->drop_packages = 0;
    197
    198	tb_path_for_each_hop(path, hop) {
    199		int ret;
    200
    201		ret = tb_pci_init_credits(hop);
    202		if (ret)
    203			return ret;
    204	}
    205
    206	return 0;
    207}
    208
    209/**
    210 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
    211 * @tb: Pointer to the domain structure
    212 * @down: PCIe downstream adapter
    213 * @alloc_hopid: Allocate HopIDs from visited ports
    214 *
    215 * If @down adapter is active, follows the tunnel to the PCIe upstream
    216 * adapter and back. Returns the discovered tunnel or %NULL if there was
    217 * no tunnel.
    218 */
    219struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
    220					 bool alloc_hopid)
    221{
    222	struct tb_tunnel *tunnel;
    223	struct tb_path *path;
    224
    225	if (!tb_pci_port_is_enabled(down))
    226		return NULL;
    227
    228	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
    229	if (!tunnel)
    230		return NULL;
    231
    232	tunnel->activate = tb_pci_activate;
    233	tunnel->src_port = down;
    234
    235	/*
    236	 * Discover both paths even if they are not complete. We will
    237	 * clean them up by calling tb_tunnel_deactivate() below in that
    238	 * case.
    239	 */
    240	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
    241				&tunnel->dst_port, "PCIe Up", alloc_hopid);
    242	if (!path) {
    243		/* Just disable the downstream port */
    244		tb_pci_port_enable(down, false);
    245		goto err_free;
    246	}
    247	tunnel->paths[TB_PCI_PATH_UP] = path;
    248	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
    249		goto err_free;
    250
    251	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
    252				"PCIe Down", alloc_hopid);
    253	if (!path)
    254		goto err_deactivate;
    255	tunnel->paths[TB_PCI_PATH_DOWN] = path;
    256	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
    257		goto err_deactivate;
    258
    259	/* Validate that the tunnel is complete */
    260	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
    261		tb_port_warn(tunnel->dst_port,
    262			     "path does not end on a PCIe adapter, cleaning up\n");
    263		goto err_deactivate;
    264	}
    265
    266	if (down != tunnel->src_port) {
    267		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
    268		goto err_deactivate;
    269	}
    270
    271	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
    272		tb_tunnel_warn(tunnel,
    273			       "tunnel is not fully activated, cleaning up\n");
    274		goto err_deactivate;
    275	}
    276
    277	tb_tunnel_dbg(tunnel, "discovered\n");
    278	return tunnel;
    279
    280err_deactivate:
    281	tb_tunnel_deactivate(tunnel);
    282err_free:
    283	tb_tunnel_free(tunnel);
    284
    285	return NULL;
    286}
    287
    288/**
    289 * tb_tunnel_alloc_pci() - allocate a PCIe tunnel
    290 * @tb: Pointer to the domain structure
    291 * @up: PCIe upstream adapter port
    292 * @down: PCIe downstream adapter port
    293 *
    294 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
    295 * TB_TYPE_PCIE_DOWN.
    296 *
    297 * Return: Returns a tb_tunnel on success or NULL on failure.
    298 */
    299struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
    300				      struct tb_port *down)
    301{
    302	struct tb_tunnel *tunnel;
    303	struct tb_path *path;
    304
    305	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
    306	if (!tunnel)
    307		return NULL;
    308
    309	tunnel->activate = tb_pci_activate;
    310	tunnel->src_port = down;
    311	tunnel->dst_port = up;
    312
    313	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
    314			     "PCIe Down");
    315	if (!path)
    316		goto err_free;
    317	tunnel->paths[TB_PCI_PATH_DOWN] = path;
    318	if (tb_pci_init_path(path))
    319		goto err_free;
    320
    321	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
    322			     "PCIe Up");
    323	if (!path)
    324		goto err_free;
    325	tunnel->paths[TB_PCI_PATH_UP] = path;
    326	if (tb_pci_init_path(path))
    327		goto err_free;
    328
    329	return tunnel;
    330
    331err_free:
    332	tb_tunnel_free(tunnel);
    333	return NULL;
    334}
    335
    336static bool tb_dp_is_usb4(const struct tb_switch *sw)
    337{
    338	/* Titan Ridge DP adapters need the same treatment as USB4 */
    339	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
    340}
    341
    342static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
    343{
    344	int timeout = 10;
    345	u32 val;
    346	int ret;
    347
    348	/* Both ends need to support this */
    349	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
    350		return 0;
    351
    352	ret = tb_port_read(out, &val, TB_CFG_PORT,
    353			   out->cap_adap + DP_STATUS_CTRL, 1);
    354	if (ret)
    355		return ret;
    356
    357	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
    358
    359	ret = tb_port_write(out, &val, TB_CFG_PORT,
    360			    out->cap_adap + DP_STATUS_CTRL, 1);
    361	if (ret)
    362		return ret;
    363
    364	do {
    365		ret = tb_port_read(out, &val, TB_CFG_PORT,
    366				   out->cap_adap + DP_STATUS_CTRL, 1);
    367		if (ret)
    368			return ret;
    369		if (!(val & DP_STATUS_CTRL_CMHS))
    370			return 0;
    371		usleep_range(10, 100);
    372	} while (timeout--);
    373
    374	return -ETIMEDOUT;
    375}
    376
    377static inline u32 tb_dp_cap_get_rate(u32 val)
    378{
    379	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
    380
    381	switch (rate) {
    382	case DP_COMMON_CAP_RATE_RBR:
    383		return 1620;
    384	case DP_COMMON_CAP_RATE_HBR:
    385		return 2700;
    386	case DP_COMMON_CAP_RATE_HBR2:
    387		return 5400;
    388	case DP_COMMON_CAP_RATE_HBR3:
    389		return 8100;
    390	default:
    391		return 0;
    392	}
    393}
    394
    395static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
    396{
    397	val &= ~DP_COMMON_CAP_RATE_MASK;
    398	switch (rate) {
    399	default:
    400		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
    401		fallthrough;
    402	case 1620:
    403		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
    404		break;
    405	case 2700:
    406		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
    407		break;
    408	case 5400:
    409		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
    410		break;
    411	case 8100:
    412		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
    413		break;
    414	}
    415	return val;
    416}
    417
    418static inline u32 tb_dp_cap_get_lanes(u32 val)
    419{
    420	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
    421
    422	switch (lanes) {
    423	case DP_COMMON_CAP_1_LANE:
    424		return 1;
    425	case DP_COMMON_CAP_2_LANES:
    426		return 2;
    427	case DP_COMMON_CAP_4_LANES:
    428		return 4;
    429	default:
    430		return 0;
    431	}
    432}
    433
    434static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
    435{
    436	val &= ~DP_COMMON_CAP_LANES_MASK;
    437	switch (lanes) {
    438	default:
    439		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
    440		     lanes);
    441		fallthrough;
    442	case 1:
    443		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
    444		break;
    445	case 2:
    446		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
    447		break;
    448	case 4:
    449		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
    450		break;
    451	}
    452	return val;
    453}
    454
    455static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
    456{
    457	/* Tunneling removes the DP 8b/10b encoding */
    458	return rate * lanes * 8 / 10;
    459}
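       /*
        * For example, HBR2 (5400 Mb/s) over 4 lanes gives
        * 5400 * 4 * 8 / 10 = 17280 Mb/s of usable bandwidth, matching
        * the dp_bw[] table in tb_dp_reduce_bandwidth() below.
        */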
    460
    461static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
    462				  u32 out_rate, u32 out_lanes, u32 *new_rate,
    463				  u32 *new_lanes)
    464{
    465	static const u32 dp_bw[][2] = {
    466		/* Mb/s, lanes */
    467		{ 8100, 4 }, /* 25920 Mb/s */
    468		{ 5400, 4 }, /* 17280 Mb/s */
    469		{ 8100, 2 }, /* 12960 Mb/s */
    470		{ 2700, 4 }, /* 8640 Mb/s */
    471		{ 5400, 2 }, /* 8640 Mb/s */
    472		{ 8100, 1 }, /* 6480 Mb/s */
    473		{ 1620, 4 }, /* 5184 Mb/s */
    474		{ 5400, 1 }, /* 4320 Mb/s */
    475		{ 2700, 2 }, /* 4320 Mb/s */
    476		{ 1620, 2 }, /* 2592 Mb/s */
    477		{ 2700, 1 }, /* 2160 Mb/s */
    478		{ 1620, 1 }, /* 1296 Mb/s */
    479	};
    480	unsigned int i;
    481
    482	/*
    483	 * Find a combination that can fit into max_bw and does not
    484	 * exceed the maximum rate and lanes supported by the DP OUT and
    485	 * DP IN adapters.
    486	 */
    487	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
    488		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
    489			continue;
    490
    491		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
    492			continue;
    493
    494		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
    495			*new_rate = dp_bw[i][0];
    496			*new_lanes = dp_bw[i][1];
    497			return 0;
    498		}
    499	}
    500
    501	return -ENOSR;
    502}
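       /*
        * For example, with max_bw of 10000 Mb/s and both adapters capable
        * of HBR3 x4, the combinations above 10000 Mb/s are skipped and
        * { 2700, 4 } (8640 Mb/s) is chosen.
        */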
    503
    504static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
    505{
    506	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
    507	struct tb_port *out = tunnel->dst_port;
    508	struct tb_port *in = tunnel->src_port;
    509	int ret, max_bw;
    510
    511	/*
    512	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
    513	 * newer generation hardware.
    514	 */
    515	if (in->sw->generation < 2 || out->sw->generation < 2)
    516		return 0;
    517
    518	/*
    519	 * Perform connection manager handshake between IN and OUT ports
    520	 * before capabilities exchange can take place.
    521	 */
    522	ret = tb_dp_cm_handshake(in, out);
    523	if (ret)
    524		return ret;
    525
    526	/* Read both DP_LOCAL_CAP registers */
    527	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
    528			   in->cap_adap + DP_LOCAL_CAP, 1);
    529	if (ret)
    530		return ret;
    531
    532	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
    533			   out->cap_adap + DP_LOCAL_CAP, 1);
    534	if (ret)
    535		return ret;
    536
    537	/* Write IN local caps to OUT remote caps */
    538	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
    539			    out->cap_adap + DP_REMOTE_CAP, 1);
    540	if (ret)
    541		return ret;
    542
    543	in_rate = tb_dp_cap_get_rate(in_dp_cap);
    544	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
    545	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
    546		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
    547
    548	/*
    549	 * If the tunnel bandwidth is limited (max_bw is set) then see
    550	 * if we need to reduce bandwidth to fit there.
    551	 */
    552	out_rate = tb_dp_cap_get_rate(out_dp_cap);
    553	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
    554	bw = tb_dp_bandwidth(out_rate, out_lanes);
    555	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
    556		    out_rate, out_lanes, bw);
    557
    558	if (in->sw->config.depth < out->sw->config.depth)
    559		max_bw = tunnel->max_down;
    560	else
    561		max_bw = tunnel->max_up;
    562
    563	if (max_bw && bw > max_bw) {
    564		u32 new_rate, new_lanes, new_bw;
    565
    566		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
    567					     out_rate, out_lanes, &new_rate,
    568					     &new_lanes);
    569		if (ret) {
    570			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
    571			return ret;
    572		}
    573
    574		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
    575		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
    576			    new_rate, new_lanes, new_bw);
    577
    578		/*
    579		 * Set new rate and number of lanes before writing it to
    580		 * the IN port remote caps.
    581		 */
    582		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
    583		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
    584	}
    585
    586	/*
    587	 * Titan Ridge does not disable AUX timers when it gets
    588	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
    589	 * DP tunneling.
    590	 */
    591	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
    592		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
    593		tb_port_dbg(out, "disabling LTTPR\n");
    594	}
    595
    596	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
    597			     in->cap_adap + DP_REMOTE_CAP, 1);
    598}
    599
    600static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
    601{
    602	int ret;
    603
    604	if (active) {
    605		struct tb_path **paths;
    606		int last;
    607
    608		paths = tunnel->paths;
    609		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
    610
    611		tb_dp_port_set_hops(tunnel->src_port,
    612			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
    613			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
    614			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
    615
    616		tb_dp_port_set_hops(tunnel->dst_port,
    617			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
    618			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
    619			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
    620	} else {
    621		tb_dp_port_hpd_clear(tunnel->src_port);
    622		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
    623		if (tb_port_is_dpout(tunnel->dst_port))
    624			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
    625	}
    626
    627	ret = tb_dp_port_enable(tunnel->src_port, active);
    628	if (ret)
    629		return ret;
    630
    631	if (tb_port_is_dpout(tunnel->dst_port))
    632		return tb_dp_port_enable(tunnel->dst_port, active);
    633
    634	return 0;
    635}
    636
    637static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
    638				    int *consumed_down)
    639{
    640	struct tb_port *in = tunnel->src_port;
    641	const struct tb_switch *sw = in->sw;
    642	u32 val, rate = 0, lanes = 0;
    643	int ret;
    644
    645	if (tb_dp_is_usb4(sw)) {
    646		int timeout = 20;
    647
    648		/*
    649		 * Wait for DPRX done. Normally it should be already set
    650		 * for active tunnel.
    651		 */
    652		do {
    653			ret = tb_port_read(in, &val, TB_CFG_PORT,
    654					   in->cap_adap + DP_COMMON_CAP, 1);
    655			if (ret)
    656				return ret;
    657
    658			if (val & DP_COMMON_CAP_DPRX_DONE) {
    659				rate = tb_dp_cap_get_rate(val);
    660				lanes = tb_dp_cap_get_lanes(val);
    661				break;
    662			}
    663			msleep(250);
    664		} while (timeout--);
    665
    666		if (!timeout)
    667			return -ETIMEDOUT;
    668	} else if (sw->generation >= 2) {
    669		/*
    670		 * Read from the copied remote cap so that we take into
    671		 * account if capabilities were reduced during exchange.
    672		 */
    673		ret = tb_port_read(in, &val, TB_CFG_PORT,
    674				   in->cap_adap + DP_REMOTE_CAP, 1);
    675		if (ret)
    676			return ret;
    677
    678		rate = tb_dp_cap_get_rate(val);
    679		lanes = tb_dp_cap_get_lanes(val);
    680	} else {
    681		/* No bandwidth management for legacy devices */
    682		*consumed_up = 0;
    683		*consumed_down = 0;
    684		return 0;
    685	}
    686
    687	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
    688		*consumed_up = 0;
    689		*consumed_down = tb_dp_bandwidth(rate, lanes);
    690	} else {
    691		*consumed_up = tb_dp_bandwidth(rate, lanes);
    692		*consumed_down = 0;
    693	}
    694
    695	return 0;
    696}
    697
    698static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
    699{
    700	struct tb_port *port = hop->in_port;
    701	struct tb_switch *sw = port->sw;
    702
    703	if (tb_port_use_credit_allocation(port))
    704		hop->initial_credits = sw->min_dp_aux_credits;
    705	else
    706		hop->initial_credits = 1;
    707}
    708
    709static void tb_dp_init_aux_path(struct tb_path *path)
    710{
    711	struct tb_path_hop *hop;
    712
    713	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
    714	path->egress_shared_buffer = TB_PATH_NONE;
    715	path->ingress_fc_enable = TB_PATH_ALL;
    716	path->ingress_shared_buffer = TB_PATH_NONE;
    717	path->priority = 2;
    718	path->weight = 1;
    719
    720	tb_path_for_each_hop(path, hop)
    721		tb_dp_init_aux_credits(hop);
    722}
    723
    724static int tb_dp_init_video_credits(struct tb_path_hop *hop)
    725{
    726	struct tb_port *port = hop->in_port;
    727	struct tb_switch *sw = port->sw;
    728
    729	if (tb_port_use_credit_allocation(port)) {
    730		unsigned int nfc_credits;
    731		size_t max_dp_streams;
    732
    733		tb_available_credits(port, &max_dp_streams);
    734		/*
    735		 * Read the number of currently allocated NFC credits
    736		 * from the lane adapter. Since we only use them for DP
    737		 * tunneling we can use that to figure out how many DP
    738		 * tunnels already go through the lane adapter.
    739		 */
    740		nfc_credits = port->config.nfc_credits &
    741				ADP_CS_4_NFC_BUFFERS_MASK;
    742		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
    743			return -ENOSPC;
    744
    745		hop->nfc_credits = sw->min_dp_main_credits;
    746	} else {
    747		hop->nfc_credits = min(port->total_credits - 2, 12U);
    748	}
    749
    750	return 0;
    751}
    752
    753static int tb_dp_init_video_path(struct tb_path *path)
    754{
    755	struct tb_path_hop *hop;
    756
    757	path->egress_fc_enable = TB_PATH_NONE;
    758	path->egress_shared_buffer = TB_PATH_NONE;
    759	path->ingress_fc_enable = TB_PATH_NONE;
    760	path->ingress_shared_buffer = TB_PATH_NONE;
    761	path->priority = 1;
    762	path->weight = 1;
    763
    764	tb_path_for_each_hop(path, hop) {
    765		int ret;
    766
    767		ret = tb_dp_init_video_credits(hop);
    768		if (ret)
    769			return ret;
    770	}
    771
    772	return 0;
    773}
    774
    775/**
    776 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
    777 * @tb: Pointer to the domain structure
    778 * @in: DP in adapter
    779 * @alloc_hopid: Allocate HopIDs from visited ports
    780 *
    781 * If @in adapter is active, follows the tunnel to the DP out adapter
    782 * and back. Returns the discovered tunnel or %NULL if there was no
    783 * tunnel.
    784 *
    785 * Return: DP tunnel or %NULL if no tunnel found.
    786 */
    787struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
    788					bool alloc_hopid)
    789{
    790	struct tb_tunnel *tunnel;
    791	struct tb_port *port;
    792	struct tb_path *path;
    793
    794	if (!tb_dp_port_is_enabled(in))
    795		return NULL;
    796
    797	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
    798	if (!tunnel)
    799		return NULL;
    800
    801	tunnel->init = tb_dp_xchg_caps;
    802	tunnel->activate = tb_dp_activate;
    803	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
    804	tunnel->src_port = in;
    805
    806	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
    807				&tunnel->dst_port, "Video", alloc_hopid);
    808	if (!path) {
    809		/* Just disable the DP IN port */
    810		tb_dp_port_enable(in, false);
    811		goto err_free;
    812	}
    813	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
    814	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
    815		goto err_free;
    816
    817	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
    818				alloc_hopid);
    819	if (!path)
    820		goto err_deactivate;
    821	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
    822	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
    823
    824	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
    825				&port, "AUX RX", alloc_hopid);
    826	if (!path)
    827		goto err_deactivate;
    828	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
    829	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
    830
    831	/* Validate that the tunnel is complete */
    832	if (!tb_port_is_dpout(tunnel->dst_port)) {
    833		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
    834		goto err_deactivate;
    835	}
    836
    837	if (!tb_dp_port_is_enabled(tunnel->dst_port))
    838		goto err_deactivate;
    839
    840	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
    841		goto err_deactivate;
    842
    843	if (port != tunnel->src_port) {
    844		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
    845		goto err_deactivate;
    846	}
    847
    848	tb_tunnel_dbg(tunnel, "discovered\n");
    849	return tunnel;
    850
    851err_deactivate:
    852	tb_tunnel_deactivate(tunnel);
    853err_free:
    854	tb_tunnel_free(tunnel);
    855
    856	return NULL;
    857}
    858
    859/**
    860 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
    861 * @tb: Pointer to the domain structure
    862 * @in: DP in adapter port
    863 * @out: DP out adapter port
    864 * @link_nr: Preferred lane adapter when the link is not bonded
    865 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
    866 *	    if not limited)
    867 * @max_down: Maximum available downstream bandwidth for the DP tunnel
    868 *	      (%0 if not limited)
    869 *
    870 * Allocates a tunnel between @in and @out that is capable of tunneling
    871 * Display Port traffic.
    872 *
    873 * Return: Returns a tb_tunnel on success or NULL on failure.
    874 */
    875struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
    876				     struct tb_port *out, int link_nr,
    877				     int max_up, int max_down)
    878{
    879	struct tb_tunnel *tunnel;
    880	struct tb_path **paths;
    881	struct tb_path *path;
    882
    883	if (WARN_ON(!in->cap_adap || !out->cap_adap))
    884		return NULL;
    885
    886	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
    887	if (!tunnel)
    888		return NULL;
    889
    890	tunnel->init = tb_dp_xchg_caps;
    891	tunnel->activate = tb_dp_activate;
    892	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
    893	tunnel->src_port = in;
    894	tunnel->dst_port = out;
    895	tunnel->max_up = max_up;
    896	tunnel->max_down = max_down;
    897
    898	paths = tunnel->paths;
    899
    900	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
    901			     link_nr, "Video");
    902	if (!path)
    903		goto err_free;
    904	tb_dp_init_video_path(path);
    905	paths[TB_DP_VIDEO_PATH_OUT] = path;
    906
    907	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
    908			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
    909	if (!path)
    910		goto err_free;
    911	tb_dp_init_aux_path(path);
    912	paths[TB_DP_AUX_PATH_OUT] = path;
    913
    914	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
    915			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
    916	if (!path)
    917		goto err_free;
    918	tb_dp_init_aux_path(path);
    919	paths[TB_DP_AUX_PATH_IN] = path;
    920
    921	return tunnel;
    922
    923err_free:
    924	tb_tunnel_free(tunnel);
    925	return NULL;
    926}
    927
    928static unsigned int tb_dma_available_credits(const struct tb_port *port)
    929{
    930	const struct tb_switch *sw = port->sw;
    931	int credits;
    932
    933	credits = tb_available_credits(port, NULL);
    934	if (tb_acpi_may_tunnel_pcie())
    935		credits -= sw->max_pcie_credits;
    936	credits -= port->dma_credits;
    937
    938	return credits > 0 ? credits : 0;
    939}
    940
    941static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
    942{
    943	struct tb_port *port = hop->in_port;
    944
    945	if (tb_port_use_credit_allocation(port)) {
    946		unsigned int available = tb_dma_available_credits(port);
    947
    948		/*
    949		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
    950		 * DMA path cannot be established.
    951		 */
    952		if (available < TB_MIN_DMA_CREDITS)
    953			return -ENOSPC;
    954
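       		/* Clamp the request to what is available, i.e. credits = min(credits, available) */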
    955		while (credits > available)
    956			credits--;
    957
    958		tb_port_dbg(port, "reserving %u credits for DMA path\n",
    959			    credits);
    960
    961		port->dma_credits += credits;
    962	} else {
    963		if (tb_port_is_null(port))
    964			credits = port->bonded ? 14 : 6;
    965		else
    966			credits = min(port->total_credits, credits);
    967	}
    968
    969	hop->initial_credits = credits;
    970	return 0;
    971}
    972
    973/* Path from lane adapter to NHI */
    974static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
    975{
    976	struct tb_path_hop *hop;
    977	unsigned int i, tmp;
    978
    979	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
    980	path->ingress_fc_enable = TB_PATH_ALL;
    981	path->egress_shared_buffer = TB_PATH_NONE;
    982	path->ingress_shared_buffer = TB_PATH_NONE;
    983	path->priority = 5;
    984	path->weight = 1;
    985	path->clear_fc = true;
    986
    987	/*
    988	 * First lane adapter is the one connected to the remote host.
    989	 * We don't tunnel other traffic over this link so we can use all
    990	 * the credits (except the ones reserved for control traffic).
    991	 */
    992	hop = &path->hops[0];
    993	tmp = min(tb_usable_credits(hop->in_port), credits);
    994	hop->initial_credits = tmp;
    995	hop->in_port->dma_credits += tmp;
    996
    997	for (i = 1; i < path->path_length; i++) {
    998		int ret;
    999
   1000		ret = tb_dma_reserve_credits(&path->hops[i], credits);
   1001		if (ret)
   1002			return ret;
   1003	}
   1004
   1005	return 0;
   1006}
   1007
   1008/* Path from NHI to lane adapter */
   1009static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
   1010{
   1011	struct tb_path_hop *hop;
   1012
   1013	path->egress_fc_enable = TB_PATH_ALL;
   1014	path->ingress_fc_enable = TB_PATH_ALL;
   1015	path->egress_shared_buffer = TB_PATH_NONE;
   1016	path->ingress_shared_buffer = TB_PATH_NONE;
   1017	path->priority = 5;
   1018	path->weight = 1;
   1019	path->clear_fc = true;
   1020
   1021	tb_path_for_each_hop(path, hop) {
   1022		int ret;
   1023
   1024		ret = tb_dma_reserve_credits(hop, credits);
   1025		if (ret)
   1026			return ret;
   1027	}
   1028
   1029	return 0;
   1030}
   1031
   1032static void tb_dma_release_credits(struct tb_path_hop *hop)
   1033{
   1034	struct tb_port *port = hop->in_port;
   1035
   1036	if (tb_port_use_credit_allocation(port)) {
   1037		port->dma_credits -= hop->initial_credits;
   1038
   1039		tb_port_dbg(port, "released %u DMA path credits\n",
   1040			    hop->initial_credits);
   1041	}
   1042}
   1043
   1044static void tb_dma_deinit_path(struct tb_path *path)
   1045{
   1046	struct tb_path_hop *hop;
   1047
   1048	tb_path_for_each_hop(path, hop)
   1049		tb_dma_release_credits(hop);
   1050}
   1051
   1052static void tb_dma_deinit(struct tb_tunnel *tunnel)
   1053{
   1054	int i;
   1055
   1056	for (i = 0; i < tunnel->npaths; i++) {
   1057		if (!tunnel->paths[i])
   1058			continue;
   1059		tb_dma_deinit_path(tunnel->paths[i]);
   1060	}
   1061}
   1062
   1063/**
   1064 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
   1065 * @tb: Pointer to the domain structure
   1066 * @nhi: Host controller port
   1067 * @dst: Destination null port to which the other domain is connected
   1068 * @transmit_path: HopID used for transmitting packets
   1069 * @transmit_ring: NHI ring number used to send packets towards the
   1070 *		   other domain. Set to %-1 if TX path is not needed.
   1071 * @receive_path: HopID used for receiving packets
   1072 * @receive_ring: NHI ring number used to receive packets from the
   1073 *		  other domain. Set to %-1 if RX path is not needed.
   1074 *
   1075 * Return: Returns a tb_tunnel on success or NULL on failure.
   1076 */
   1077struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
   1078				      struct tb_port *dst, int transmit_path,
   1079				      int transmit_ring, int receive_path,
   1080				      int receive_ring)
   1081{
   1082	struct tb_tunnel *tunnel;
   1083	size_t npaths = 0, i = 0;
   1084	struct tb_path *path;
   1085	int credits;
   1086
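       	/* Ring 0 is reserved for the control channel, so only rings > 0 get a DMA path */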
   1087	if (receive_ring > 0)
   1088		npaths++;
   1089	if (transmit_ring > 0)
   1090		npaths++;
   1091
   1092	if (WARN_ON(!npaths))
   1093		return NULL;
   1094
   1095	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
   1096	if (!tunnel)
   1097		return NULL;
   1098
   1099	tunnel->src_port = nhi;
   1100	tunnel->dst_port = dst;
   1101	tunnel->deinit = tb_dma_deinit;
   1102
   1103	credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);
   1104
   1105	if (receive_ring > 0) {
   1106		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
   1107				     "DMA RX");
   1108		if (!path)
   1109			goto err_free;
   1110		tunnel->paths[i++] = path;
   1111		if (tb_dma_init_rx_path(path, credits)) {
   1112			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
   1113			goto err_free;
   1114		}
   1115	}
   1116
   1117	if (transmit_ring > 0) {
   1118		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
   1119				     "DMA TX");
   1120		if (!path)
   1121			goto err_free;
   1122		tunnel->paths[i++] = path;
   1123		if (tb_dma_init_tx_path(path, credits)) {
   1124			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
   1125			goto err_free;
   1126		}
   1127	}
   1128
   1129	return tunnel;
   1130
   1131err_free:
   1132	tb_tunnel_free(tunnel);
   1133	return NULL;
   1134}
   1135
   1136/**
   1137 * tb_tunnel_match_dma() - Match DMA tunnel
   1138 * @tunnel: Tunnel to match
   1139 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
   1140 * @transmit_ring: NHI ring number used to send packets towards the
   1141 *		   other domain. Pass %-1 to ignore.
   1142 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
   1143 * @receive_ring: NHI ring number used to receive packets from the
   1144 *		  other domain. Pass %-1 to ignore.
   1145 *
   1146 * This function can be used to match a specific DMA tunnel if there are
   1147 * multiple DMA tunnels going through the same XDomain connection.
   1148 * Returns true if there is a match and false otherwise.
   1149 */
   1150bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
   1151			 int transmit_ring, int receive_path, int receive_ring)
   1152{
   1153	const struct tb_path *tx_path = NULL, *rx_path = NULL;
   1154	int i;
   1155
   1156	if (!receive_ring || !transmit_ring)
   1157		return false;
   1158
   1159	for (i = 0; i < tunnel->npaths; i++) {
   1160		const struct tb_path *path = tunnel->paths[i];
   1161
   1162		if (!path)
   1163			continue;
   1164
   1165		if (tb_port_is_nhi(path->hops[0].in_port))
   1166			tx_path = path;
   1167		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
   1168			rx_path = path;
   1169	}
   1170
   1171	if (transmit_ring > 0 || transmit_path > 0) {
   1172		if (!tx_path)
   1173			return false;
   1174		if (transmit_ring > 0 &&
   1175		    (tx_path->hops[0].in_hop_index != transmit_ring))
   1176			return false;
   1177		if (transmit_path > 0 &&
   1178		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
   1179			return false;
   1180	}
   1181
   1182	if (receive_ring > 0 || receive_path > 0) {
   1183		if (!rx_path)
   1184			return false;
   1185		if (receive_path > 0 &&
   1186		    (rx_path->hops[0].in_hop_index != receive_path))
   1187			return false;
   1188		if (receive_ring > 0 &&
   1189		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
   1190			return false;
   1191	}
   1192
   1193	return true;
   1194}
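       /*
        * For example, tb_tunnel_match_dma(tunnel, -1, transmit_ring, -1, -1)
        * matches on the transmit ring alone and ignores the other descriptors.
        */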
   1195
   1196static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
   1197{
   1198	int ret, up_max_rate, down_max_rate;
   1199
   1200	ret = usb4_usb3_port_max_link_rate(up);
   1201	if (ret < 0)
   1202		return ret;
   1203	up_max_rate = ret;
   1204
   1205	ret = usb4_usb3_port_max_link_rate(down);
   1206	if (ret < 0)
   1207		return ret;
   1208	down_max_rate = ret;
   1209
   1210	return min(up_max_rate, down_max_rate);
   1211}
   1212
   1213static int tb_usb3_init(struct tb_tunnel *tunnel)
   1214{
   1215	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
   1216		      tunnel->allocated_up, tunnel->allocated_down);
   1217
   1218	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
   1219						 &tunnel->allocated_up,
   1220						 &tunnel->allocated_down);
   1221}
   1222
   1223static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
   1224{
   1225	int res;
   1226
   1227	res = tb_usb3_port_enable(tunnel->src_port, activate);
   1228	if (res)
   1229		return res;
   1230
   1231	if (tb_port_is_usb3_up(tunnel->dst_port))
   1232		return tb_usb3_port_enable(tunnel->dst_port, activate);
   1233
   1234	return 0;
   1235}
   1236
   1237static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
   1238		int *consumed_up, int *consumed_down)
   1239{
   1240	int pcie_enabled = tb_acpi_may_tunnel_pcie();
   1241
   1242	/*
   1243	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
   1244	 * take it into account here.
   1245	 */
   1246	*consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
   1247	*consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
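       	/*
       	 * For example, 900 Mb/s allocated in one direction with PCIe
       	 * tunneling enabled is reported as 900 * 4 / 3 = 1200 Mb/s
       	 * consumed; with PCIe tunneling disabled it stays at 900 Mb/s.
       	 */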
   1248	return 0;
   1249}
   1250
   1251static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
   1252{
   1253	int ret;
   1254
   1255	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
   1256					       &tunnel->allocated_up,
   1257					       &tunnel->allocated_down);
   1258	if (ret)
   1259		return ret;
   1260
   1261	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
   1262		      tunnel->allocated_up, tunnel->allocated_down);
   1263	return 0;
   1264}
   1265
   1266static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
   1267						int *available_up,
   1268						int *available_down)
   1269{
   1270	int ret, max_rate, allocate_up, allocate_down;
   1271
   1272	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
   1273	if (ret < 0) {
   1274		tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
   1275		return;
   1276	} else if (!ret) {
   1277		/* Use the maximum link rate if the link valid bit is not set */
   1278		ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
   1279		if (ret < 0) {
   1280			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
   1281			return;
   1282		}
   1283	}
   1284
   1285	/*
   1286	 * 90% of the max rate can be allocated for isochronous
   1287	 * transfers.
   1288	 */
   1289	max_rate = ret * 90 / 100;
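       	/* For example, a 10000 Mb/s link leaves max_rate = 9000 Mb/s */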
   1290
   1291	/* No need to reclaim if already at maximum */
   1292	if (tunnel->allocated_up >= max_rate &&
   1293	    tunnel->allocated_down >= max_rate)
   1294		return;
   1295
   1296	/* Don't go lower than what is already allocated */
   1297	allocate_up = min(max_rate, *available_up);
   1298	if (allocate_up < tunnel->allocated_up)
   1299		allocate_up = tunnel->allocated_up;
   1300
   1301	allocate_down = min(max_rate, *available_down);
   1302	if (allocate_down < tunnel->allocated_down)
   1303		allocate_down = tunnel->allocated_down;
   1304
   1305	/* If no changes no need to do more */
   1306	if (allocate_up == tunnel->allocated_up &&
   1307	    allocate_down == tunnel->allocated_down)
   1308		return;
   1309
   1310	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
   1311						&allocate_down);
   1312	if (ret) {
   1313		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
   1314		return;
   1315	}
   1316
   1317	tunnel->allocated_up = allocate_up;
   1318	*available_up -= tunnel->allocated_up;
   1319
   1320	tunnel->allocated_down = allocate_down;
   1321	*available_down -= tunnel->allocated_down;
   1322
   1323	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
   1324		      tunnel->allocated_up, tunnel->allocated_down);
   1325}
   1326
   1327static void tb_usb3_init_credits(struct tb_path_hop *hop)
   1328{
   1329	struct tb_port *port = hop->in_port;
   1330	struct tb_switch *sw = port->sw;
   1331	unsigned int credits;
   1332
   1333	if (tb_port_use_credit_allocation(port)) {
   1334		credits = sw->max_usb3_credits;
   1335	} else {
   1336		if (tb_port_is_null(port))
   1337			credits = port->bonded ? 32 : 16;
   1338		else
   1339			credits = 7;
   1340	}
   1341
   1342	hop->initial_credits = credits;
   1343}
   1344
   1345static void tb_usb3_init_path(struct tb_path *path)
   1346{
   1347	struct tb_path_hop *hop;
   1348
   1349	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
   1350	path->egress_shared_buffer = TB_PATH_NONE;
   1351	path->ingress_fc_enable = TB_PATH_ALL;
   1352	path->ingress_shared_buffer = TB_PATH_NONE;
   1353	path->priority = 3;
   1354	path->weight = 3;
   1355	path->drop_packages = 0;
   1356
   1357	tb_path_for_each_hop(path, hop)
   1358		tb_usb3_init_credits(hop);
   1359}
   1360
   1361/**
   1362 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
   1363 * @tb: Pointer to the domain structure
   1364 * @down: USB3 downstream adapter
   1365 * @alloc_hopid: Allocate HopIDs from visited ports
   1366 *
   1367 * If @down adapter is active, follows the tunnel to the USB3 upstream
   1368 * adapter and back. Returns the discovered tunnel or %NULL if there was
   1369 * no tunnel.
   1370 */
   1371struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
   1372					  bool alloc_hopid)
   1373{
   1374	struct tb_tunnel *tunnel;
   1375	struct tb_path *path;
   1376
   1377	if (!tb_usb3_port_is_enabled(down))
   1378		return NULL;
   1379
   1380	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
   1381	if (!tunnel)
   1382		return NULL;
   1383
   1384	tunnel->activate = tb_usb3_activate;
   1385	tunnel->src_port = down;
   1386
   1387	/*
   1388	 * Discover both paths even if they are not complete. We will
   1389	 * clean them up by calling tb_tunnel_deactivate() below in that
   1390	 * case.
   1391	 */
   1392	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
   1393				&tunnel->dst_port, "USB3 Down", alloc_hopid);
   1394	if (!path) {
   1395		/* Just disable the downstream port */
   1396		tb_usb3_port_enable(down, false);
   1397		goto err_free;
   1398	}
   1399	tunnel->paths[TB_USB3_PATH_DOWN] = path;
   1400	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
   1401
   1402	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
   1403				"USB3 Up", alloc_hopid);
   1404	if (!path)
   1405		goto err_deactivate;
   1406	tunnel->paths[TB_USB3_PATH_UP] = path;
   1407	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
   1408
   1409	/* Validate that the tunnel is complete */
   1410	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
   1411		tb_port_warn(tunnel->dst_port,
   1412			     "path does not end on a USB3 adapter, cleaning up\n");
   1413		goto err_deactivate;
   1414	}
   1415
   1416	if (down != tunnel->src_port) {
   1417		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
   1418		goto err_deactivate;
   1419	}
   1420
   1421	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
   1422		tb_tunnel_warn(tunnel,
   1423			       "tunnel is not fully activated, cleaning up\n");
   1424		goto err_deactivate;
   1425	}
   1426
   1427	if (!tb_route(down->sw)) {
   1428		int ret;
   1429
   1430		/*
   1431		 * Read the initial bandwidth allocation for the first
   1432		 * hop tunnel.
   1433		 */
   1434		ret = usb4_usb3_port_allocated_bandwidth(down,
   1435			&tunnel->allocated_up, &tunnel->allocated_down);
   1436		if (ret)
   1437			goto err_deactivate;
   1438
   1439		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
   1440			      tunnel->allocated_up, tunnel->allocated_down);
   1441
   1442		tunnel->init = tb_usb3_init;
   1443		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
   1444		tunnel->release_unused_bandwidth =
   1445			tb_usb3_release_unused_bandwidth;
   1446		tunnel->reclaim_available_bandwidth =
   1447			tb_usb3_reclaim_available_bandwidth;
   1448	}
   1449
   1450	tb_tunnel_dbg(tunnel, "discovered\n");
   1451	return tunnel;
   1452
   1453err_deactivate:
   1454	tb_tunnel_deactivate(tunnel);
   1455err_free:
   1456	tb_tunnel_free(tunnel);
   1457
   1458	return NULL;
   1459}
   1460
   1461/**
   1462 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
   1463 * @tb: Pointer to the domain structure
   1464 * @up: USB3 upstream adapter port
   1465 * @down: USB3 downstream adapter port
   1466 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
   1467 *	    if not limited).
   1468 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
   1469 *	      (%0 if not limited).
   1470 *
   1471 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
   1472 * @TB_TYPE_USB3_DOWN.
   1473 *
   1474 * Return: Returns a tb_tunnel on success or %NULL on failure.
   1475 */
   1476struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
   1477				       struct tb_port *down, int max_up,
   1478				       int max_down)
   1479{
   1480	struct tb_tunnel *tunnel;
   1481	struct tb_path *path;
   1482	int max_rate = 0;
   1483
   1484	/*
   1485	 * Check that we have enough bandwidth available for the new
   1486	 * USB3 tunnel.
   1487	 */
   1488	if (max_up > 0 || max_down > 0) {
   1489		max_rate = tb_usb3_max_link_rate(down, up);
   1490		if (max_rate < 0)
   1491			return NULL;
   1492
   1493		/* Only 90% can be allocated for USB3 isochronous transfers */
   1494		max_rate = max_rate * 90 / 100;
   1495		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
   1496			    max_rate);
   1497
   1498		if (max_rate > max_up || max_rate > max_down) {
   1499			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
   1500			return NULL;
   1501		}
   1502	}
   1503
   1504	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
   1505	if (!tunnel)
   1506		return NULL;
   1507
   1508	tunnel->activate = tb_usb3_activate;
   1509	tunnel->src_port = down;
   1510	tunnel->dst_port = up;
   1511	tunnel->max_up = max_up;
   1512	tunnel->max_down = max_down;
   1513
   1514	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
   1515			     "USB3 Down");
   1516	if (!path) {
   1517		tb_tunnel_free(tunnel);
   1518		return NULL;
   1519	}
   1520	tb_usb3_init_path(path);
   1521	tunnel->paths[TB_USB3_PATH_DOWN] = path;
   1522
   1523	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
   1524			     "USB3 Up");
   1525	if (!path) {
   1526		tb_tunnel_free(tunnel);
   1527		return NULL;
   1528	}
   1529	tb_usb3_init_path(path);
   1530	tunnel->paths[TB_USB3_PATH_UP] = path;
   1531
   1532	if (!tb_route(down->sw)) {
   1533		tunnel->allocated_up = max_rate;
   1534		tunnel->allocated_down = max_rate;
   1535
   1536		tunnel->init = tb_usb3_init;
   1537		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
   1538		tunnel->release_unused_bandwidth =
   1539			tb_usb3_release_unused_bandwidth;
   1540		tunnel->reclaim_available_bandwidth =
   1541			tb_usb3_reclaim_available_bandwidth;
   1542	}
   1543
   1544	return tunnel;
   1545}
   1546
   1547/**
   1548 * tb_tunnel_free() - free a tunnel
   1549 * @tunnel: Tunnel to be freed
   1550 *
   1551 * Frees a tunnel. The tunnel does not need to be deactivated.
   1552 */
   1553void tb_tunnel_free(struct tb_tunnel *tunnel)
   1554{
   1555	int i;
   1556
   1557	if (!tunnel)
   1558		return;
   1559
   1560	if (tunnel->deinit)
   1561		tunnel->deinit(tunnel);
   1562
   1563	for (i = 0; i < tunnel->npaths; i++) {
   1564		if (tunnel->paths[i])
   1565			tb_path_free(tunnel->paths[i]);
   1566	}
   1567
   1568	kfree(tunnel->paths);
   1569	kfree(tunnel);
   1570}
   1571
   1572/**
   1573 * tb_tunnel_is_invalid() - check whether an activated path is still valid
   1574 * @tunnel: Tunnel to check
   1575 */
   1576bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
   1577{
   1578	int i;
   1579
   1580	for (i = 0; i < tunnel->npaths; i++) {
   1581		WARN_ON(!tunnel->paths[i]->activated);
   1582		if (tb_path_is_invalid(tunnel->paths[i]))
   1583			return true;
   1584	}
   1585
   1586	return false;
   1587}
   1588
   1589/**
   1590 * tb_tunnel_restart() - activate a tunnel after a hardware reset
   1591 * @tunnel: Tunnel to restart
   1592 *
   1593 * Return: 0 on success and negative errno in case of failure
   1594 */
   1595int tb_tunnel_restart(struct tb_tunnel *tunnel)
   1596{
   1597	int res, i;
   1598
   1599	tb_tunnel_dbg(tunnel, "activating\n");
   1600
   1601	/*
   1602	 * Make sure all paths are properly disabled before enabling
   1603	 * them again.
   1604	 */
   1605	for (i = 0; i < tunnel->npaths; i++) {
   1606		if (tunnel->paths[i]->activated) {
   1607			tb_path_deactivate(tunnel->paths[i]);
   1608			tunnel->paths[i]->activated = false;
   1609		}
   1610	}
   1611
   1612	if (tunnel->init) {
   1613		res = tunnel->init(tunnel);
   1614		if (res)
   1615			return res;
   1616	}
   1617
   1618	for (i = 0; i < tunnel->npaths; i++) {
   1619		res = tb_path_activate(tunnel->paths[i]);
   1620		if (res)
   1621			goto err;
   1622	}
   1623
   1624	if (tunnel->activate) {
   1625		res = tunnel->activate(tunnel, true);
   1626		if (res)
   1627			goto err;
   1628	}
   1629
   1630	return 0;
   1631
   1632err:
   1633	tb_tunnel_warn(tunnel, "activation failed\n");
   1634	tb_tunnel_deactivate(tunnel);
   1635	return res;
   1636}
   1637
   1638/**
   1639 * tb_tunnel_activate() - activate a tunnel
   1640 * @tunnel: Tunnel to activate
   1641 *
   1642 * Return: Returns 0 on success or an error code on failure.
   1643 */
   1644int tb_tunnel_activate(struct tb_tunnel *tunnel)
   1645{
   1646	int i;
   1647
   1648	for (i = 0; i < tunnel->npaths; i++) {
   1649		if (tunnel->paths[i]->activated) {
   1650			tb_tunnel_WARN(tunnel,
   1651				       "trying to activate an already activated tunnel\n");
   1652			return -EINVAL;
   1653		}
   1654	}
   1655
   1656	return tb_tunnel_restart(tunnel);
   1657}
   1658
   1659/**
   1660 * tb_tunnel_deactivate() - deactivate a tunnel
   1661 * @tunnel: Tunnel to deactivate
   1662 */
   1663void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
   1664{
   1665	int i;
   1666
   1667	tb_tunnel_dbg(tunnel, "deactivating\n");
   1668
   1669	if (tunnel->activate)
   1670		tunnel->activate(tunnel, false);
   1671
   1672	for (i = 0; i < tunnel->npaths; i++) {
   1673		if (tunnel->paths[i] && tunnel->paths[i]->activated)
   1674			tb_path_deactivate(tunnel->paths[i]);
   1675	}
   1676}
   1677
   1678/**
   1679 * tb_tunnel_port_on_path() - Does the tunnel go through port
   1680 * @tunnel: Tunnel to check
   1681 * @port: Port to check
   1682 *
   1683 * Returns true if @tunnel goes through @port (direction does not matter),
   1684 * false otherwise.
   1685 */
   1686bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
   1687			    const struct tb_port *port)
   1688{
   1689	int i;
   1690
   1691	for (i = 0; i < tunnel->npaths; i++) {
   1692		if (!tunnel->paths[i])
   1693			continue;
   1694
   1695		if (tb_path_port_on_path(tunnel->paths[i], port))
   1696			return true;
   1697	}
   1698
   1699	return false;
   1700}
   1701
   1702static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
   1703{
   1704	int i;
   1705
   1706	for (i = 0; i < tunnel->npaths; i++) {
   1707		if (!tunnel->paths[i])
   1708			return false;
   1709		if (!tunnel->paths[i]->activated)
   1710			return false;
   1711	}
   1712
   1713	return true;
   1714}
   1715
   1716/**
   1717 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
   1718 * @tunnel: Tunnel to check
   1719 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
   1720 *		 Can be %NULL.
   1721 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
   1722 *		   Can be %NULL.
   1723 *
   1724 * Stores the amount of isochronous bandwidth @tunnel consumes in
   1725 * @consumed_up and @consumed_down. In case of success returns %0,
   1726 * negative errno otherwise.
   1727 */
   1728int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
   1729				 int *consumed_down)
   1730{
   1731	int up_bw = 0, down_bw = 0;
   1732
   1733	if (!tb_tunnel_is_active(tunnel))
   1734		goto out;
   1735
   1736	if (tunnel->consumed_bandwidth) {
   1737		int ret;
   1738
   1739		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
   1740		if (ret)
   1741			return ret;
   1742
   1743		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
   1744			      down_bw);
   1745	}
   1746
   1747out:
   1748	if (consumed_up)
   1749		*consumed_up = up_bw;
   1750	if (consumed_down)
   1751		*consumed_down = down_bw;
   1752
   1753	return 0;
   1754}
   1755
   1756/**
   1757 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
   1758 * @tunnel: Tunnel whose unused bandwidth to release
   1759 *
   1760 * If the tunnel supports dynamic bandwidth management (USB3 tunnels at the
   1761 * moment) this function makes it release all the unused bandwidth.
   1762 *
   1763 * Returns %0 in case of success and negative errno otherwise.
   1764 */
   1765int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
   1766{
   1767	if (!tb_tunnel_is_active(tunnel))
   1768		return 0;
   1769
   1770	if (tunnel->release_unused_bandwidth) {
   1771		int ret;
   1772
   1773		ret = tunnel->release_unused_bandwidth(tunnel);
   1774		if (ret)
   1775			return ret;
   1776	}
   1777
   1778	return 0;
   1779}
   1780
   1781/**
   1782 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
   1783 * @tunnel: Tunnel reclaiming available bandwidth
   1784 * @available_up: Available upstream bandwidth (in Mb/s)
   1785 * @available_down: Available downstream bandwidth (in Mb/s)
   1786 *
   1787 * Reclaims bandwidth from @available_up and @available_down and updates
   1788 * the variables accordingly (e.g. decreases both according to what was
   1789 * reclaimed by the tunnel). If nothing was reclaimed the values are
   1790 * kept as is.
   1791 */
   1792void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
   1793					   int *available_up,
   1794					   int *available_down)
   1795{
   1796	if (!tb_tunnel_is_active(tunnel))
   1797		return;
   1798
   1799	if (tunnel->reclaim_available_bandwidth)
   1800		tunnel->reclaim_available_bandwidth(tunnel, available_up,
   1801						    available_down);
   1802}