cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xdomain.c (64238B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Thunderbolt XDomain discovery protocol support
      4 *
      5 * Copyright (C) 2017, Intel Corporation
      6 * Authors: Michael Jamet <michael.jamet@intel.com>
      7 *          Mika Westerberg <mika.westerberg@linux.intel.com>
      8 */
      9
     10#include <linux/device.h>
     11#include <linux/delay.h>
     12#include <linux/kmod.h>
     13#include <linux/module.h>
     14#include <linux/pm_runtime.h>
     15#include <linux/prandom.h>
     16#include <linux/utsname.h>
     17#include <linux/uuid.h>
     18#include <linux/workqueue.h>
     19
     20#include "tb.h"
     21
     22#define XDOMAIN_SHORT_TIMEOUT			100	/* ms */
     23#define XDOMAIN_DEFAULT_TIMEOUT			1000	/* ms */
     24#define XDOMAIN_BONDING_TIMEOUT			10000	/* ms */
     25#define XDOMAIN_RETRIES				10
     26#define XDOMAIN_DEFAULT_MAX_HOPID		15
     27
     28enum {
     29	XDOMAIN_STATE_INIT,
     30	XDOMAIN_STATE_UUID,
     31	XDOMAIN_STATE_LINK_STATUS,
     32	XDOMAIN_STATE_LINK_STATE_CHANGE,
     33	XDOMAIN_STATE_LINK_STATUS2,
     34	XDOMAIN_STATE_BONDING_UUID_LOW,
     35	XDOMAIN_STATE_BONDING_UUID_HIGH,
     36	XDOMAIN_STATE_PROPERTIES,
     37	XDOMAIN_STATE_ENUMERATED,
     38	XDOMAIN_STATE_ERROR,
     39};
     40
     41static const char * const state_names[] = {
     42	[XDOMAIN_STATE_INIT] = "INIT",
     43	[XDOMAIN_STATE_UUID] = "UUID",
     44	[XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
     45	[XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
     46	[XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
     47	[XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
     48	[XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
     49	[XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
     50	[XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
     51	[XDOMAIN_STATE_ERROR] = "ERROR",
     52};
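
/*
 * Typical progression of the discovery state machine (driven by
 * tb_xdomain_state_work() below): INIT -> UUID -> LINK_STATUS, then
 * either LINK_STATE_CHANGE -> LINK_STATUS2 -> BONDING_UUID_LOW (we
 * have the lower UUID and bond the lanes ourselves) or
 * BONDING_UUID_HIGH (the other side bonds), and finally
 * PROPERTIES -> ENUMERATED. Non-retryable failures end up in ERROR.
 */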
     53
     54struct xdomain_request_work {
     55	struct work_struct work;
     56	struct tb_xdp_header *pkg;
     57	struct tb *tb;
     58};
     59
     60static bool tb_xdomain_enabled = true;
     61module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
     62MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");
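
/*
 * For example, booting with thunderbolt.xdomain=0 (or passing the
 * parameter at module load time) disables the XDomain protocol; the
 * 0444 mode makes the parameter read-only at runtime.
 */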
     63
     64/*
     65 * Serializes access to the properties and protocol handlers below. If
     66 * you need to take both this lock and the struct tb_xdomain lock, take
     67 * this one first.
     68 */
     69static DEFINE_MUTEX(xdomain_lock);
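
/*
 * A minimal ordering sketch, mirroring what update_property_block()
 * does further below:
 *
 *	mutex_lock(&xdomain_lock);
 *	mutex_lock(&xd->lock);
 *	...
 *	mutex_unlock(&xd->lock);
 *	mutex_unlock(&xdomain_lock);
 */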
     70
     71/* Properties exposed to the remote domains */
     72static struct tb_property_dir *xdomain_property_dir;
     73static u32 xdomain_property_block_gen;
     74
     75/* Additional protocol handlers */
     76static LIST_HEAD(protocol_handlers);
     77
     78/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
     79static const uuid_t tb_xdp_uuid =
     80	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
     81		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
     82
     83bool tb_is_xdomain_enabled(void)
     84{
     85	return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
     86}
     87
     88static bool tb_xdomain_match(const struct tb_cfg_request *req,
     89			     const struct ctl_pkg *pkg)
     90{
     91	switch (pkg->frame.eof) {
     92	case TB_CFG_PKG_ERROR:
     93		return true;
     94
     95	case TB_CFG_PKG_XDOMAIN_RESP: {
     96		const struct tb_xdp_header *res_hdr = pkg->buffer;
     97		const struct tb_xdp_header *req_hdr = req->request;
     98
     99		if (pkg->frame.size < req->response_size / 4)
    100			return false;
    101
    102		/* Make sure route matches */
    103		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
    104		     req_hdr->xd_hdr.route_hi)
    105			return false;
    106		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
    107			return false;
    108
    109		/* Check that the XDomain protocol matches */
    110		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
    111			return false;
    112
    113		return true;
    114	}
    115
    116	default:
    117		return false;
    118	}
    119}
    120
    121static bool tb_xdomain_copy(struct tb_cfg_request *req,
    122			    const struct ctl_pkg *pkg)
    123{
    124	memcpy(req->response, pkg->buffer, req->response_size);
    125	req->result.err = 0;
    126	return true;
    127}
    128
    129static void response_ready(void *data)
    130{
    131	tb_cfg_request_put(data);
    132}
    133
    134static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
    135				 size_t size, enum tb_cfg_pkg_type type)
    136{
    137	struct tb_cfg_request *req;
    138
    139	req = tb_cfg_request_alloc();
    140	if (!req)
    141		return -ENOMEM;
    142
    143	req->match = tb_xdomain_match;
    144	req->copy = tb_xdomain_copy;
    145	req->request = response;
    146	req->request_size = size;
    147	req->request_type = type;
    148
    149	return tb_cfg_request(ctl, req, response_ready, req);
    150}
    151
    152/**
     153 * tb_xdomain_response() - Send an XDomain response message
    154 * @xd: XDomain to send the message
    155 * @response: Response to send
    156 * @size: Size of the response
    157 * @type: PDF type of the response
    158 *
     159 * This can be used to send an XDomain response message to the other
    160 * domain. No response for the message is expected.
    161 *
    162 * Return: %0 in case of success and negative errno in case of failure
    163 */
    164int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
    165			size_t size, enum tb_cfg_pkg_type type)
    166{
    167	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
    168}
    169EXPORT_SYMBOL_GPL(tb_xdomain_response);
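
/*
 * Usage sketch (illustration only, not part of the driver): answering
 * a request received over a service's own protocol. The struct
 * my_svc_response type is hypothetical.
 */
#if 0
	struct my_svc_response res = { /* filled in per protocol */ };
	int ret;

	ret = tb_xdomain_response(xd, &res, sizeof(res),
				  TB_CFG_PKG_XDOMAIN_RESP);
	if (ret)
		dev_warn(&xd->dev, "failed to send response\n");
#endif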
    170
    171static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
    172	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
    173	size_t response_size, enum tb_cfg_pkg_type response_type,
    174	unsigned int timeout_msec)
    175{
    176	struct tb_cfg_request *req;
    177	struct tb_cfg_result res;
    178
    179	req = tb_cfg_request_alloc();
    180	if (!req)
    181		return -ENOMEM;
    182
    183	req->match = tb_xdomain_match;
    184	req->copy = tb_xdomain_copy;
    185	req->request = request;
    186	req->request_size = request_size;
    187	req->request_type = request_type;
    188	req->response = response;
    189	req->response_size = response_size;
    190	req->response_type = response_type;
    191
    192	res = tb_cfg_request_sync(ctl, req, timeout_msec);
    193
    194	tb_cfg_request_put(req);
    195
    196	return res.err == 1 ? -EIO : res.err;
    197}
    198
    199/**
     200 * tb_xdomain_request() - Send an XDomain request
    201 * @xd: XDomain to send the request
    202 * @request: Request to send
    203 * @request_size: Size of the request in bytes
    204 * @request_type: PDF type of the request
    205 * @response: Response is copied here
    206 * @response_size: Expected size of the response in bytes
    207 * @response_type: Expected PDF type of the response
    208 * @timeout_msec: Timeout in milliseconds to wait for the response
    209 *
    210 * This function can be used to send XDomain control channel messages to
    211 * the other domain. The function waits until the response is received
     212 * or the timeout triggers, whichever comes first.
    213 *
    214 * Return: %0 in case of success and negative errno in case of failure
    215 */
    216int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
    217	size_t request_size, enum tb_cfg_pkg_type request_type,
    218	void *response, size_t response_size,
    219	enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
    220{
    221	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
    222				    request_type, response, response_size,
    223				    response_type, timeout_msec);
    224}
    225EXPORT_SYMBOL_GPL(tb_xdomain_request);
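
/*
 * Usage sketch (illustration only): a synchronous round trip over the
 * control channel. The request/response structures are hypothetical;
 * a real caller uses the packet layout of its own protocol.
 */
#if 0
	struct my_svc_request req = { /* filled in per protocol */ };
	struct my_svc_response res;
	int ret;

	ret = tb_xdomain_request(xd, &req, sizeof(req),
				 TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				 TB_CFG_PKG_XDOMAIN_RESP, 1000);
	if (ret)
		return ret;	/* timeout or transport error */
#endif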
    226
    227static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
    228	u8 sequence, enum tb_xdp_type type, size_t size)
    229{
    230	u32 length_sn;
    231
    232	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
    233	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;
    234
    235	hdr->xd_hdr.route_hi = upper_32_bits(route);
    236	hdr->xd_hdr.route_lo = lower_32_bits(route);
    237	hdr->xd_hdr.length_sn = length_sn;
    238	hdr->type = type;
    239	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
    240}
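
/*
 * Worked example: xd_hdr consists of three u32 fields (12 bytes), so
 * for a hypothetical 28-byte packet the length is (28 - 12) / 4 = 4
 * dwords; a sequence number of 2 is then shifted into the
 * TB_XDOMAIN_SN_MASK bits and OR'd in.
 */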
    241
    242static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
    243{
    244	if (res->hdr.type != ERROR_RESPONSE)
    245		return 0;
    246
    247	switch (res->error) {
    248	case ERROR_UNKNOWN_PACKET:
    249	case ERROR_UNKNOWN_DOMAIN:
    250		return -EIO;
    251	case ERROR_NOT_SUPPORTED:
    252		return -ENOTSUPP;
    253	case ERROR_NOT_READY:
    254		return -EAGAIN;
    255	default:
    256		break;
    257	}
    258
    259	return 0;
    260}
    261
    262static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
    263			       uuid_t *uuid, u64 *remote_route)
    264{
    265	struct tb_xdp_uuid_response res;
    266	struct tb_xdp_uuid req;
    267	int ret;
    268
    269	memset(&req, 0, sizeof(req));
    270	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
    271			   sizeof(req));
    272
    273	memset(&res, 0, sizeof(res));
    274	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
    275				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
    276				   TB_CFG_PKG_XDOMAIN_RESP,
    277				   XDOMAIN_DEFAULT_TIMEOUT);
    278	if (ret)
    279		return ret;
    280
    281	ret = tb_xdp_handle_error(&res.err);
    282	if (ret)
    283		return ret;
    284
    285	uuid_copy(uuid, &res.src_uuid);
    286	*remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;
    287
    288	return 0;
    289}
    290
    291static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
    292				const uuid_t *uuid)
    293{
    294	struct tb_xdp_uuid_response res;
    295
    296	memset(&res, 0, sizeof(res));
    297	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
    298			   sizeof(res));
    299
    300	uuid_copy(&res.src_uuid, uuid);
    301	res.src_route_hi = upper_32_bits(route);
    302	res.src_route_lo = lower_32_bits(route);
    303
    304	return __tb_xdomain_response(ctl, &res, sizeof(res),
    305				     TB_CFG_PKG_XDOMAIN_RESP);
    306}
    307
    308static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
    309				 enum tb_xdp_error error)
    310{
    311	struct tb_xdp_error_response res;
    312
    313	memset(&res, 0, sizeof(res));
    314	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
    315			   sizeof(res));
    316	res.error = error;
    317
    318	return __tb_xdomain_response(ctl, &res, sizeof(res),
    319				     TB_CFG_PKG_XDOMAIN_RESP);
    320}
    321
    322static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
    323	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
    324	u32 **block, u32 *generation)
    325{
    326	struct tb_xdp_properties_response *res;
    327	struct tb_xdp_properties req;
    328	u16 data_len, len;
    329	size_t total_size;
    330	u32 *data = NULL;
    331	int ret;
    332
    333	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
    334	res = kzalloc(total_size, GFP_KERNEL);
    335	if (!res)
    336		return -ENOMEM;
    337
    338	memset(&req, 0, sizeof(req));
    339	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
    340			   sizeof(req));
    341	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
    342	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));
    343
    344	len = 0;
    345	data_len = 0;
    346
    347	do {
    348		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
    349					   TB_CFG_PKG_XDOMAIN_REQ, res,
    350					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
    351					   XDOMAIN_DEFAULT_TIMEOUT);
    352		if (ret)
    353			goto err;
    354
    355		ret = tb_xdp_handle_error(&res->err);
    356		if (ret)
    357			goto err;
    358
    359		/*
    360		 * Package length includes the whole payload without the
    361		 * XDomain header. Validate first that the package is at
     362		 * least the size of the response structure.
    363		 */
    364		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
    365		if (len < sizeof(*res) / 4) {
    366			ret = -EINVAL;
    367			goto err;
    368		}
    369
    370		len += sizeof(res->hdr.xd_hdr) / 4;
    371		len -= sizeof(*res) / 4;
    372
    373		if (res->offset != req.offset) {
    374			ret = -EINVAL;
    375			goto err;
    376		}
    377
    378		/*
     379		 * On the first round, allocate a block that has enough
     380		 * space for the whole properties block.
    381		 */
    382		if (!data) {
    383			data_len = res->data_length;
    384			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
    385				ret = -E2BIG;
    386				goto err;
    387			}
    388
    389			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
    390			if (!data) {
    391				ret = -ENOMEM;
    392				goto err;
    393			}
    394		}
    395
    396		memcpy(data + req.offset, res->data, len * 4);
    397		req.offset += len;
    398	} while (!data_len || req.offset < data_len);
    399
    400	*block = data;
    401	*generation = res->generation;
    402
    403	kfree(res);
    404
    405	return data_len;
    406
    407err:
    408	kfree(data);
    409	kfree(res);
    410
    411	return ret;
    412}
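
/*
 * Note on the loop above: the remote property block is fetched in
 * chunks of at most TB_XDP_PROPERTIES_MAX_DATA_LENGTH dwords,
 * advancing req.offset by the received length until data_length
 * dwords have arrived. For example, a (hypothetical) 600-dword block
 * with a 500-dword chunk limit takes two round trips, at offsets 0
 * and 500.
 */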
    413
    414static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
    415	struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)
    416{
    417	struct tb_xdp_properties_response *res;
    418	size_t total_size;
    419	u16 len;
    420	int ret;
    421
    422	/*
    423	 * Currently we expect all requests to be directed to us. The
     424	 * protocol supports forwarding, though, which we might add
     425	 * support for later on.
    426	 */
    427	if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
    428		tb_xdp_error_response(ctl, xd->route, sequence,
    429				      ERROR_UNKNOWN_DOMAIN);
    430		return 0;
    431	}
    432
    433	mutex_lock(&xd->lock);
    434
    435	if (req->offset >= xd->local_property_block_len) {
    436		mutex_unlock(&xd->lock);
    437		return -EINVAL;
    438	}
    439
    440	len = xd->local_property_block_len - req->offset;
    441	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
    442	total_size = sizeof(*res) + len * 4;
    443
    444	res = kzalloc(total_size, GFP_KERNEL);
    445	if (!res) {
    446		mutex_unlock(&xd->lock);
    447		return -ENOMEM;
    448	}
    449
    450	tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
    451			   total_size);
    452	res->generation = xd->local_property_block_gen;
    453	res->data_length = xd->local_property_block_len;
    454	res->offset = req->offset;
    455	uuid_copy(&res->src_uuid, xd->local_uuid);
    456	uuid_copy(&res->dst_uuid, &req->src_uuid);
    457	memcpy(res->data, &xd->local_property_block[req->offset], len * 4);
    458
    459	mutex_unlock(&xd->lock);
    460
    461	ret = __tb_xdomain_response(ctl, res, total_size,
    462				    TB_CFG_PKG_XDOMAIN_RESP);
    463
    464	kfree(res);
    465	return ret;
    466}
    467
    468static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
    469					     int retry, const uuid_t *uuid)
    470{
    471	struct tb_xdp_properties_changed_response res;
    472	struct tb_xdp_properties_changed req;
    473	int ret;
    474
    475	memset(&req, 0, sizeof(req));
    476	tb_xdp_fill_header(&req.hdr, route, retry % 4,
    477			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
    478	uuid_copy(&req.src_uuid, uuid);
    479
    480	memset(&res, 0, sizeof(res));
    481	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
    482				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
    483				   TB_CFG_PKG_XDOMAIN_RESP,
    484				   XDOMAIN_DEFAULT_TIMEOUT);
    485	if (ret)
    486		return ret;
    487
    488	return tb_xdp_handle_error(&res.err);
    489}
    490
    491static int
    492tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
    493{
    494	struct tb_xdp_properties_changed_response res;
    495
    496	memset(&res, 0, sizeof(res));
    497	tb_xdp_fill_header(&res.hdr, route, sequence,
    498			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
    499	return __tb_xdomain_response(ctl, &res, sizeof(res),
    500				     TB_CFG_PKG_XDOMAIN_RESP);
    501}
    502
    503static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
    504					    u8 sequence, u8 *slw, u8 *tlw,
    505					    u8 *sls, u8 *tls)
    506{
    507	struct tb_xdp_link_state_status_response res;
    508	struct tb_xdp_link_state_status req;
    509	int ret;
    510
    511	memset(&req, 0, sizeof(req));
    512	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
    513			   sizeof(req));
    514
    515	memset(&res, 0, sizeof(res));
    516	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
    517				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
    518				   XDOMAIN_DEFAULT_TIMEOUT);
    519	if (ret)
    520		return ret;
    521
    522	ret = tb_xdp_handle_error(&res.err);
    523	if (ret)
    524		return ret;
    525
    526	if (res.status != 0)
    527		return -EREMOTEIO;
    528
    529	*slw = res.slw;
    530	*tlw = res.tlw;
    531	*sls = res.sls;
    532	*tls = res.tls;
    533
    534	return 0;
    535}
    536
    537static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
    538					     struct tb_xdomain *xd, u8 sequence)
    539{
    540	struct tb_switch *sw = tb_to_switch(xd->dev.parent);
    541	struct tb_xdp_link_state_status_response res;
    542	struct tb_port *port = tb_port_at(xd->route, sw);
    543	u32 val[2];
    544	int ret;
    545
    546	memset(&res, 0, sizeof(res));
    547	tb_xdp_fill_header(&res.hdr, xd->route, sequence,
    548			   LINK_STATE_STATUS_RESPONSE, sizeof(res));
    549
    550	ret = tb_port_read(port, val, TB_CFG_PORT,
    551			   port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
    552	if (ret)
    553		return ret;
    554
    555	res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
    556			LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
    557	res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
    558			LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
    559	res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
    560	res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
    561			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
    562
    563	return __tb_xdomain_response(ctl, &res, sizeof(res),
    564				     TB_CFG_PKG_XDOMAIN_RESP);
    565}
    566
    567static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
    568					    u8 sequence, u8 tlw, u8 tls)
    569{
    570	struct tb_xdp_link_state_change_response res;
    571	struct tb_xdp_link_state_change req;
    572	int ret;
    573
    574	memset(&req, 0, sizeof(req));
    575	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
    576			   sizeof(req));
    577	req.tlw = tlw;
    578	req.tls = tls;
    579
    580	memset(&res, 0, sizeof(res));
    581	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
    582				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
    583				   XDOMAIN_DEFAULT_TIMEOUT);
    584	if (ret)
    585		return ret;
    586
    587	ret = tb_xdp_handle_error(&res.err);
    588	if (ret)
    589		return ret;
    590
    591	return res.status != 0 ? -EREMOTEIO : 0;
    592}
    593
    594static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
    595					     u8 sequence, u32 status)
    596{
    597	struct tb_xdp_link_state_change_response res;
    598
    599	memset(&res, 0, sizeof(res));
    600	tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
    601			   sizeof(res));
    602
    603	res.status = status;
    604
    605	return __tb_xdomain_response(ctl, &res, sizeof(res),
    606				     TB_CFG_PKG_XDOMAIN_RESP);
    607}
    608
    609/**
    610 * tb_register_protocol_handler() - Register protocol handler
    611 * @handler: Handler to register
    612 *
    613 * This allows XDomain service drivers to hook into incoming XDomain
    614 * messages. After this function is called the service driver needs to
     615 * be able to handle calls to the callback whenever a package with the
    616 * registered protocol is received.
    617 */
    618int tb_register_protocol_handler(struct tb_protocol_handler *handler)
    619{
    620	if (!handler->uuid || !handler->callback)
    621		return -EINVAL;
    622	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
    623		return -EINVAL;
    624
    625	mutex_lock(&xdomain_lock);
    626	list_add_tail(&handler->list, &protocol_handlers);
    627	mutex_unlock(&xdomain_lock);
    628
    629	return 0;
    630}
    631EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
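
/*
 * Registration sketch (illustration only): the UUID and callback are
 * hypothetical. The callback is invoked for every received XDomain
 * packet that carries the registered protocol UUID.
 */
#if 0
static int my_proto_callback(const void *buf, size_t size, void *data)
{
	/* Decode the incoming packet here */
	return 0;
}

static const uuid_t my_proto_uuid =
	UUID_INIT(0x00000000, 0x0000, 0x0000,
		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);

static struct tb_protocol_handler my_proto_handler = {
	.uuid = &my_proto_uuid,
	.callback = my_proto_callback,
};

	/* e.g. from the service driver's module_init() */
	ret = tb_register_protocol_handler(&my_proto_handler);
#endif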
    632
    633/**
    634 * tb_unregister_protocol_handler() - Unregister protocol handler
    635 * @handler: Handler to unregister
    636 *
    637 * Removes the previously registered protocol handler.
    638 */
    639void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
    640{
    641	mutex_lock(&xdomain_lock);
    642	list_del_init(&handler->list);
    643	mutex_unlock(&xdomain_lock);
    644}
    645EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
    646
    647static void update_property_block(struct tb_xdomain *xd)
    648{
    649	mutex_lock(&xdomain_lock);
    650	mutex_lock(&xd->lock);
    651	/*
    652	 * If the local property block is not up-to-date, rebuild it now
    653	 * based on the global property template.
    654	 */
    655	if (!xd->local_property_block ||
    656	    xd->local_property_block_gen < xdomain_property_block_gen) {
    657		struct tb_property_dir *dir;
    658		int ret, block_len;
    659		u32 *block;
    660
    661		dir = tb_property_copy_dir(xdomain_property_dir);
    662		if (!dir) {
    663			dev_warn(&xd->dev, "failed to copy properties\n");
    664			goto out_unlock;
    665		}
    666
    667		/* Fill in non-static properties now */
    668		tb_property_add_text(dir, "deviceid", utsname()->nodename);
    669		tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);
    670
    671		ret = tb_property_format_dir(dir, NULL, 0);
    672		if (ret < 0) {
    673			dev_warn(&xd->dev, "local property block creation failed\n");
    674			tb_property_free_dir(dir);
    675			goto out_unlock;
    676		}
    677
    678		block_len = ret;
    679		block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
    680		if (!block) {
    681			tb_property_free_dir(dir);
    682			goto out_unlock;
    683		}
    684
    685		ret = tb_property_format_dir(dir, block, block_len);
    686		if (ret) {
    687			dev_warn(&xd->dev, "property block generation failed\n");
    688			tb_property_free_dir(dir);
    689			kfree(block);
    690			goto out_unlock;
    691		}
    692
    693		tb_property_free_dir(dir);
    694		/* Release the previous block */
    695		kfree(xd->local_property_block);
    696		/* Assign new one */
    697		xd->local_property_block = block;
    698		xd->local_property_block_len = block_len;
    699		xd->local_property_block_gen = xdomain_property_block_gen;
    700	}
    701
    702out_unlock:
    703	mutex_unlock(&xd->lock);
    704	mutex_unlock(&xdomain_lock);
    705}
    706
    707static void tb_xdp_handle_request(struct work_struct *work)
    708{
    709	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
    710	const struct tb_xdp_header *pkg = xw->pkg;
    711	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
    712	struct tb *tb = xw->tb;
    713	struct tb_ctl *ctl = tb->ctl;
    714	struct tb_xdomain *xd;
    715	const uuid_t *uuid;
    716	int ret = 0;
    717	u32 sequence;
    718	u64 route;
    719
    720	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
    721	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
    722	sequence >>= TB_XDOMAIN_SN_SHIFT;
    723
    724	mutex_lock(&tb->lock);
    725	if (tb->root_switch)
    726		uuid = tb->root_switch->uuid;
    727	else
    728		uuid = NULL;
    729	mutex_unlock(&tb->lock);
    730
    731	if (!uuid) {
    732		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
    733		goto out;
    734	}
    735
    736	xd = tb_xdomain_find_by_route_locked(tb, route);
    737	if (xd)
    738		update_property_block(xd);
    739
    740	switch (pkg->type) {
    741	case PROPERTIES_REQUEST:
    742		tb_dbg(tb, "%llx: received XDomain properties request\n", route);
    743		if (xd) {
    744			ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
    745				(const struct tb_xdp_properties *)pkg);
    746		}
    747		break;
    748
    749	case PROPERTIES_CHANGED_REQUEST:
    750		tb_dbg(tb, "%llx: received XDomain properties changed request\n",
    751		       route);
    752
    753		ret = tb_xdp_properties_changed_response(ctl, route, sequence);
    754
    755		/*
    756		 * Since the properties have been changed, let's update
    757		 * the xdomain related to this connection as well in
    758		 * case there is a change in services it offers.
    759		 */
    760		if (xd && device_is_registered(&xd->dev))
    761			queue_delayed_work(tb->wq, &xd->state_work,
    762					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
    763		break;
    764
    765	case UUID_REQUEST_OLD:
    766	case UUID_REQUEST:
    767		tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
    768		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
    769		break;
    770
    771	case LINK_STATE_STATUS_REQUEST:
    772		tb_dbg(tb, "%llx: received XDomain link state status request\n",
    773		       route);
    774
    775		if (xd) {
    776			ret = tb_xdp_link_state_status_response(tb, ctl, xd,
    777								sequence);
    778		} else {
    779			tb_xdp_error_response(ctl, route, sequence,
    780					      ERROR_NOT_READY);
    781		}
    782		break;
    783
    784	case LINK_STATE_CHANGE_REQUEST:
    785		tb_dbg(tb, "%llx: received XDomain link state change request\n",
    786		       route);
    787
    788		if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
    789			const struct tb_xdp_link_state_change *lsc =
    790				(const struct tb_xdp_link_state_change *)pkg;
    791
    792			ret = tb_xdp_link_state_change_response(ctl, route,
    793								sequence, 0);
    794			xd->target_link_width = lsc->tlw;
    795			queue_delayed_work(tb->wq, &xd->state_work,
    796					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
    797		} else {
    798			tb_xdp_error_response(ctl, route, sequence,
    799					      ERROR_NOT_READY);
    800		}
    801		break;
    802
    803	default:
    804		tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
    805		tb_xdp_error_response(ctl, route, sequence,
    806				      ERROR_NOT_SUPPORTED);
    807		break;
    808	}
    809
    810	tb_xdomain_put(xd);
    811
    812	if (ret) {
    813		tb_warn(tb, "failed to send XDomain response for %#x\n",
    814			pkg->type);
    815	}
    816
    817out:
    818	kfree(xw->pkg);
    819	kfree(xw);
    820
    821	tb_domain_put(tb);
    822}
    823
    824static bool
    825tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
    826			size_t size)
    827{
    828	struct xdomain_request_work *xw;
    829
    830	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
    831	if (!xw)
    832		return false;
    833
    834	INIT_WORK(&xw->work, tb_xdp_handle_request);
    835	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
    836	if (!xw->pkg) {
    837		kfree(xw);
    838		return false;
    839	}
    840	xw->tb = tb_domain_get(tb);
    841
    842	schedule_work(&xw->work);
    843	return true;
    844}
    845
    846/**
    847 * tb_register_service_driver() - Register XDomain service driver
    848 * @drv: Driver to register
    849 *
     850 * Registers the new service driver @drv to the bus.
    851 */
    852int tb_register_service_driver(struct tb_service_driver *drv)
    853{
    854	drv->driver.bus = &tb_bus_type;
    855	return driver_register(&drv->driver);
    856}
    857EXPORT_SYMBOL_GPL(tb_register_service_driver);
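
/*
 * Driver skeleton (illustration only): matching on a hypothetical
 * "network" protocol key with protocol ID 1, roughly what an in-tree
 * service driver does.
 */
#if 0
static int my_svc_probe(struct tb_service *svc,
			const struct tb_service_id *id)
{
	return 0;
}

static void my_svc_remove(struct tb_service *svc)
{
}

static const struct tb_service_id my_svc_ids[] = {
	{ TB_SERVICE("network", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, my_svc_ids);

static struct tb_service_driver my_svc_driver = {
	.driver.name = "my-svc",
	.probe = my_svc_probe,
	.remove = my_svc_remove,
	.id_table = my_svc_ids,
};

	/* e.g. from the driver's module_init() */
	ret = tb_register_service_driver(&my_svc_driver);
#endif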
    858
    859/**
    860 * tb_unregister_service_driver() - Unregister XDomain service driver
    861 * @drv: Driver to unregister
    862 *
    863 * Unregisters XDomain service driver from the bus.
    864 */
    865void tb_unregister_service_driver(struct tb_service_driver *drv)
    866{
    867	driver_unregister(&drv->driver);
    868}
    869EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
    870
    871static ssize_t key_show(struct device *dev, struct device_attribute *attr,
    872			char *buf)
    873{
    874	struct tb_service *svc = container_of(dev, struct tb_service, dev);
    875
    876	/*
    877	 * It should be null terminated but anything else is pretty much
    878	 * allowed.
    879	 */
    880	return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
    881}
    882static DEVICE_ATTR_RO(key);
    883
    884static int get_modalias(struct tb_service *svc, char *buf, size_t size)
    885{
    886	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
    887			svc->prtcid, svc->prtcvers, svc->prtcrevs);
    888}
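
/*
 * For example (hypothetical values), a service with key "network",
 * prtcid 1, prtcvers 1 and prtcrevs 1 gets the modalias
 * "tbsvc:knetworkp00000001v00000001r00000001", which is what
 * userspace module matching keys on.
 */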
    889
    890static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
    891			     char *buf)
    892{
    893	struct tb_service *svc = container_of(dev, struct tb_service, dev);
    894
    895	/* Full buffer size except new line and null termination */
    896	get_modalias(svc, buf, PAGE_SIZE - 2);
    897	return strlen(strcat(buf, "\n"));
    898}
    899static DEVICE_ATTR_RO(modalias);
    900
    901static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
    902			   char *buf)
    903{
    904	struct tb_service *svc = container_of(dev, struct tb_service, dev);
    905
    906	return sprintf(buf, "%u\n", svc->prtcid);
    907}
    908static DEVICE_ATTR_RO(prtcid);
    909
    910static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
    911			     char *buf)
    912{
    913	struct tb_service *svc = container_of(dev, struct tb_service, dev);
    914
    915	return sprintf(buf, "%u\n", svc->prtcvers);
    916}
    917static DEVICE_ATTR_RO(prtcvers);
    918
    919static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
    920			     char *buf)
    921{
    922	struct tb_service *svc = container_of(dev, struct tb_service, dev);
    923
    924	return sprintf(buf, "%u\n", svc->prtcrevs);
    925}
    926static DEVICE_ATTR_RO(prtcrevs);
    927
    928static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
    929			     char *buf)
    930{
    931	struct tb_service *svc = container_of(dev, struct tb_service, dev);
    932
    933	return sprintf(buf, "0x%08x\n", svc->prtcstns);
    934}
    935static DEVICE_ATTR_RO(prtcstns);
    936
    937static struct attribute *tb_service_attrs[] = {
    938	&dev_attr_key.attr,
    939	&dev_attr_modalias.attr,
    940	&dev_attr_prtcid.attr,
    941	&dev_attr_prtcvers.attr,
    942	&dev_attr_prtcrevs.attr,
    943	&dev_attr_prtcstns.attr,
    944	NULL,
    945};
    946
    947static const struct attribute_group tb_service_attr_group = {
    948	.attrs = tb_service_attrs,
    949};
    950
    951static const struct attribute_group *tb_service_attr_groups[] = {
    952	&tb_service_attr_group,
    953	NULL,
    954};
    955
    956static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
    957{
    958	struct tb_service *svc = container_of(dev, struct tb_service, dev);
    959	char modalias[64];
    960
    961	get_modalias(svc, modalias, sizeof(modalias));
    962	return add_uevent_var(env, "MODALIAS=%s", modalias);
    963}
    964
    965static void tb_service_release(struct device *dev)
    966{
    967	struct tb_service *svc = container_of(dev, struct tb_service, dev);
    968	struct tb_xdomain *xd = tb_service_parent(svc);
    969
    970	tb_service_debugfs_remove(svc);
    971	ida_simple_remove(&xd->service_ids, svc->id);
    972	kfree(svc->key);
    973	kfree(svc);
    974}
    975
    976struct device_type tb_service_type = {
    977	.name = "thunderbolt_service",
    978	.groups = tb_service_attr_groups,
    979	.uevent = tb_service_uevent,
    980	.release = tb_service_release,
    981};
    982EXPORT_SYMBOL_GPL(tb_service_type);
    983
    984static int remove_missing_service(struct device *dev, void *data)
    985{
    986	struct tb_xdomain *xd = data;
    987	struct tb_service *svc;
    988
    989	svc = tb_to_service(dev);
    990	if (!svc)
    991		return 0;
    992
    993	if (!tb_property_find(xd->remote_properties, svc->key,
    994			      TB_PROPERTY_TYPE_DIRECTORY))
    995		device_unregister(dev);
    996
    997	return 0;
    998}
    999
   1000static int find_service(struct device *dev, void *data)
   1001{
   1002	const struct tb_property *p = data;
   1003	struct tb_service *svc;
   1004
   1005	svc = tb_to_service(dev);
   1006	if (!svc)
   1007		return 0;
   1008
   1009	return !strcmp(svc->key, p->key);
   1010}
   1011
   1012static int populate_service(struct tb_service *svc,
   1013			    struct tb_property *property)
   1014{
   1015	struct tb_property_dir *dir = property->value.dir;
   1016	struct tb_property *p;
   1017
   1018	/* Fill in standard properties */
   1019	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
   1020	if (p)
   1021		svc->prtcid = p->value.immediate;
   1022	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
   1023	if (p)
   1024		svc->prtcvers = p->value.immediate;
   1025	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
   1026	if (p)
   1027		svc->prtcrevs = p->value.immediate;
   1028	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
   1029	if (p)
   1030		svc->prtcstns = p->value.immediate;
   1031
   1032	svc->key = kstrdup(property->key, GFP_KERNEL);
   1033	if (!svc->key)
   1034		return -ENOMEM;
   1035
   1036	return 0;
   1037}
   1038
   1039static void enumerate_services(struct tb_xdomain *xd)
   1040{
   1041	struct tb_service *svc;
   1042	struct tb_property *p;
   1043	struct device *dev;
   1044	int id;
   1045
   1046	/*
   1047	 * First remove all services that are not available anymore in
   1048	 * the updated property block.
   1049	 */
   1050	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);
   1051
   1052	/* Then re-enumerate properties creating new services as we go */
   1053	tb_property_for_each(xd->remote_properties, p) {
   1054		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
   1055			continue;
   1056
   1057		/* If the service exists already we are fine */
   1058		dev = device_find_child(&xd->dev, p, find_service);
   1059		if (dev) {
   1060			put_device(dev);
   1061			continue;
   1062		}
   1063
   1064		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
   1065		if (!svc)
   1066			break;
   1067
   1068		if (populate_service(svc, p)) {
   1069			kfree(svc);
   1070			break;
   1071		}
   1072
   1073		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
   1074		if (id < 0) {
   1075			kfree(svc->key);
   1076			kfree(svc);
   1077			break;
   1078		}
   1079		svc->id = id;
   1080		svc->dev.bus = &tb_bus_type;
   1081		svc->dev.type = &tb_service_type;
   1082		svc->dev.parent = &xd->dev;
   1083		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);
   1084
   1085		tb_service_debugfs_init(svc);
   1086
   1087		if (device_register(&svc->dev)) {
   1088			put_device(&svc->dev);
   1089			break;
   1090		}
   1091	}
   1092}
   1093
   1094static int populate_properties(struct tb_xdomain *xd,
   1095			       struct tb_property_dir *dir)
   1096{
   1097	const struct tb_property *p;
   1098
   1099	/* Required properties */
   1100	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
   1101	if (!p)
   1102		return -EINVAL;
   1103	xd->device = p->value.immediate;
   1104
   1105	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
   1106	if (!p)
   1107		return -EINVAL;
   1108	xd->vendor = p->value.immediate;
   1109
   1110	p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
   1111	/*
    1112	 * The USB4 inter-domain spec suggests using 15 as the maximum
    1113	 * HopID if the other end does not announce it in a property.
    1114	 * This is for TBT3 compatibility.
   1115	 */
   1116	xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;
   1117
   1118	kfree(xd->device_name);
   1119	xd->device_name = NULL;
   1120	kfree(xd->vendor_name);
   1121	xd->vendor_name = NULL;
   1122
   1123	/* Optional properties */
   1124	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
   1125	if (p)
   1126		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
   1127	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
   1128	if (p)
   1129		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);
   1130
   1131	return 0;
   1132}
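
/*
 * Example (hypothetical) remote property block as consumed above:
 *
 *	vendorid:  0x8086 (value), "Vendor Name" (text)
 *	deviceid:  0x1 (value), "hostname" (text)
 *	maxhopid:  15
 *	network:   <directory describing one offered service>
 */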
   1133
   1134static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
   1135{
   1136	return tb_to_switch(xd->dev.parent);
   1137}
   1138
   1139static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
   1140{
   1141	bool change = false;
   1142	struct tb_port *port;
   1143	int ret;
   1144
   1145	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
   1146
   1147	ret = tb_port_get_link_speed(port);
   1148	if (ret < 0)
   1149		return ret;
   1150
   1151	if (xd->link_speed != ret)
   1152		change = true;
   1153
   1154	xd->link_speed = ret;
   1155
   1156	ret = tb_port_get_link_width(port);
   1157	if (ret < 0)
   1158		return ret;
   1159
   1160	if (xd->link_width != ret)
   1161		change = true;
   1162
   1163	xd->link_width = ret;
   1164
   1165	if (change)
   1166		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
   1167
   1168	return 0;
   1169}
   1170
   1171static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
   1172{
   1173	struct tb *tb = xd->tb;
   1174	uuid_t uuid;
   1175	u64 route;
   1176	int ret;
   1177
   1178	dev_dbg(&xd->dev, "requesting remote UUID\n");
   1179
   1180	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
   1181				  &route);
   1182	if (ret < 0) {
   1183		if (xd->state_retries-- > 0) {
   1184			dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
   1185			return -EAGAIN;
   1186		} else {
   1187			dev_dbg(&xd->dev, "failed to read remote UUID\n");
   1188		}
   1189		return ret;
   1190	}
   1191
   1192	dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);
   1193
   1194	if (uuid_equal(&uuid, xd->local_uuid)) {
   1195		if (route == xd->route)
   1196			dev_dbg(&xd->dev, "loop back detected\n");
   1197		else
   1198			dev_dbg(&xd->dev, "intra-domain loop detected\n");
   1199
   1200		/* Don't bond lanes automatically for loops */
   1201		xd->bonding_possible = false;
   1202	}
   1203
   1204	/*
   1205	 * If the UUID is different, there is another domain connected
   1206	 * so mark this one unplugged and wait for the connection
   1207	 * manager to replace it.
   1208	 */
   1209	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
   1210		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
   1211		xd->is_unplugged = true;
   1212		return -ENODEV;
   1213	}
   1214
    1215	/* First time around, fill in the missing UUID */
   1216	if (!xd->remote_uuid) {
   1217		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
   1218		if (!xd->remote_uuid)
   1219			return -ENOMEM;
   1220	}
   1221
   1222	return 0;
   1223}
   1224
   1225static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
   1226{
   1227	struct tb *tb = xd->tb;
   1228	u8 slw, tlw, sls, tls;
   1229	int ret;
   1230
   1231	dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
   1232		xd->remote_uuid);
   1233
   1234	ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
   1235					       xd->state_retries, &slw, &tlw, &sls,
   1236					       &tls);
   1237	if (ret) {
   1238		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
   1239			dev_dbg(&xd->dev,
   1240				"failed to request remote link status, retrying\n");
   1241			return -EAGAIN;
   1242		}
   1243		dev_dbg(&xd->dev, "failed to receive remote link status\n");
   1244		return ret;
   1245	}
   1246
   1247	dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);
   1248
   1249	if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
   1250		dev_dbg(&xd->dev, "remote adapter is single lane only\n");
   1251		return -EOPNOTSUPP;
   1252	}
   1253
   1254	return 0;
   1255}
   1256
   1257static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
   1258					unsigned int width)
   1259{
   1260	struct tb_switch *sw = tb_to_switch(xd->dev.parent);
   1261	struct tb_port *port = tb_port_at(xd->route, sw);
   1262	struct tb *tb = xd->tb;
   1263	u8 tlw, tls;
   1264	u32 val;
   1265	int ret;
   1266
   1267	if (width == 2)
   1268		tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
   1269	else if (width == 1)
   1270		tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
   1271	else
   1272		return -EINVAL;
   1273
   1274	/* Use the current target speed */
   1275	ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
   1276	if (ret)
   1277		return ret;
   1278	tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;
   1279
   1280	dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
   1281		tlw, tls);
   1282
   1283	ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
   1284					       xd->state_retries, tlw, tls);
   1285	if (ret) {
   1286		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
   1287			dev_dbg(&xd->dev,
   1288				"failed to change remote link state, retrying\n");
   1289			return -EAGAIN;
   1290		}
    1291		dev_err(&xd->dev, "failed to request link state change, aborting\n");
   1292		return ret;
   1293	}
   1294
   1295	dev_dbg(&xd->dev, "received link state change response\n");
   1296	return 0;
   1297}
   1298
   1299static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
   1300{
   1301	struct tb_port *port;
   1302	int ret, width;
   1303
   1304	if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
   1305		width = 1;
   1306	} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
   1307		width = 2;
   1308	} else {
   1309		if (xd->state_retries-- > 0) {
   1310			dev_dbg(&xd->dev,
   1311				"link state change request not received yet, retrying\n");
   1312			return -EAGAIN;
   1313		}
   1314		dev_dbg(&xd->dev, "timeout waiting for link change request\n");
   1315		return -ETIMEDOUT;
   1316	}
   1317
   1318	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
   1319
   1320	/*
   1321	 * We can't use tb_xdomain_lane_bonding_enable() here because it
   1322	 * is the other side that initiates lane bonding. So here we
    1323	 * just set the width on both lane adapters and wait for the
    1324	 * link to transition to bonded.
   1325	 */
   1326	ret = tb_port_set_link_width(port->dual_link_port, width);
   1327	if (ret) {
   1328		tb_port_warn(port->dual_link_port,
   1329			     "failed to set link width to %d\n", width);
   1330		return ret;
   1331	}
   1332
   1333	ret = tb_port_set_link_width(port, width);
   1334	if (ret) {
   1335		tb_port_warn(port, "failed to set link width to %d\n", width);
   1336		return ret;
   1337	}
   1338
   1339	ret = tb_port_wait_for_link_width(port, width, XDOMAIN_BONDING_TIMEOUT);
   1340	if (ret) {
   1341		dev_warn(&xd->dev, "error waiting for link width to become %d\n",
   1342			 width);
   1343		return ret;
   1344	}
   1345
   1346	port->bonded = width == 2;
   1347	port->dual_link_port->bonded = width == 2;
   1348
   1349	tb_port_update_credits(port);
   1350	tb_xdomain_update_link_attributes(xd);
   1351
   1352	dev_dbg(&xd->dev, "lane bonding %sabled\n", width == 2 ? "en" : "dis");
   1353	return 0;
   1354}
   1355
   1356static int tb_xdomain_get_properties(struct tb_xdomain *xd)
   1357{
   1358	struct tb_property_dir *dir;
   1359	struct tb *tb = xd->tb;
   1360	bool update = false;
   1361	u32 *block = NULL;
   1362	u32 gen = 0;
   1363	int ret;
   1364
   1365	dev_dbg(&xd->dev, "requesting remote properties\n");
   1366
   1367	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
   1368					xd->remote_uuid, xd->state_retries,
   1369					&block, &gen);
   1370	if (ret < 0) {
   1371		if (xd->state_retries-- > 0) {
   1372			dev_dbg(&xd->dev,
   1373				"failed to request remote properties, retrying\n");
   1374			return -EAGAIN;
   1375		} else {
   1376			/* Give up now */
   1377			dev_err(&xd->dev,
    1378				"failed to read XDomain properties from %pUb\n",
   1379				xd->remote_uuid);
   1380		}
   1381
   1382		return ret;
   1383	}
   1384
   1385	mutex_lock(&xd->lock);
   1386
   1387	/* Only accept newer generation properties */
   1388	if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
   1389		ret = 0;
   1390		goto err_free_block;
   1391	}
   1392
   1393	dir = tb_property_parse_dir(block, ret);
   1394	if (!dir) {
   1395		dev_err(&xd->dev, "failed to parse XDomain properties\n");
   1396		ret = -ENOMEM;
   1397		goto err_free_block;
   1398	}
   1399
   1400	ret = populate_properties(xd, dir);
   1401	if (ret) {
   1402		dev_err(&xd->dev, "missing XDomain properties in response\n");
   1403		goto err_free_dir;
   1404	}
   1405
   1406	/* Release the existing one */
   1407	if (xd->remote_properties) {
   1408		tb_property_free_dir(xd->remote_properties);
   1409		update = true;
   1410	}
   1411
   1412	xd->remote_properties = dir;
   1413	xd->remote_property_block_gen = gen;
   1414
   1415	tb_xdomain_update_link_attributes(xd);
   1416
   1417	mutex_unlock(&xd->lock);
   1418
   1419	kfree(block);
   1420
   1421	/*
   1422	 * Now the device should be ready enough so we can add it to the
   1423	 * bus and let userspace know about it. If the device is already
    1424	 * registered, we notify userspace that it has changed.
   1425	 */
   1426	if (!update) {
   1427		struct tb_port *port;
   1428
   1429		/* Now disable lane 1 if bonding was not enabled */
   1430		port = tb_port_at(xd->route, tb_xdomain_parent(xd));
   1431		if (!port->bonded)
   1432			tb_port_disable(port->dual_link_port);
   1433
   1434		if (device_add(&xd->dev)) {
   1435			dev_err(&xd->dev, "failed to add XDomain device\n");
   1436			return -ENODEV;
   1437		}
   1438		dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
   1439			 xd->vendor, xd->device);
   1440		if (xd->vendor_name && xd->device_name)
   1441			dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
   1442				 xd->device_name);
   1443	} else {
   1444		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
   1445	}
   1446
   1447	enumerate_services(xd);
   1448	return 0;
   1449
   1450err_free_dir:
   1451	tb_property_free_dir(dir);
   1452err_free_block:
   1453	kfree(block);
   1454	mutex_unlock(&xd->lock);
   1455
   1456	return ret;
   1457}
   1458
   1459static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
   1460{
   1461	xd->state = XDOMAIN_STATE_UUID;
   1462	xd->state_retries = XDOMAIN_RETRIES;
   1463	queue_delayed_work(xd->tb->wq, &xd->state_work,
   1464			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
   1465}
   1466
   1467static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
   1468{
   1469	xd->state = XDOMAIN_STATE_LINK_STATUS;
   1470	xd->state_retries = XDOMAIN_RETRIES;
   1471	queue_delayed_work(xd->tb->wq, &xd->state_work,
   1472			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
   1473}
   1474
   1475static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
   1476{
   1477	xd->state = XDOMAIN_STATE_LINK_STATUS2;
   1478	xd->state_retries = XDOMAIN_RETRIES;
   1479	queue_delayed_work(xd->tb->wq, &xd->state_work,
   1480			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
   1481}
   1482
   1483static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
   1484{
   1485	if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
   1486		dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
   1487		xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
   1488	} else {
   1489		dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
   1490		xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
   1491	}
   1492
   1493	xd->state_retries = XDOMAIN_RETRIES;
   1494	queue_delayed_work(xd->tb->wq, &xd->state_work,
   1495			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
   1496}
   1497
   1498static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
   1499{
   1500	xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
   1501	xd->state_retries = XDOMAIN_RETRIES;
   1502	queue_delayed_work(xd->tb->wq, &xd->state_work,
   1503			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
   1504}
   1505
   1506static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
   1507{
   1508	xd->state = XDOMAIN_STATE_PROPERTIES;
   1509	xd->state_retries = XDOMAIN_RETRIES;
   1510	queue_delayed_work(xd->tb->wq, &xd->state_work,
   1511			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
   1512}
   1513
   1514static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
   1515{
   1516	xd->properties_changed_retries = XDOMAIN_RETRIES;
   1517	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
   1518			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
   1519}
   1520
   1521static void tb_xdomain_state_work(struct work_struct *work)
   1522{
   1523	struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
   1524	int ret, state = xd->state;
   1525
   1526	if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
   1527			 state > XDOMAIN_STATE_ERROR))
   1528		return;
   1529
   1530	dev_dbg(&xd->dev, "running state %s\n", state_names[state]);
   1531
   1532	switch (state) {
   1533	case XDOMAIN_STATE_INIT:
   1534		if (xd->needs_uuid) {
   1535			tb_xdomain_queue_uuid(xd);
   1536		} else {
   1537			tb_xdomain_queue_properties_changed(xd);
   1538			tb_xdomain_queue_properties(xd);
   1539		}
   1540		break;
   1541
   1542	case XDOMAIN_STATE_UUID:
   1543		ret = tb_xdomain_get_uuid(xd);
   1544		if (ret) {
   1545			if (ret == -EAGAIN)
   1546				goto retry_state;
   1547			xd->state = XDOMAIN_STATE_ERROR;
   1548		} else {
   1549			tb_xdomain_queue_properties_changed(xd);
   1550			if (xd->bonding_possible)
   1551				tb_xdomain_queue_link_status(xd);
   1552			else
   1553				tb_xdomain_queue_properties(xd);
   1554		}
   1555		break;
   1556
   1557	case XDOMAIN_STATE_LINK_STATUS:
   1558		ret = tb_xdomain_get_link_status(xd);
   1559		if (ret) {
   1560			if (ret == -EAGAIN)
   1561				goto retry_state;
   1562
   1563			/*
   1564			 * If any of the lane bonding states fail we skip
   1565			 * bonding completely and try to continue from
   1566			 * reading properties.
   1567			 */
   1568			tb_xdomain_queue_properties(xd);
   1569		} else {
   1570			tb_xdomain_queue_bonding(xd);
   1571		}
   1572		break;
   1573
   1574	case XDOMAIN_STATE_LINK_STATE_CHANGE:
   1575		ret = tb_xdomain_link_state_change(xd, 2);
   1576		if (ret) {
   1577			if (ret == -EAGAIN)
   1578				goto retry_state;
   1579			tb_xdomain_queue_properties(xd);
   1580		} else {
   1581			tb_xdomain_queue_link_status2(xd);
   1582		}
   1583		break;
   1584
   1585	case XDOMAIN_STATE_LINK_STATUS2:
   1586		ret = tb_xdomain_get_link_status(xd);
   1587		if (ret) {
   1588			if (ret == -EAGAIN)
   1589				goto retry_state;
   1590			tb_xdomain_queue_properties(xd);
   1591		} else {
   1592			tb_xdomain_queue_bonding_uuid_low(xd);
   1593		}
   1594		break;
   1595
   1596	case XDOMAIN_STATE_BONDING_UUID_LOW:
   1597		tb_xdomain_lane_bonding_enable(xd);
   1598		tb_xdomain_queue_properties(xd);
   1599		break;
   1600
   1601	case XDOMAIN_STATE_BONDING_UUID_HIGH:
   1602		if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
   1603			goto retry_state;
   1604		tb_xdomain_queue_properties(xd);
   1605		break;
   1606
   1607	case XDOMAIN_STATE_PROPERTIES:
   1608		ret = tb_xdomain_get_properties(xd);
   1609		if (ret) {
   1610			if (ret == -EAGAIN)
   1611				goto retry_state;
   1612			xd->state = XDOMAIN_STATE_ERROR;
   1613		} else {
   1614			xd->state = XDOMAIN_STATE_ENUMERATED;
   1615		}
   1616		break;
   1617
   1618	case XDOMAIN_STATE_ENUMERATED:
   1619		tb_xdomain_queue_properties(xd);
   1620		break;
   1621
   1622	case XDOMAIN_STATE_ERROR:
   1623		break;
   1624
   1625	default:
   1626		dev_warn(&xd->dev, "unexpected state %d\n", state);
   1627		break;
   1628	}
   1629
   1630	return;
   1631
   1632retry_state:
   1633	queue_delayed_work(xd->tb->wq, &xd->state_work,
   1634			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
   1635}
   1636
   1637static void tb_xdomain_properties_changed(struct work_struct *work)
   1638{
   1639	struct tb_xdomain *xd = container_of(work, typeof(*xd),
   1640					     properties_changed_work.work);
   1641	int ret;
   1642
   1643	dev_dbg(&xd->dev, "sending properties changed notification\n");
   1644
   1645	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
   1646				xd->properties_changed_retries, xd->local_uuid);
   1647	if (ret) {
   1648		if (xd->properties_changed_retries-- > 0) {
   1649			dev_dbg(&xd->dev,
   1650				"failed to send properties changed notification, retrying\n");
   1651			queue_delayed_work(xd->tb->wq,
   1652					   &xd->properties_changed_work,
    1653					   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
			/* Only report the error once retries are exhausted */
			return;
    1654		}
   1655		dev_err(&xd->dev, "failed to send properties changed notification\n");
   1656		return;
   1657	}
   1658
   1659	xd->properties_changed_retries = XDOMAIN_RETRIES;
   1660}
   1661
   1662static ssize_t device_show(struct device *dev, struct device_attribute *attr,
   1663			   char *buf)
   1664{
   1665	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
   1666
   1667	return sprintf(buf, "%#x\n", xd->device);
   1668}
   1669static DEVICE_ATTR_RO(device);
   1670
   1671static ssize_t
   1672device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
   1673{
   1674	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
   1675	int ret;
   1676
   1677	if (mutex_lock_interruptible(&xd->lock))
   1678		return -ERESTARTSYS;
   1679	ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
   1680	mutex_unlock(&xd->lock);
   1681
   1682	return ret;
   1683}
   1684static DEVICE_ATTR_RO(device_name);
   1685
   1686static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
   1687			     char *buf)
   1688{
   1689	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
   1690
   1691	return sprintf(buf, "%d\n", xd->remote_max_hopid);
   1692}
   1693static DEVICE_ATTR_RO(maxhopid);
   1694
   1695static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
   1696			   char *buf)
   1697{
   1698	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
   1699
   1700	return sprintf(buf, "%#x\n", xd->vendor);
   1701}
   1702static DEVICE_ATTR_RO(vendor);
   1703
   1704static ssize_t
   1705vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
   1706{
   1707	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
   1708	int ret;
   1709
   1710	if (mutex_lock_interruptible(&xd->lock))
   1711		return -ERESTARTSYS;
   1712	ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
   1713	mutex_unlock(&xd->lock);
   1714
   1715	return ret;
   1716}
   1717static DEVICE_ATTR_RO(vendor_name);
   1718
   1719static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
   1720			      char *buf)
   1721{
   1722	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
   1723
   1724	return sprintf(buf, "%pUb\n", xd->remote_uuid);
   1725}
   1726static DEVICE_ATTR_RO(unique_id);
   1727
   1728static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
   1729			  char *buf)
   1730{
   1731	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
   1732
   1733	return sprintf(buf, "%u.0 Gb/s\n", xd->link_speed);
   1734}
   1735
   1736static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
   1737static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
   1738
   1739static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
   1740			  char *buf)
   1741{
   1742	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
   1743
   1744	return sprintf(buf, "%u\n", xd->link_width);
   1745}
   1746
   1747static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
   1748static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
   1749
   1750static struct attribute *xdomain_attrs[] = {
   1751	&dev_attr_device.attr,
   1752	&dev_attr_device_name.attr,
   1753	&dev_attr_maxhopid.attr,
   1754	&dev_attr_rx_lanes.attr,
   1755	&dev_attr_rx_speed.attr,
   1756	&dev_attr_tx_lanes.attr,
   1757	&dev_attr_tx_speed.attr,
   1758	&dev_attr_unique_id.attr,
   1759	&dev_attr_vendor.attr,
   1760	&dev_attr_vendor_name.attr,
   1761	NULL,
   1762};
   1763
   1764static const struct attribute_group xdomain_attr_group = {
   1765	.attrs = xdomain_attrs,
   1766};
   1767
   1768static const struct attribute_group *xdomain_attr_groups[] = {
   1769	&xdomain_attr_group,
   1770	NULL,
   1771};
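
/*
 * Illustrative sketch (not part of the driver): with the attribute
 * groups above and the "%u-%llx" device name set in tb_xdomain_alloc()
 * below, an XDomain shows up with read-only sysfs files roughly like
 * this (domain index 0 and route 1 are placeholder values):
 *
 *	/sys/bus/thunderbolt/devices/0-1/device
 *	/sys/bus/thunderbolt/devices/0-1/device_name
 *	/sys/bus/thunderbolt/devices/0-1/maxhopid
 *	/sys/bus/thunderbolt/devices/0-1/rx_lanes
 *	/sys/bus/thunderbolt/devices/0-1/rx_speed
 *	/sys/bus/thunderbolt/devices/0-1/tx_lanes
 *	/sys/bus/thunderbolt/devices/0-1/tx_speed
 *	/sys/bus/thunderbolt/devices/0-1/unique_id
 *	/sys/bus/thunderbolt/devices/0-1/vendor
 *	/sys/bus/thunderbolt/devices/0-1/vendor_name
 */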
   1772
   1773static void tb_xdomain_release(struct device *dev)
   1774{
   1775	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
   1776
   1777	put_device(xd->dev.parent);
   1778
   1779	kfree(xd->local_property_block);
   1780	tb_property_free_dir(xd->remote_properties);
   1781	ida_destroy(&xd->out_hopids);
   1782	ida_destroy(&xd->in_hopids);
   1783	ida_destroy(&xd->service_ids);
   1784
   1785	kfree(xd->local_uuid);
   1786	kfree(xd->remote_uuid);
   1787	kfree(xd->device_name);
   1788	kfree(xd->vendor_name);
   1789	kfree(xd);
   1790}
   1791
   1792static void start_handshake(struct tb_xdomain *xd)
   1793{
   1794	xd->state = XDOMAIN_STATE_INIT;
   1795	queue_delayed_work(xd->tb->wq, &xd->state_work,
   1796			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
   1797}
   1798
   1799static void stop_handshake(struct tb_xdomain *xd)
   1800{
   1801	cancel_delayed_work_sync(&xd->properties_changed_work);
   1802	cancel_delayed_work_sync(&xd->state_work);
   1803	xd->properties_changed_retries = 0;
   1804	xd->state_retries = 0;
   1805}
   1806
   1807static int __maybe_unused tb_xdomain_suspend(struct device *dev)
   1808{
   1809	stop_handshake(tb_to_xdomain(dev));
   1810	return 0;
   1811}
   1812
   1813static int __maybe_unused tb_xdomain_resume(struct device *dev)
   1814{
   1815	start_handshake(tb_to_xdomain(dev));
   1816	return 0;
   1817}
   1818
   1819static const struct dev_pm_ops tb_xdomain_pm_ops = {
   1820	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
   1821};
   1822
   1823struct device_type tb_xdomain_type = {
   1824	.name = "thunderbolt_xdomain",
   1825	.release = tb_xdomain_release,
   1826	.pm = &tb_xdomain_pm_ops,
   1827};
   1828EXPORT_SYMBOL_GPL(tb_xdomain_type);
   1829
   1830/**
   1831 * tb_xdomain_alloc() - Allocate new XDomain object
   1832 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to the
 *	    other domain is reached).
   1835 * @route: Route string used to reach the other domain
   1836 * @local_uuid: Our local domain UUID
   1837 * @remote_uuid: UUID of the other domain (optional)
   1838 *
 * Allocates a new XDomain structure and returns a pointer to it. The
 * object must be released by calling tb_xdomain_put().
   1841 */
   1842struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
   1843				    u64 route, const uuid_t *local_uuid,
   1844				    const uuid_t *remote_uuid)
   1845{
   1846	struct tb_switch *parent_sw = tb_to_switch(parent);
   1847	struct tb_xdomain *xd;
   1848	struct tb_port *down;
   1849
   1850	/* Make sure the downstream domain is accessible */
   1851	down = tb_port_at(route, parent_sw);
   1852	tb_port_unlock(down);
   1853
   1854	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
   1855	if (!xd)
   1856		return NULL;
   1857
   1858	xd->tb = tb;
   1859	xd->route = route;
   1860	xd->local_max_hopid = down->config.max_in_hop_id;
   1861	ida_init(&xd->service_ids);
   1862	ida_init(&xd->in_hopids);
   1863	ida_init(&xd->out_hopids);
   1864	mutex_init(&xd->lock);
   1865	INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
   1866	INIT_DELAYED_WORK(&xd->properties_changed_work,
   1867			  tb_xdomain_properties_changed);
   1868
   1869	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
   1870	if (!xd->local_uuid)
   1871		goto err_free;
   1872
   1873	if (remote_uuid) {
   1874		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
   1875					  GFP_KERNEL);
   1876		if (!xd->remote_uuid)
   1877			goto err_free_local_uuid;
   1878	} else {
   1879		xd->needs_uuid = true;
   1880		xd->bonding_possible = !!down->dual_link_port;
   1881	}
   1882
   1883	device_initialize(&xd->dev);
   1884	xd->dev.parent = get_device(parent);
   1885	xd->dev.bus = &tb_bus_type;
   1886	xd->dev.type = &tb_xdomain_type;
   1887	xd->dev.groups = xdomain_attr_groups;
   1888	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
   1889
   1890	dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
   1891	if (remote_uuid)
   1892		dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);
   1893
   1894	/*
	 * This keeps the DMA powered on as long as we have an active
	 * connection to another host.
   1897	 */
   1898	pm_runtime_set_active(&xd->dev);
   1899	pm_runtime_get_noresume(&xd->dev);
   1900	pm_runtime_enable(&xd->dev);
   1901
   1902	return xd;
   1903
   1904err_free_local_uuid:
   1905	kfree(xd->local_uuid);
   1906err_free:
   1907	kfree(xd);
   1908
   1909	return NULL;
   1910}
   1911
   1912/**
   1913 * tb_xdomain_add() - Add XDomain to the bus
   1914 * @xd: XDomain to add
   1915 *
   1916 * This function starts XDomain discovery protocol handshake and
   1917 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded.
   1920 */
   1921void tb_xdomain_add(struct tb_xdomain *xd)
   1922{
   1923	/* Start exchanging properties with the other host */
   1924	start_handshake(xd);
   1925}
   1926
   1927static int unregister_service(struct device *dev, void *data)
   1928{
   1929	device_unregister(dev);
   1930	return 0;
   1931}
   1932
   1933/**
   1934 * tb_xdomain_remove() - Remove XDomain from the bus
   1935 * @xd: XDomain to remove
   1936 *
   1937 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is dropped the object is freed as well.
   1940 */
   1941void tb_xdomain_remove(struct tb_xdomain *xd)
   1942{
   1943	stop_handshake(xd);
   1944
   1945	device_for_each_child_reverse(&xd->dev, xd, unregister_service);
   1946
   1947	/*
   1948	 * Undo runtime PM here explicitly because it is possible that
   1949	 * the XDomain was never added to the bus and thus device_del()
   1950	 * is not called for it (device_del() would handle this otherwise).
   1951	 */
   1952	pm_runtime_disable(&xd->dev);
   1953	pm_runtime_put_noidle(&xd->dev);
   1954	pm_runtime_set_suspended(&xd->dev);
   1955
   1956	if (!device_is_registered(&xd->dev)) {
   1957		put_device(&xd->dev);
   1958	} else {
   1959		dev_info(&xd->dev, "host disconnected\n");
   1960		device_unregister(&xd->dev);
   1961	}
   1962}
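
/*
 * Illustrative lifecycle sketch (an assumption, not a call site in this
 * file): a connection manager is expected to pair the calls above
 * roughly as follows; "parent_sw", "route" and the UUID pointers are
 * placeholders.
 *
 *	xd = tb_xdomain_alloc(tb, &parent_sw->dev, route,
 *			      local_uuid, remote_uuid);
 *	if (!xd)
 *		return -ENOMEM;
 *
 *	tb_xdomain_add(xd);	// kicks off the discovery handshake
 *	...
 *	tb_xdomain_remove(xd);	// stops work and drops the reference
 */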
   1963
   1964/**
   1965 * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
   1966 * @xd: XDomain connection
   1967 *
   1968 * Lane bonding is disabled by default for XDomains. This function tries
   1969 * to enable bonding by first enabling the port and waiting for the CL0
   1970 * state.
   1971 *
   1972 * Return: %0 in case of success and negative errno in case of error.
   1973 */
   1974int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
   1975{
   1976	struct tb_port *port;
   1977	int ret;
   1978
   1979	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
   1980	if (!port->dual_link_port)
   1981		return -ENODEV;
   1982
   1983	ret = tb_port_enable(port->dual_link_port);
   1984	if (ret)
   1985		return ret;
   1986
   1987	ret = tb_wait_for_port(port->dual_link_port, true);
   1988	if (ret < 0)
   1989		return ret;
   1990	if (!ret)
   1991		return -ENOTCONN;
   1992
   1993	ret = tb_port_lane_bonding_enable(port);
   1994	if (ret) {
   1995		tb_port_warn(port, "failed to enable lane bonding\n");
   1996		return ret;
   1997	}
   1998
   1999	ret = tb_port_wait_for_link_width(port, 2, XDOMAIN_BONDING_TIMEOUT);
   2000	if (ret) {
   2001		tb_port_warn(port, "failed to enable lane bonding\n");
   2002		return ret;
   2003	}
   2004
   2005	tb_port_update_credits(port);
   2006	tb_xdomain_update_link_attributes(xd);
   2007
   2008	dev_dbg(&xd->dev, "lane bonding enabled\n");
   2009	return 0;
   2010}
   2011EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);
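
/*
 * Illustrative sketch (an assumption): a caller that prefers a bonded
 * link but can operate over a single lane might use the above like so:
 *
 *	ret = tb_xdomain_lane_bonding_enable(xd);
 *	if (ret)
 *		dev_dbg(&xd->dev, "bonding not available, using one lane\n");
 */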
   2012
   2013/**
   2014 * tb_xdomain_lane_bonding_disable() - Disable lane bonding
   2015 * @xd: XDomain connection
   2016 *
   2017 * Lane bonding is disabled by default for XDomains. If bonding has been
   2018 * enabled, this function can be used to disable it.
   2019 */
   2020void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
   2021{
   2022	struct tb_port *port;
   2023
   2024	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
   2025	if (port->dual_link_port) {
   2026		tb_port_lane_bonding_disable(port);
   2027		if (tb_port_wait_for_link_width(port, 1, 100) == -ETIMEDOUT)
   2028			tb_port_warn(port, "timeout disabling lane bonding\n");
   2029		tb_port_disable(port->dual_link_port);
   2030		tb_port_update_credits(port);
   2031		tb_xdomain_update_link_attributes(xd);
   2032
   2033		dev_dbg(&xd->dev, "lane bonding disabled\n");
   2034	}
   2035}
   2036EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
   2037
   2038/**
   2039 * tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling
   2040 * @xd: XDomain connection
   2041 * @hopid: Preferred HopID or %-1 for next available
   2042 *
   2043 * Returns allocated HopID or negative errno. Specifically returns
   2044 * %-ENOSPC if there are no more available HopIDs. Returned HopID is
   2045 * guaranteed to be within range supported by the input lane adapter.
   2046 * Call tb_xdomain_release_in_hopid() to release the allocated HopID.
   2047 */
   2048int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
   2049{
   2050	if (hopid < 0)
   2051		hopid = TB_PATH_MIN_HOPID;
   2052	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
   2053		return -EINVAL;
   2054
   2055	return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
   2056			       GFP_KERNEL);
   2057}
   2058EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
   2059
   2060/**
   2061 * tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling
   2062 * @xd: XDomain connection
   2063 * @hopid: Preferred HopID or %-1 for next available
   2064 *
   2065 * Returns allocated HopID or negative errno. Specifically returns
   2066 * %-ENOSPC if there are no more available HopIDs. Returned HopID is
   2067 * guaranteed to be within range supported by the output lane adapter.
 * Call tb_xdomain_release_out_hopid() to release the allocated HopID.
   2069 */
   2070int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
   2071{
   2072	if (hopid < 0)
   2073		hopid = TB_PATH_MIN_HOPID;
   2074	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
   2075		return -EINVAL;
   2076
   2077	return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
   2078			       GFP_KERNEL);
   2079}
   2080EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);
   2081
   2082/**
   2083 * tb_xdomain_release_in_hopid() - Release input HopID
   2084 * @xd: XDomain connection
   2085 * @hopid: HopID to release
   2086 */
   2087void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
   2088{
   2089	ida_free(&xd->in_hopids, hopid);
   2090}
   2091EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);
   2092
   2093/**
   2094 * tb_xdomain_release_out_hopid() - Release output HopID
   2095 * @xd: XDomain connection
   2096 * @hopid: HopID to release
   2097 */
   2098void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
   2099{
   2100	ida_free(&xd->out_hopids, hopid);
   2101}
   2102EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
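
/*
 * Illustrative sketch (an assumption): a service driver could reserve
 * a HopID pair for a tunnel and release both on teardown:
 *
 *	int in, out;
 *
 *	in = tb_xdomain_alloc_in_hopid(xd, -1);
 *	if (in < 0)
 *		return in;
 *	out = tb_xdomain_alloc_out_hopid(xd, -1);
 *	if (out < 0) {
 *		tb_xdomain_release_in_hopid(xd, in);
 *		return out;
 *	}
 *	...
 *	tb_xdomain_release_out_hopid(xd, out);
 *	tb_xdomain_release_in_hopid(xd, in);
 */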
   2103
   2104/**
   2105 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
   2106 * @xd: XDomain connection
   2107 * @transmit_path: HopID we are using to send out packets
   2108 * @transmit_ring: DMA ring used to send out packets
   2109 * @receive_path: HopID the other end is using to send packets to us
   2110 * @receive_ring: DMA ring used to receive packets from @receive_path
   2111 *
 * The function enables DMA paths accordingly so that after successful
 * return the caller can send and receive packets using the high-speed
 * DMA paths. If a transmit or receive path is not needed, pass %-1 for
 * that parameter.
   2116 *
   2117 * Return: %0 in case of success and negative errno in case of error
   2118 */
   2119int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
   2120			    int transmit_ring, int receive_path,
   2121			    int receive_ring)
   2122{
   2123	return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
   2124					       transmit_ring, receive_path,
   2125					       receive_ring);
   2126}
   2127EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
   2128
   2129/**
   2130 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
   2131 * @xd: XDomain connection
   2132 * @transmit_path: HopID we are using to send out packets
   2133 * @transmit_ring: DMA ring used to send out packets
   2134 * @receive_path: HopID the other end is using to send packets to us
   2135 * @receive_ring: DMA ring used to receive packets from @receive_path
   2136 *
 * This does the opposite of tb_xdomain_enable_paths(). After a call to
 * this function the caller is not expected to use the rings anymore.
 * Passing %-1 as a path/ring parameter means "don't care". Normally the
 * callers should pass the same values here as they did when the paths
 * were enabled.
   2141 *
   2142 * Return: %0 in case of success and negative errno in case of error
   2143 */
   2144int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
   2145			     int transmit_ring, int receive_path,
   2146			     int receive_ring)
   2147{
   2148	return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
   2149						  transmit_ring, receive_path,
   2150						  receive_ring);
   2151}
   2152EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
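
/*
 * Illustrative sketch (an assumption): enable and disable are expected
 * to be called with matching parameters; the path and ring values come
 * from the caller's HopID and ring setup:
 *
 *	ret = tb_xdomain_enable_paths(xd, transmit_path, transmit_ring,
 *				      receive_path, receive_ring);
 *	if (ret)
 *		return ret;
 *	...
 *	tb_xdomain_disable_paths(xd, transmit_path, transmit_ring,
 *				 receive_path, receive_ring);
 */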
   2153
   2154struct tb_xdomain_lookup {
   2155	const uuid_t *uuid;
   2156	u8 link;
   2157	u8 depth;
   2158	u64 route;
   2159};
   2160
   2161static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
   2162	const struct tb_xdomain_lookup *lookup)
   2163{
   2164	struct tb_port *port;
   2165
   2166	tb_switch_for_each_port(sw, port) {
   2167		struct tb_xdomain *xd;
   2168
   2169		if (port->xdomain) {
   2170			xd = port->xdomain;
   2171
   2172			if (lookup->uuid) {
   2173				if (xd->remote_uuid &&
   2174				    uuid_equal(xd->remote_uuid, lookup->uuid))
   2175					return xd;
   2176			} else if (lookup->link &&
   2177				   lookup->link == xd->link &&
   2178				   lookup->depth == xd->depth) {
   2179				return xd;
   2180			} else if (lookup->route &&
   2181				   lookup->route == xd->route) {
   2182				return xd;
   2183			}
   2184		} else if (tb_port_has_remote(port)) {
   2185			xd = switch_find_xdomain(port->remote->sw, lookup);
   2186			if (xd)
   2187				return xd;
   2188		}
   2189	}
   2190
   2191	return NULL;
   2192}
   2193
   2194/**
   2195 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain the XDomain belongs to
   2197 * @uuid: UUID to look for
   2198 *
   2199 * Finds XDomain by walking through the Thunderbolt topology below @tb.
   2200 * The returned XDomain will have its reference count increased so the
   2201 * caller needs to call tb_xdomain_put() when it is done with the
   2202 * object.
   2203 *
   2204 * This will find all XDomains including the ones that are not yet added
   2205 * to the bus (handshake is still in progress).
   2206 *
   2207 * The caller needs to hold @tb->lock.
   2208 */
   2209struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
   2210{
   2211	struct tb_xdomain_lookup lookup;
   2212	struct tb_xdomain *xd;
   2213
   2214	memset(&lookup, 0, sizeof(lookup));
   2215	lookup.uuid = uuid;
   2216
   2217	xd = switch_find_xdomain(tb->root_switch, &lookup);
   2218	return tb_xdomain_get(xd);
   2219}
   2220EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
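
/*
 * Illustrative sketch (an assumption): the lookup returns a referenced
 * object, so the caller drops the reference when done and holds
 * @tb->lock around the lookup itself:
 *
 *	mutex_lock(&tb->lock);
 *	xd = tb_xdomain_find_by_uuid(tb, uuid);
 *	mutex_unlock(&tb->lock);
 *	if (xd) {
 *		...
 *		tb_xdomain_put(xd);
 *	}
 */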
   2221
   2222/**
   2223 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain the XDomain belongs to
   2225 * @link: Root switch link number
   2226 * @depth: Depth in the link
   2227 *
   2228 * Finds XDomain by walking through the Thunderbolt topology below @tb.
   2229 * The returned XDomain will have its reference count increased so the
   2230 * caller needs to call tb_xdomain_put() when it is done with the
   2231 * object.
   2232 *
   2233 * This will find all XDomains including the ones that are not yet added
   2234 * to the bus (handshake is still in progress).
   2235 *
   2236 * The caller needs to hold @tb->lock.
   2237 */
   2238struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
   2239						 u8 depth)
   2240{
   2241	struct tb_xdomain_lookup lookup;
   2242	struct tb_xdomain *xd;
   2243
   2244	memset(&lookup, 0, sizeof(lookup));
   2245	lookup.link = link;
   2246	lookup.depth = depth;
   2247
   2248	xd = switch_find_xdomain(tb->root_switch, &lookup);
   2249	return tb_xdomain_get(xd);
   2250}
   2251
   2252/**
   2253 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain the XDomain belongs to
   2255 * @route: XDomain route string
   2256 *
   2257 * Finds XDomain by walking through the Thunderbolt topology below @tb.
   2258 * The returned XDomain will have its reference count increased so the
   2259 * caller needs to call tb_xdomain_put() when it is done with the
   2260 * object.
   2261 *
   2262 * This will find all XDomains including the ones that are not yet added
   2263 * to the bus (handshake is still in progress).
   2264 *
   2265 * The caller needs to hold @tb->lock.
   2266 */
   2267struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
   2268{
   2269	struct tb_xdomain_lookup lookup;
   2270	struct tb_xdomain *xd;
   2271
   2272	memset(&lookup, 0, sizeof(lookup));
   2273	lookup.route = route;
   2274
   2275	xd = switch_find_xdomain(tb->root_switch, &lookup);
   2276	return tb_xdomain_get(xd);
   2277}
   2278EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
   2279
   2280bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
   2281			       const void *buf, size_t size)
   2282{
   2283	const struct tb_protocol_handler *handler, *tmp;
   2284	const struct tb_xdp_header *hdr = buf;
   2285	unsigned int length;
   2286	int ret = 0;
   2287
	/* We expect the packet to be at least the size of the header */
   2289	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
   2290	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
   2291		return true;
   2292	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
   2293		return true;
   2294
   2295	/*
   2296	 * Handle XDomain discovery protocol packets directly here. For
   2297	 * other protocols (based on their UUID) we call registered
   2298	 * handlers in turn.
   2299	 */
   2300	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
   2301		if (type == TB_CFG_PKG_XDOMAIN_REQ)
   2302			return tb_xdp_schedule_request(tb, hdr, size);
   2303		return false;
   2304	}
   2305
   2306	mutex_lock(&xdomain_lock);
   2307	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
   2308		if (!uuid_equal(&hdr->uuid, handler->uuid))
   2309			continue;
   2310
   2311		mutex_unlock(&xdomain_lock);
   2312		ret = handler->callback(buf, size, handler->data);
   2313		mutex_lock(&xdomain_lock);
   2314
   2315		if (ret)
   2316			break;
   2317	}
   2318	mutex_unlock(&xdomain_lock);
   2319
   2320	return ret > 0;
   2321}
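
/*
 * Illustrative sketch (an assumption): a driver implementing another
 * XDomain protocol registers a handler that the loop above invokes for
 * packets carrying its UUID; "my_proto_uuid" and "my_callback" are
 * placeholders and the handler API comes from <linux/thunderbolt.h>:
 *
 *	static struct tb_protocol_handler my_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_callback,
 *		.data = NULL,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_handler);
 */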
   2322
   2323static int update_xdomain(struct device *dev, void *data)
   2324{
   2325	struct tb_xdomain *xd;
   2326
   2327	xd = tb_to_xdomain(dev);
   2328	if (xd) {
   2329		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
   2330				   msecs_to_jiffies(50));
   2331	}
   2332
   2333	return 0;
   2334}
   2335
   2336static void update_all_xdomains(void)
   2337{
   2338	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
   2339}
   2340
   2341static bool remove_directory(const char *key, const struct tb_property_dir *dir)
   2342{
   2343	struct tb_property *p;
   2344
   2345	p = tb_property_find(xdomain_property_dir, key,
   2346			     TB_PROPERTY_TYPE_DIRECTORY);
   2347	if (p && p->value.dir == dir) {
   2348		tb_property_remove(p);
   2349		return true;
   2350	}
   2351	return false;
   2352}
   2353
   2354/**
   2355 * tb_register_property_dir() - Register property directory to the host
   2356 * @key: Key (name) of the directory to add
   2357 * @dir: Directory to add
   2358 *
 * Service drivers can use this function to add a new property directory
 * to the properties this host exposes. The other connected hosts are
 * notified so they can re-read the properties of this host if they are
 * interested.
   2363 *
   2364 * Return: %0 on success and negative errno on failure
   2365 */
   2366int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
   2367{
   2368	int ret;
   2369
   2370	if (WARN_ON(!xdomain_property_dir))
   2371		return -EAGAIN;
   2372
   2373	if (!key || strlen(key) > 8)
   2374		return -EINVAL;
   2375
   2376	mutex_lock(&xdomain_lock);
   2377	if (tb_property_find(xdomain_property_dir, key,
   2378			     TB_PROPERTY_TYPE_DIRECTORY)) {
   2379		ret = -EEXIST;
   2380		goto err_unlock;
   2381	}
   2382
   2383	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
   2384	if (ret)
   2385		goto err_unlock;
   2386
   2387	xdomain_property_block_gen++;
   2388
   2389	mutex_unlock(&xdomain_lock);
   2390	update_all_xdomains();
   2391	return 0;
   2392
   2393err_unlock:
   2394	mutex_unlock(&xdomain_lock);
   2395	return ret;
   2396}
   2397EXPORT_SYMBOL_GPL(tb_register_property_dir);
   2398
   2399/**
   2400 * tb_unregister_property_dir() - Removes property directory from host
   2401 * @key: Key (name) of the directory
   2402 * @dir: Directory to remove
   2403 *
   2404 * This will remove the existing directory from this host and notify the
   2405 * connected hosts about the change.
   2406 */
   2407void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
   2408{
	mutex_lock(&xdomain_lock);
	if (remove_directory(key, dir))
		xdomain_property_block_gen++;
	mutex_unlock(&xdomain_lock);

	update_all_xdomains();
   2418}
   2419EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
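
/*
 * Illustrative sketch (an assumption): a service driver typically
 * builds a small directory and registers it under a short key (at most
 * 8 characters, as checked above); "p2p" is a placeholder key:
 *
 *	dir = tb_property_create_dir(NULL);
 *	if (!dir)
 *		return -ENOMEM;
 *	tb_property_add_immediate(dir, "prtcid", 1);
 *	ret = tb_register_property_dir("p2p", dir);
 *	if (ret)
 *		tb_property_free_dir(dir);
 *	...
 *	tb_unregister_property_dir("p2p", dir);
 *	tb_property_free_dir(dir);
 */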
   2420
   2421int tb_xdomain_init(void)
   2422{
   2423	xdomain_property_dir = tb_property_create_dir(NULL);
   2424	if (!xdomain_property_dir)
   2425		return -ENOMEM;
   2426
   2427	/*
   2428	 * Initialize standard set of properties without any service
   2429	 * directories. Those will be added by service drivers
   2430	 * themselves when they are loaded.
   2431	 *
	 * The rest of the properties are filled in dynamically based on
	 * these when the P2P connection is made.
   2434	 */
   2435	tb_property_add_immediate(xdomain_property_dir, "vendorid",
   2436				  PCI_VENDOR_ID_INTEL);
   2437	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
   2438	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
   2439	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
   2440
   2441	xdomain_property_block_gen = prandom_u32();
   2442	return 0;
   2443}
   2444
   2445void tb_xdomain_exit(void)
   2446{
   2447	tb_property_free_dir(xdomain_property_dir);
   2448}