cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

drm_dp_mst_topology.c (167342B)


/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_dp_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband messages.
 */
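
/*
 * Example (a minimal, hypothetical sketch, not taken from any in-tree
 * driver): drivers hook into these helpers through
 * &struct drm_dp_mst_topology_cbs and switch a sink in and out of MST mode
 * with drm_dp_mst_topology_mgr_set_mst(). The example_* names below are
 * invented for illustration only.
 */
static struct drm_connector *
example_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
			  struct drm_dp_mst_port *port, const char *path)
{
	/* Create and register a drm_connector for @port here. */
	return NULL;
}

static __maybe_unused const struct drm_dp_mst_topology_cbs example_mst_cbs = {
	.add_connector = example_mst_add_connector,
};

static __maybe_unused int example_enable_mst(struct drm_dp_mst_topology_mgr *mgr)
{
	/* mgr->cbs = &example_mst_cbs was set up at init time. */
	return drm_dp_mst_topology_mgr_set_mst(mgr, true);
}
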
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
						 struct drm_dp_mst_branch *branch);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			/* low nibble; BIT_MASK(4) would select only bit 4 */
			unpacked_rad[i] = rad[i / 2] & 0xf;
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}

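/*
 * Usage sketch (illustrative only, never called): format the relative
 * address of a branch three links deep. %*phC prints the unpacked hop
 * nibbles as colon-separated hex bytes.
 */
static __maybe_unused void example_rad_to_str(void)
{
	u8 rad[8] = { 0x12 };	/* two hops packed into one byte */
	char buf[32];

	drm_dp_mst_rad_to_str(rad, 3, buf, sizeof(buf));
}
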
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

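/*
 * Sketch (illustrative only, never called): checking a received header CRC
 * the same way drm_dp_decode_sideband_msg_hdr() below does. The CRC covers
 * every header nibble except the CRC nibble itself, hence (len * 2) - 1.
 */
static __maybe_unused bool example_sb_hdr_crc_ok(const u8 *buf, u8 len)
{
	u8 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	return (crc4 & 0xf) == (buf[len - 1] & 0xf);
}
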
static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

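/*
 * Round-trip sketch (illustrative, never called): encode a minimal header
 * for a directly attached branch (lct = 1) and decode it again. A valid
 * header survives the CRC check and decodes to the same field values.
 */
static __maybe_unused void
example_sb_hdr_roundtrip(const struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_hdr in = {
		.lct = 1, .lcr = 0, .msg_len = 2, .somt = 1, .eomt = 1,
	};
	struct drm_dp_sideband_msg_hdr out = {};
	u8 buf[32], hdrlen;
	int len;

	drm_dp_encode_sideband_msg_hdr(&in, buf, &len);
	if (drm_dp_decode_sideband_msg_hdr(mgr, &out, buf, len, &hdrlen)) {
		/* out.lct == 1, out.msg_len == 2 and hdrlen == len here */
	}
}
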
void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	case DP_QUERY_STREAM_ENC_STATUS: {
		const struct drm_dp_query_stream_enc_status *msg;

		msg = &req->u.enc_status;
		buf[idx] = msg->stream_id;
		idx++;
		memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
		idx += sizeof(msg->client_id);
		buf[idx] = 0;
		buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
		buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
		buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
		buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
		idx++;
		}
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		req->u.enc_status.stream_id = buf[idx++];
		for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
			req->u.enc_status.client_id[i] = buf[idx++];

		req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
							   buf[idx]);
		req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
								 buf[idx]);
		req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
							      buf[idx]);
		req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
								    buf[idx]);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);

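/*
 * Round-trip sketch (illustrative, never called): encode a REMOTE_DPCD_WRITE
 * request and re-parse it. drm_dp_decode_sideband_req() kmemdup()s embedded
 * byte payloads, so the caller owns them and must kfree() them - exactly
 * what drm_dp_mst_dump_sideband_msg_tx() below does after printing.
 */
static __maybe_unused int example_sb_req_roundtrip(void)
{
	struct drm_dp_sideband_msg_tx txmsg = {};
	struct drm_dp_sideband_msg_req_body in = {}, out = {};
	u8 payload[2] = { 0x01, 0x02 };
	int ret;

	in.req_type = DP_REMOTE_DPCD_WRITE;
	in.u.dpcd_write.port_number = 1;
	in.u.dpcd_write.dpcd_address = 0x100;	/* arbitrary example offset */
	in.u.dpcd_write.num_bytes = sizeof(payload);
	in.u.dpcd_write.bytes = payload;
	drm_dp_encode_sideband_req(&in, &txmsg);

	ret = drm_dp_decode_sideband_req(&txmsg, &out);
	if (ret)
		return ret;

	kfree(out.u.dpcd_write.bytes);	/* decode duplicated the payload */
	return 0;
}
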
void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		P("stream_id=%u client_id=%*ph stream_event=%x "
		  "valid_event=%d stream_behavior=%x valid_behavior=%d",
		  req->u.enc_status.stream_id,
		  (int)ARRAY_SIZE(req->u.enc_status.client_id),
		  req->u.enc_status.client_id, req->u.enc_status.stream_event,
		  req->u.enc_status.valid_stream_event,
		  req->u.enc_status.stream_behavior,
		  req->u.enc_status.valid_stream_behavior);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 hdrlen)
{
	/*
	 * ignore out-of-order messages or messages that are part of a
	 * failed transaction
	 */
	if (!hdr->somt && !msg->have_somt)
		return false;

	/* get length contained in this portion */
	msg->curchunk_idx = 0;
	msg->curchunk_len = hdr->msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (hdr->somt && msg->have_somt)
		return false;

	if (hdr->somt) {
		memcpy(&msg->initial_hdr, hdr,
		       sizeof(struct drm_dp_sideband_msg_hdr));
		msg->have_somt = true;
	}
	if (hdr->eomt)
		msg->have_eomt = true;

	return true;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
					   u8 *replybuf, u8 replybuflen)
{
	u8 crc4;

	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		if (crc4 != msg->chunk[msg->curchunk_len - 1])
			print_hex_dump(KERN_DEBUG, "wrong crc",
				       DUMP_PREFIX_NONE, 16, 1,
				       msg->chunk,  msg->curchunk_len, false);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;

		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool
drm_dp_sideband_parse_query_stream_enc_status(
				struct drm_dp_sideband_msg_rx *raw,
				struct drm_dp_sideband_msg_reply_body *repmsg)
{
	struct drm_dp_query_stream_enc_status_ack_reply *reply;

	reply = &repmsg->u.enc_status;

	reply->stream_id = raw->msg[3];

	reply->reply_signed = raw->msg[2] & BIT(0);

	/*
	 * NOTE: It's my impression from reading the spec that the below parsing
	 * is correct. However I noticed while testing with an HDCP 1.4 display
	 * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
	 * would expect both bits to be set. So keep the parsing following the
	 * spec, but beware reality might not match the spec (at least for some
	 * configurations).
	 */
	reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
	reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);

	reply->query_capable_device_present = raw->msg[2] & BIT(5);
	reply->legacy_device_present = raw->msg[2] & BIT(6);
	reply->unauthorizable_device_present = raw->msg[2] & BIT(7);

	reply->auth_completed = !!(raw->msg[1] & BIT(3));
	reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
	reply->repeater_present = !!(raw->msg[1] & BIT(5));
	reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;

	return true;
}

static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(mgr, raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_REMOTE_I2C_WRITE:
		return true; /* since there's nothing to parse */
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	case DP_QUERY_STREAM_ENC_STATUS:
		return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool
drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",
		    idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
							 struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,
				      struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
				     int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
				   int port_num,
				   u8 vcpi, uint16_t pbn,
				   u8 number_sdp_streams,
				   u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
		   number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				   int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int
build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
			      u8 *q_id)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_QUERY_STREAM_ENC_STATUS;
	req.u.enc_status.stream_id = stream_id;
	memcpy(req.u.enc_status.client_id, q_id,
	       sizeof(req.u.enc_status.client_id));
	req.u.enc_status.stream_event = 0;
	req.u.enc_status.valid_stream_event = false;
	req.u.enc_status.stream_behavior = 0;
	req.u.enc_status.valid_stream_behavior = false;

	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		drm_dbg_kms(mgr->dev, "out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		drm_dbg_kms(mgr->dev, "out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	drm_dbg_kms(mgr->dev, "putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	unsigned long wait_timeout = msecs_to_jiffies(4000);
	unsigned long wait_expires = jiffies + wait_timeout;
	int ret;

	for (;;) {
		/*
		 * If the driver provides a way for this, change to
		 * poll-waiting for the MST reply interrupt if we didn't receive
		 * it for 50 msec. This would cater for cases where the HPD
		 * pulse signal got lost somewhere, even though the sink raised
		 * the corresponding MST interrupt correctly. One example is the
		 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
		 * filters out short pulses with a duration less than ~540 usec.
		 *
		 * The poll period is 50 msec to avoid missing an interrupt
		 * after the sink has cleared it (after a 110msec timeout
		 * since it raised the interrupt).
		 */
		ret = wait_event_timeout(mgr->tx_waitq,
					 check_txmsg_state(mgr, txmsg),
					 mgr->cbs->poll_hpd_irq ?
						msecs_to_jiffies(50) :
						wait_timeout);

		if (ret || !mgr->cbs->poll_hpd_irq ||
		    time_after(jiffies, wait_expires))
			break;

		mgr->cbs->poll_hpd_irq(mgr);
	}

	mutex_lock(&mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n",
			    txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
			list_del(&txmsg->next);
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	drm_dp_mst_kick_tx(mgr);
	return ret;
}

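/*
 * Hedged sketch of the driver-provided poll_hpd_irq() hook used above
 * (illustrative; the call signatures match the 5.19-era helpers this fork
 * is based on, so double-check them against your tree): re-read the sink's
 * ESI registers and let the MST helpers service any pending messages,
 * covering the case where the HPD short pulse was lost.
 */
static __maybe_unused void example_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[14] = {};
	bool handled = false;

	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, sizeof(esi)) > 0)
		drm_dp_mst_hpd_irq(mgr, esi, &handled);
}
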
   1369static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
   1370{
   1371	struct drm_dp_mst_branch *mstb;
   1372
   1373	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
   1374	if (!mstb)
   1375		return NULL;
   1376
   1377	mstb->lct = lct;
   1378	if (lct > 1)
   1379		memcpy(mstb->rad, rad, lct / 2);
   1380	INIT_LIST_HEAD(&mstb->ports);
   1381	kref_init(&mstb->topology_kref);
   1382	kref_init(&mstb->malloc_kref);
   1383	return mstb;
   1384}
   1385
   1386static void drm_dp_free_mst_branch_device(struct kref *kref)
   1387{
   1388	struct drm_dp_mst_branch *mstb =
   1389		container_of(kref, struct drm_dp_mst_branch, malloc_kref);
   1390
   1391	if (mstb->port_parent)
   1392		drm_dp_mst_put_port_malloc(mstb->port_parent);
   1393
   1394	kfree(mstb);
   1395}
   1396
   1397/**
   1398 * DOC: Branch device and port refcounting
   1399 *
   1400 * Topology refcount overview
   1401 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
   1402 *
   1403 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
   1404 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
   1405 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
   1406 *
   1407 * Topology refcounts are not exposed to drivers, and are handled internally
   1408 * by the DP MST helpers. The helpers use them in order to prevent the
   1409 * in-memory topology state from being changed in the middle of critical
   1410 * operations like changing the internal state of payload allocations. This
   1411 * means each branch and port will be considered to be connected to the rest
   1412 * of the topology until its topology refcount reaches zero. Additionally,
   1413 * for ports this means that their associated &struct drm_connector will stay
   1414 * registered with userspace until the port's refcount reaches 0.
   1415 *
   1416 * Malloc refcount overview
   1417 * ~~~~~~~~~~~~~~~~~~~~~~~~
   1418 *
   1419 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
   1420 * drm_dp_mst_branch allocated even after all of its topology references have
   1421 * been dropped, so that the driver or MST helpers can safely access each
   1422 * branch's last known state before it was disconnected from the topology.
   1423 * When the malloc refcount of a port or branch reaches 0, the memory
   1424 * allocation containing the &struct drm_dp_mst_branch or &struct
   1425 * drm_dp_mst_port respectively will be freed.
   1426 *
   1427 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
   1428 * to drivers. As of writing this documentation, there are no drivers that
   1429 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
   1430 * helpers. Exposing this API to drivers in a race-free manner would take more
   1431 * tweaking of the refcounting scheme, however patches are welcome provided
   1432 * there is a legitimate driver usecase for this.
   1433 *
   1434 * Refcount relationships in a topology
   1435 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   1436 *
   1437 * Let's take a look at why the relationship between topology and malloc
   1438 * refcounts is designed the way it is.
   1439 *
   1440 * .. kernel-figure:: dp-mst/topology-figure-1.dot
   1441 *
   1442 *    An example of topology and malloc refs in a DP MST topology with two
   1443 *    active payloads. Topology refcount increments are indicated by solid
   1444 *    lines, and malloc refcount increments are indicated by dashed lines.
   1445 *    Each starts from the branch which incremented the refcount, and ends at
   1446 *    the branch to which the refcount belongs to, i.e. the arrow points the
   1447 *    same way as the C pointers used to reference a structure.
   1448 *
   1449 * As you can see in the above figure, every branch increments the topology
   1450 * refcount of its children, and increments the malloc refcount of its
   1451 * parent. Additionally, every payload increments the malloc refcount of its
   1452 * assigned port by 1.
   1453 *
   1454 * So, what would happen if MSTB #3 from the above figure was unplugged from
   1455 * the system, but the driver hadn't yet removed payload #2 from port #3? The
   1456 * topology would start to look like the figure below.
   1457 *
   1458 * .. kernel-figure:: dp-mst/topology-figure-2.dot
   1459 *
   1460 *    Ports and branch devices which have been released from memory are
   1461 *    colored grey, and references which have been removed are colored red.
   1462 *
   1463 * Whenever a port or branch device's topology refcount reaches zero, it will
   1464 * decrement the topology refcounts of all its children, the malloc refcount
   1465 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
   1466 * #4, this means they both have been disconnected from the topology and freed
   1467 * from memory. But, because payload #2 is still holding a reference to port
   1468 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
   1469 * is still accessible from memory. This also means port #3 has not yet
   1470 * decremented the malloc refcount of MSTB #3, so its &struct
   1471 * drm_dp_mst_branch will also stay allocated in memory until port #3's
   1472 * malloc refcount reaches 0.
   1473 *
   1474 * This relationship is necessary because in order to release payload #2, we
   1475 * need to be able to figure out the last relative of port #3 that's still
   1476 * connected to the topology. In this case, we would travel up the topology as
   1477 * shown below.
   1478 *
   1479 * .. kernel-figure:: dp-mst/topology-figure-3.dot
   1480 *
   1481 * And finally, remove payload #2 by communicating with port #2 through
   1482 * sideband transactions.
   1483 */
   1484
   1485/**
   1486 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
   1487 * device
   1488 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
   1489 *
   1490 * Increments &drm_dp_mst_branch.malloc_kref. When
   1491 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
   1492 * will be released and @mstb may no longer be used.
   1493 *
   1494 * See also: drm_dp_mst_put_mstb_malloc()
   1495 */
   1496static void
   1497drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
   1498{
   1499	kref_get(&mstb->malloc_kref);
   1500	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
   1501}
   1502
   1503/**
   1504 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
   1505 * device
   1506 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
   1507 *
   1508 * Decrements &drm_dp_mst_branch.malloc_kref. When
   1509 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
   1510 * will be released and @mstb may no longer be used.
   1511 *
   1512 * See also: drm_dp_mst_get_mstb_malloc()
   1513 */
   1514static void
   1515drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
   1516{
   1517	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
   1518	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
   1519}
   1520
   1521static void drm_dp_free_mst_port(struct kref *kref)
   1522{
   1523	struct drm_dp_mst_port *port =
   1524		container_of(kref, struct drm_dp_mst_port, malloc_kref);
   1525
   1526	drm_dp_mst_put_mstb_malloc(port->parent);
   1527	kfree(port);
   1528}
   1529
   1530/**
   1531 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
   1532 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
   1533 *
   1534 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
   1535 * reaches 0, the memory allocation for @port will be released and @port may
   1536 * no longer be used.
   1537 *
   1538 * Because @port could potentially be freed at any time by the DP MST helpers
   1539 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
    1540 * function, drivers that wish to make use of &struct drm_dp_mst_port should
   1541 * ensure that they grab at least one main malloc reference to their MST ports
   1542 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
   1543 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
   1544 *
   1545 * See also: drm_dp_mst_put_port_malloc()
   1546 */
   1547void
   1548drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
   1549{
   1550	kref_get(&port->malloc_kref);
   1551	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref));
   1552}
   1553EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
   1554
   1555/**
   1556 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
   1557 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
   1558 *
   1559 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
   1560 * reaches 0, the memory allocation for @port will be released and @port may
   1561 * no longer be used.
   1562 *
   1563 * See also: drm_dp_mst_get_port_malloc()
   1564 */
   1565void
   1566drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
   1567{
   1568	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
   1569	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
   1570}
   1571EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
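
/*
 * A minimal sketch (not taken from any real driver; the my_* names are
 * hypothetical) of the pattern recommended in drm_dp_mst_get_port_malloc():
 * grab one malloc reference when the connector is created in
 * &drm_dp_mst_topology_cbs.add_connector, and drop it when the connector is
 * destroyed.
 *
 *	static struct drm_connector *
 *	my_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			 struct drm_dp_mst_port *port, const char *path)
 *	{
 *		struct my_connector *conn = my_connector_create(mgr, path);
 *
 *		if (!conn)
 *			return NULL;
 *
 *		conn->port = port;
 *		drm_dp_mst_get_port_malloc(port); // keep port's memory alive
 *		return &conn->base;
 *	}
 *
 *	static void my_connector_destroy(struct drm_connector *connector)
 *	{
 *		struct my_connector *conn = to_my_connector(connector);
 *
 *		drm_dp_mst_put_port_malloc(conn->port);
 *		kfree(conn);
 *	}
 */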
   1572
   1573#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
   1574
   1575#define STACK_DEPTH 8
   1576
   1577static noinline void
   1578__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
   1579		    struct drm_dp_mst_topology_ref_history *history,
   1580		    enum drm_dp_mst_topology_ref_type type)
   1581{
   1582	struct drm_dp_mst_topology_ref_entry *entry = NULL;
   1583	depot_stack_handle_t backtrace;
   1584	ulong stack_entries[STACK_DEPTH];
   1585	uint n;
   1586	int i;
   1587
   1588	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
   1589	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
   1590	if (!backtrace)
   1591		return;
   1592
   1593	/* Try to find an existing entry for this backtrace */
   1594	for (i = 0; i < history->len; i++) {
   1595		if (history->entries[i].backtrace == backtrace) {
   1596			entry = &history->entries[i];
   1597			break;
   1598		}
   1599	}
   1600
   1601	/* Otherwise add one */
   1602	if (!entry) {
   1603		struct drm_dp_mst_topology_ref_entry *new;
   1604		int new_len = history->len + 1;
   1605
   1606		new = krealloc(history->entries, sizeof(*new) * new_len,
   1607			       GFP_KERNEL);
   1608		if (!new)
   1609			return;
   1610
   1611		entry = &new[history->len];
   1612		history->len = new_len;
   1613		history->entries = new;
   1614
   1615		entry->backtrace = backtrace;
   1616		entry->type = type;
   1617		entry->count = 0;
   1618	}
   1619	entry->count++;
   1620	entry->ts_nsec = ktime_get_ns();
   1621}
   1622
   1623static int
   1624topology_ref_history_cmp(const void *a, const void *b)
   1625{
   1626	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
   1627
   1628	if (entry_a->ts_nsec > entry_b->ts_nsec)
   1629		return 1;
   1630	else if (entry_a->ts_nsec < entry_b->ts_nsec)
   1631		return -1;
   1632	else
   1633		return 0;
   1634}
   1635
   1636static inline const char *
   1637topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
   1638{
   1639	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
   1640		return "get";
   1641	else
   1642		return "put";
   1643}
   1644
   1645static void
   1646__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
   1647			    void *ptr, const char *type_str)
   1648{
   1649	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
   1650	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
   1651	int i;
   1652
   1653	if (!buf)
   1654		return;
   1655
   1656	if (!history->len)
   1657		goto out;
   1658
   1659	/* First, sort the list so that it goes from oldest to newest
   1660	 * reference entry
   1661	 */
   1662	sort(history->entries, history->len, sizeof(*history->entries),
   1663	     topology_ref_history_cmp, NULL);
   1664
   1665	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
   1666		   type_str, ptr);
   1667
   1668	for (i = 0; i < history->len; i++) {
   1669		const struct drm_dp_mst_topology_ref_entry *entry =
   1670			&history->entries[i];
   1671		u64 ts_nsec = entry->ts_nsec;
   1672		u32 rem_nsec = do_div(ts_nsec, 1000000000);
   1673
   1674		stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4);
   1675
   1676		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
   1677			   entry->count,
   1678			   topology_ref_type_to_str(entry->type),
   1679			   ts_nsec, rem_nsec / 1000, buf);
   1680	}
   1681
   1682	/* Now free the history, since this is the only time we expose it */
   1683	kfree(history->entries);
   1684out:
   1685	kfree(buf);
   1686}
   1687
   1688static __always_inline void
   1689drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
   1690{
   1691	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
   1692				    "MSTB");
   1693}
   1694
   1695static __always_inline void
   1696drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
   1697{
   1698	__dump_topology_ref_history(&port->topology_ref_history, port,
   1699				    "Port");
   1700}
   1701
   1702static __always_inline void
   1703save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
   1704		       enum drm_dp_mst_topology_ref_type type)
   1705{
   1706	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
   1707}
   1708
   1709static __always_inline void
   1710save_port_topology_ref(struct drm_dp_mst_port *port,
   1711		       enum drm_dp_mst_topology_ref_type type)
   1712{
   1713	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
   1714}
   1715
   1716static inline void
   1717topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
   1718{
   1719	mutex_lock(&mgr->topology_ref_history_lock);
   1720}
   1721
   1722static inline void
   1723topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
   1724{
   1725	mutex_unlock(&mgr->topology_ref_history_lock);
   1726}
   1727#else
   1728static inline void
   1729topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
   1730static inline void
   1731topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
   1732static inline void
   1733drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
   1734static inline void
   1735drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
   1736#define save_mstb_topology_ref(mstb, type)
   1737#define save_port_topology_ref(port, type)
   1738#endif
   1739
   1740static void drm_dp_destroy_mst_branch_device(struct kref *kref)
   1741{
   1742	struct drm_dp_mst_branch *mstb =
   1743		container_of(kref, struct drm_dp_mst_branch, topology_kref);
   1744	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   1745
   1746	drm_dp_mst_dump_mstb_topology_history(mstb);
   1747
   1748	INIT_LIST_HEAD(&mstb->destroy_next);
   1749
   1750	/*
   1751	 * This can get called under mgr->mutex, so we need to perform the
   1752	 * actual destruction of the mstb in another worker
   1753	 */
   1754	mutex_lock(&mgr->delayed_destroy_lock);
   1755	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
   1756	mutex_unlock(&mgr->delayed_destroy_lock);
   1757	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
   1758}
   1759
   1760/**
   1761 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
   1762 * branch device unless it's zero
   1763 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
   1764 *
   1765 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
    1766 * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
   1767 * reached 0). Holding a topology reference implies that a malloc reference
   1768 * will be held to @mstb as long as the user holds the topology reference.
   1769 *
   1770 * Care should be taken to ensure that the user has at least one malloc
   1771 * reference to @mstb. If you already have a topology reference to @mstb, you
   1772 * should use drm_dp_mst_topology_get_mstb() instead.
   1773 *
   1774 * See also:
   1775 * drm_dp_mst_topology_get_mstb()
   1776 * drm_dp_mst_topology_put_mstb()
   1777 *
   1778 * Returns:
   1779 * * 1: A topology reference was grabbed successfully
    1780 * * 0: @mstb is no longer in the topology, no reference was grabbed
   1781 */
   1782static int __must_check
   1783drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
   1784{
   1785	int ret;
   1786
   1787	topology_ref_history_lock(mstb->mgr);
   1788	ret = kref_get_unless_zero(&mstb->topology_kref);
   1789	if (ret) {
   1790		drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
   1791		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
   1792	}
   1793
   1794	topology_ref_history_unlock(mstb->mgr);
   1795
   1796	return ret;
   1797}
   1798
   1799/**
   1800 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
   1801 * branch device
   1802 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
   1803 *
    1804 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
   1805 * not it's already reached 0. This is only valid to use in scenarios where
   1806 * you are already guaranteed to have at least one active topology reference
   1807 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
   1808 *
   1809 * See also:
   1810 * drm_dp_mst_topology_try_get_mstb()
   1811 * drm_dp_mst_topology_put_mstb()
   1812 */
   1813static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
   1814{
   1815	topology_ref_history_lock(mstb->mgr);
   1816
   1817	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
   1818	WARN_ON(kref_read(&mstb->topology_kref) == 0);
   1819	kref_get(&mstb->topology_kref);
   1820	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
   1821
   1822	topology_ref_history_unlock(mstb->mgr);
   1823}
   1824
   1825/**
   1826 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
   1827 * device
   1828 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
   1829 *
   1830 * Releases a topology reference from @mstb by decrementing
   1831 * &drm_dp_mst_branch.topology_kref.
   1832 *
   1833 * See also:
   1834 * drm_dp_mst_topology_try_get_mstb()
   1835 * drm_dp_mst_topology_get_mstb()
   1836 */
   1837static void
   1838drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
   1839{
   1840	topology_ref_history_lock(mstb->mgr);
   1841
   1842	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1);
   1843	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
   1844
   1845	topology_ref_history_unlock(mstb->mgr);
   1846	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
   1847}
   1848
   1849static void drm_dp_destroy_port(struct kref *kref)
   1850{
   1851	struct drm_dp_mst_port *port =
   1852		container_of(kref, struct drm_dp_mst_port, topology_kref);
   1853	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   1854
   1855	drm_dp_mst_dump_port_topology_history(port);
   1856
   1857	/* There's nothing that needs locking to destroy an input port yet */
   1858	if (port->input) {
   1859		drm_dp_mst_put_port_malloc(port);
   1860		return;
   1861	}
   1862
   1863	kfree(port->cached_edid);
   1864
   1865	/*
   1866	 * we can't destroy the connector here, as we might be holding the
   1867	 * mode_config.mutex from an EDID retrieval
   1868	 */
   1869	mutex_lock(&mgr->delayed_destroy_lock);
   1870	list_add(&port->next, &mgr->destroy_port_list);
   1871	mutex_unlock(&mgr->delayed_destroy_lock);
   1872	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
   1873}
   1874
   1875/**
   1876 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
   1877 * port unless it's zero
   1878 * @port: &struct drm_dp_mst_port to increment the topology refcount of
   1879 *
   1880 * Attempts to grab a topology reference to @port, if it hasn't yet been
    1881 * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has reached
   1882 * 0). Holding a topology reference implies that a malloc reference will be
   1883 * held to @port as long as the user holds the topology reference.
   1884 *
   1885 * Care should be taken to ensure that the user has at least one malloc
   1886 * reference to @port. If you already have a topology reference to @port, you
   1887 * should use drm_dp_mst_topology_get_port() instead.
   1888 *
   1889 * See also:
   1890 * drm_dp_mst_topology_get_port()
   1891 * drm_dp_mst_topology_put_port()
   1892 *
   1893 * Returns:
   1894 * * 1: A topology reference was grabbed successfully
   1895 * * 0: @port is no longer in the topology, no reference was grabbed
   1896 */
   1897static int __must_check
   1898drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
   1899{
   1900	int ret;
   1901
   1902	topology_ref_history_lock(port->mgr);
   1903	ret = kref_get_unless_zero(&port->topology_kref);
   1904	if (ret) {
   1905		drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
   1906		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
   1907	}
   1908
   1909	topology_ref_history_unlock(port->mgr);
   1910	return ret;
   1911}
   1912
   1913/**
   1914 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
   1915 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
   1916 *
    1917 * Increments &drm_dp_mst_port.topology_kref without checking whether or
   1918 * not it's already reached 0. This is only valid to use in scenarios where
   1919 * you are already guaranteed to have at least one active topology reference
   1920 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
   1921 *
   1922 * See also:
   1923 * drm_dp_mst_topology_try_get_port()
   1924 * drm_dp_mst_topology_put_port()
   1925 */
   1926static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
   1927{
   1928	topology_ref_history_lock(port->mgr);
   1929
   1930	WARN_ON(kref_read(&port->topology_kref) == 0);
   1931	kref_get(&port->topology_kref);
   1932	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
   1933	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
   1934
   1935	topology_ref_history_unlock(port->mgr);
   1936}
   1937
   1938/**
   1939 * drm_dp_mst_topology_put_port() - release a topology reference to a port
   1940 * @port: The &struct drm_dp_mst_port to release the topology reference from
   1941 *
   1942 * Releases a topology reference from @port by decrementing
   1943 * &drm_dp_mst_port.topology_kref.
   1944 *
   1945 * See also:
   1946 * drm_dp_mst_topology_try_get_port()
   1947 * drm_dp_mst_topology_get_port()
   1948 */
   1949static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
   1950{
   1951	topology_ref_history_lock(port->mgr);
   1952
   1953	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1);
   1954	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
   1955
   1956	topology_ref_history_unlock(port->mgr);
   1957	kref_put(&port->topology_kref, drm_dp_destroy_port);
   1958}
   1959
   1960static struct drm_dp_mst_branch *
   1961drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
   1962					      struct drm_dp_mst_branch *to_find)
   1963{
   1964	struct drm_dp_mst_port *port;
   1965	struct drm_dp_mst_branch *rmstb;
   1966
   1967	if (to_find == mstb)
   1968		return mstb;
   1969
   1970	list_for_each_entry(port, &mstb->ports, next) {
   1971		if (port->mstb) {
   1972			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
   1973			    port->mstb, to_find);
   1974			if (rmstb)
   1975				return rmstb;
   1976		}
   1977	}
   1978	return NULL;
   1979}
   1980
   1981static struct drm_dp_mst_branch *
   1982drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
   1983				       struct drm_dp_mst_branch *mstb)
   1984{
   1985	struct drm_dp_mst_branch *rmstb = NULL;
   1986
   1987	mutex_lock(&mgr->lock);
   1988	if (mgr->mst_primary) {
   1989		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
   1990		    mgr->mst_primary, mstb);
   1991
   1992		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
   1993			rmstb = NULL;
   1994	}
   1995	mutex_unlock(&mgr->lock);
   1996	return rmstb;
   1997}
   1998
   1999static struct drm_dp_mst_port *
   2000drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
   2001					      struct drm_dp_mst_port *to_find)
   2002{
   2003	struct drm_dp_mst_port *port, *mport;
   2004
   2005	list_for_each_entry(port, &mstb->ports, next) {
   2006		if (port == to_find)
   2007			return port;
   2008
   2009		if (port->mstb) {
   2010			mport = drm_dp_mst_topology_get_port_validated_locked(
   2011			    port->mstb, to_find);
   2012			if (mport)
   2013				return mport;
   2014		}
   2015	}
   2016	return NULL;
   2017}
   2018
   2019static struct drm_dp_mst_port *
   2020drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
   2021				       struct drm_dp_mst_port *port)
   2022{
   2023	struct drm_dp_mst_port *rport = NULL;
   2024
   2025	mutex_lock(&mgr->lock);
   2026	if (mgr->mst_primary) {
   2027		rport = drm_dp_mst_topology_get_port_validated_locked(
   2028		    mgr->mst_primary, port);
   2029
   2030		if (rport && !drm_dp_mst_topology_try_get_port(rport))
   2031			rport = NULL;
   2032	}
   2033	mutex_unlock(&mgr->lock);
   2034	return rport;
   2035}
   2036
   2037static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
   2038{
   2039	struct drm_dp_mst_port *port;
   2040	int ret;
   2041
   2042	list_for_each_entry(port, &mstb->ports, next) {
   2043		if (port->port_num == port_num) {
   2044			ret = drm_dp_mst_topology_try_get_port(port);
   2045			return ret ? port : NULL;
   2046		}
   2047	}
   2048
   2049	return NULL;
   2050}
   2051
   2052/*
    2053 * Calculate a new RAD for this MST branch device.
    2054 * If the parent has an LCT of 2 then it has 1 nibble of RAD,
    2055 * if the parent has an LCT of 3 then it has 2 nibbles of RAD, and so on.
   2056 */
   2057static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
   2058				 u8 *rad)
   2059{
   2060	int parent_lct = port->parent->lct;
   2061	int shift = 4;
   2062	int idx = (parent_lct - 1) / 2;
   2063
   2064	if (parent_lct > 1) {
   2065		memcpy(rad, port->parent->rad, idx + 1);
   2066		shift = (parent_lct % 2) ? 4 : 0;
   2067	} else
   2068		rad[0] = 0;
   2069
   2070	rad[idx] |= port->port_num << shift;
   2071	return parent_lct + 1;
   2072}
   2073
   2074static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
   2075{
   2076	switch (pdt) {
   2077	case DP_PEER_DEVICE_DP_LEGACY_CONV:
   2078	case DP_PEER_DEVICE_SST_SINK:
   2079		return true;
   2080	case DP_PEER_DEVICE_MST_BRANCHING:
    2081		/* For SST branch devices */
   2082		if (!mcs)
   2083			return true;
   2084
   2085		return false;
   2086	}
   2087	return true;
   2088}
   2089
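/*
 * Returns 1 if the caller should send a LINK_ADDRESS to the port's new child
 * branch device, 0 if the PDT was updated (or left unchanged) with nothing
 * more to do, and a negative errno on failure, in which case the port's PDT
 * is reset to DP_PEER_DEVICE_NONE.
 */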
   2090static int
   2091drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
   2092		    bool new_mcs)
   2093{
   2094	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   2095	struct drm_dp_mst_branch *mstb;
   2096	u8 rad[8], lct;
   2097	int ret = 0;
   2098
   2099	if (port->pdt == new_pdt && port->mcs == new_mcs)
   2100		return 0;
   2101
   2102	/* Teardown the old pdt, if there is one */
   2103	if (port->pdt != DP_PEER_DEVICE_NONE) {
   2104		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
   2105			/*
   2106			 * If the new PDT would also have an i2c bus,
   2107			 * don't bother with reregistering it
   2108			 */
   2109			if (new_pdt != DP_PEER_DEVICE_NONE &&
   2110			    drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
   2111				port->pdt = new_pdt;
   2112				port->mcs = new_mcs;
   2113				return 0;
   2114			}
   2115
   2116			/* remove i2c over sideband */
   2117			drm_dp_mst_unregister_i2c_bus(port);
   2118		} else {
   2119			mutex_lock(&mgr->lock);
   2120			drm_dp_mst_topology_put_mstb(port->mstb);
   2121			port->mstb = NULL;
   2122			mutex_unlock(&mgr->lock);
   2123		}
   2124	}
   2125
   2126	port->pdt = new_pdt;
   2127	port->mcs = new_mcs;
   2128
   2129	if (port->pdt != DP_PEER_DEVICE_NONE) {
   2130		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
   2131			/* add i2c over sideband */
   2132			ret = drm_dp_mst_register_i2c_bus(port);
   2133		} else {
   2134			lct = drm_dp_calculate_rad(port, rad);
   2135			mstb = drm_dp_add_mst_branch_device(lct, rad);
   2136			if (!mstb) {
   2137				ret = -ENOMEM;
   2138				drm_err(mgr->dev, "Failed to create MSTB for port %p", port);
   2139				goto out;
   2140			}
   2141
   2142			mutex_lock(&mgr->lock);
   2143			port->mstb = mstb;
   2144			mstb->mgr = port->mgr;
   2145			mstb->port_parent = port;
   2146
   2147			/*
   2148			 * Make sure this port's memory allocation stays
   2149			 * around until its child MSTB releases it
   2150			 */
   2151			drm_dp_mst_get_port_malloc(port);
   2152			mutex_unlock(&mgr->lock);
   2153
   2154			/* And make sure we send a link address for this */
   2155			ret = 1;
   2156		}
   2157	}
   2158
   2159out:
   2160	if (ret < 0)
   2161		port->pdt = DP_PEER_DEVICE_NONE;
   2162	return ret;
   2163}
   2164
   2165/**
   2166 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
   2167 * @aux: Fake sideband AUX CH
   2168 * @offset: address of the (first) register to read
   2169 * @buffer: buffer to store the register values
   2170 * @size: number of bytes in @buffer
   2171 *
   2172 * Performs the same functionality for remote devices via
   2173 * sideband messaging as drm_dp_dpcd_read() does for local
   2174 * devices via actual AUX CH.
   2175 *
   2176 * Return: Number of bytes read, or negative error code on failure.
   2177 */
   2178ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
   2179			     unsigned int offset, void *buffer, size_t size)
   2180{
   2181	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
   2182						    aux);
   2183
   2184	return drm_dp_send_dpcd_read(port->mgr, port,
   2185				     offset, size, buffer);
   2186}
   2187
   2188/**
   2189 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
   2190 * @aux: Fake sideband AUX CH
   2191 * @offset: address of the (first) register to write
   2192 * @buffer: buffer containing the values to write
   2193 * @size: number of bytes in @buffer
   2194 *
   2195 * Performs the same functionality for remote devices via
   2196 * sideband messaging as drm_dp_dpcd_write() does for local
   2197 * devices via actual AUX CH.
   2198 *
   2199 * Return: number of bytes written on success, negative error code on failure.
   2200 */
   2201ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
   2202			      unsigned int offset, void *buffer, size_t size)
   2203{
   2204	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
   2205						    aux);
   2206
   2207	return drm_dp_send_dpcd_write(port->mgr, port,
   2208				      offset, size, buffer);
   2209}
   2210
   2211static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
   2212{
   2213	int ret = 0;
   2214
   2215	memcpy(mstb->guid, guid, 16);
   2216
   2217	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
   2218		if (mstb->port_parent) {
   2219			ret = drm_dp_send_dpcd_write(mstb->mgr,
   2220						     mstb->port_parent,
   2221						     DP_GUID, 16, mstb->guid);
   2222		} else {
   2223			ret = drm_dp_dpcd_write(mstb->mgr->aux,
   2224						DP_GUID, mstb->guid, 16);
   2225		}
   2226	}
   2227
   2228	if (ret < 16 && ret > 0)
   2229		return -EPROTO;
   2230
   2231	return ret == 16 ? 0 : ret;
   2232}
   2233
   2234static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
   2235				int pnum,
   2236				char *proppath,
   2237				size_t proppath_size)
   2238{
   2239	int i;
   2240	char temp[8];
   2241
   2242	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
   2243	for (i = 0; i < (mstb->lct - 1); i++) {
   2244		int shift = (i % 2) ? 0 : 4;
   2245		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
   2246
   2247		snprintf(temp, sizeof(temp), "-%d", port_num);
   2248		strlcat(proppath, temp, proppath_size);
   2249	}
   2250	snprintf(temp, sizeof(temp), "-%d", pnum);
   2251	strlcat(proppath, temp, proppath_size);
   2252}
   2253
   2254/**
   2255 * drm_dp_mst_connector_late_register() - Late MST connector registration
   2256 * @connector: The MST connector
   2257 * @port: The MST port for this connector
   2258 *
   2259 * Helper to register the remote aux device for this MST port. Drivers should
   2260 * call this from their mst connector's late_register hook to enable MST aux
   2261 * devices.
   2262 *
   2263 * Return: 0 on success, negative error code on failure.
   2264 */
   2265int drm_dp_mst_connector_late_register(struct drm_connector *connector,
   2266				       struct drm_dp_mst_port *port)
   2267{
   2268	drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n",
   2269		    port->aux.name, connector->kdev->kobj.name);
   2270
   2271	port->aux.dev = connector->kdev;
   2272	return drm_dp_aux_register_devnode(&port->aux);
   2273}
   2274EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
   2275
   2276/**
   2277 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
   2278 * @connector: The MST connector
   2279 * @port: The MST port for this connector
   2280 *
   2281 * Helper to unregister the remote aux device for this MST port, registered by
   2282 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
   2283 * connector's early_unregister hook.
   2284 */
   2285void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
   2286					   struct drm_dp_mst_port *port)
   2287{
   2288	drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n",
   2289		    port->aux.name, connector->kdev->kobj.name);
   2290	drm_dp_aux_unregister_devnode(&port->aux);
   2291}
   2292EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
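
/*
 * A minimal sketch (the my_* names are hypothetical) of wiring the two
 * helpers above into a driver's &drm_connector_funcs:
 *
 *	static int my_mst_connector_late_register(struct drm_connector *connector)
 *	{
 *		struct my_connector *conn = to_my_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, conn->port);
 *	}
 *
 *	static void my_mst_connector_early_unregister(struct drm_connector *connector)
 *	{
 *		struct my_connector *conn = to_my_connector(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, conn->port);
 *	}
 */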
   2293
   2294static void
   2295drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
   2296			      struct drm_dp_mst_port *port)
   2297{
   2298	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   2299	char proppath[255];
   2300	int ret;
   2301
   2302	build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
   2303	port->connector = mgr->cbs->add_connector(mgr, port, proppath);
   2304	if (!port->connector) {
   2305		ret = -ENOMEM;
   2306		goto error;
   2307	}
   2308
   2309	if (port->pdt != DP_PEER_DEVICE_NONE &&
   2310	    drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
   2311	    port->port_num >= DP_MST_LOGICAL_PORT_0)
   2312		port->cached_edid = drm_get_edid(port->connector,
   2313						 &port->aux.ddc);
   2314
   2315	drm_connector_register(port->connector);
   2316	return;
   2317
   2318error:
   2319	drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret);
   2320}
   2321
   2322/*
   2323 * Drop a topology reference, and unlink the port from the in-memory topology
   2324 * layout
   2325 */
   2326static void
   2327drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
   2328				struct drm_dp_mst_port *port)
   2329{
   2330	mutex_lock(&mgr->lock);
   2331	port->parent->num_ports--;
   2332	list_del(&port->next);
   2333	mutex_unlock(&mgr->lock);
   2334	drm_dp_mst_topology_put_port(port);
   2335}
   2336
   2337static struct drm_dp_mst_port *
   2338drm_dp_mst_add_port(struct drm_device *dev,
   2339		    struct drm_dp_mst_topology_mgr *mgr,
   2340		    struct drm_dp_mst_branch *mstb, u8 port_number)
   2341{
   2342	struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
   2343
   2344	if (!port)
   2345		return NULL;
   2346
   2347	kref_init(&port->topology_kref);
   2348	kref_init(&port->malloc_kref);
   2349	port->parent = mstb;
   2350	port->port_num = port_number;
   2351	port->mgr = mgr;
   2352	port->aux.name = "DPMST";
   2353	port->aux.dev = dev->dev;
   2354	port->aux.is_remote = true;
   2355
   2356	/* initialize the MST downstream port's AUX crc work queue */
   2357	port->aux.drm_dev = dev;
   2358	drm_dp_remote_aux_init(&port->aux);
   2359
   2360	/*
   2361	 * Make sure the memory allocation for our parent branch stays
   2362	 * around until our own memory allocation is released
   2363	 */
   2364	drm_dp_mst_get_mstb_malloc(mstb);
   2365
   2366	return port;
   2367}
   2368
   2369static int
   2370drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
   2371				    struct drm_device *dev,
   2372				    struct drm_dp_link_addr_reply_port *port_msg)
   2373{
   2374	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   2375	struct drm_dp_mst_port *port;
   2376	int old_ddps = 0, ret;
   2377	u8 new_pdt = DP_PEER_DEVICE_NONE;
    2378	bool new_mcs = false;
   2379	bool created = false, send_link_addr = false, changed = false;
   2380
   2381	port = drm_dp_get_port(mstb, port_msg->port_number);
   2382	if (!port) {
   2383		port = drm_dp_mst_add_port(dev, mgr, mstb,
   2384					   port_msg->port_number);
   2385		if (!port)
   2386			return -ENOMEM;
   2387		created = true;
   2388		changed = true;
   2389	} else if (!port->input && port_msg->input_port && port->connector) {
   2390		/* Since port->connector can't be changed here, we create a
   2391		 * new port if input_port changes from 0 to 1
   2392		 */
   2393		drm_dp_mst_topology_unlink_port(mgr, port);
   2394		drm_dp_mst_topology_put_port(port);
   2395		port = drm_dp_mst_add_port(dev, mgr, mstb,
   2396					   port_msg->port_number);
   2397		if (!port)
   2398			return -ENOMEM;
   2399		changed = true;
   2400		created = true;
   2401	} else if (port->input && !port_msg->input_port) {
   2402		changed = true;
   2403	} else if (port->connector) {
   2404		/* We're updating a port that's exposed to userspace, so do it
   2405		 * under lock
   2406		 */
   2407		drm_modeset_lock(&mgr->base.lock, NULL);
   2408
   2409		old_ddps = port->ddps;
   2410		changed = port->ddps != port_msg->ddps ||
   2411			(port->ddps &&
   2412			 (port->ldps != port_msg->legacy_device_plug_status ||
   2413			  port->dpcd_rev != port_msg->dpcd_revision ||
   2414			  port->mcs != port_msg->mcs ||
   2415			  port->pdt != port_msg->peer_device_type ||
   2416			  port->num_sdp_stream_sinks !=
   2417			  port_msg->num_sdp_stream_sinks));
   2418	}
   2419
   2420	port->input = port_msg->input_port;
   2421	if (!port->input)
   2422		new_pdt = port_msg->peer_device_type;
   2423	new_mcs = port_msg->mcs;
   2424	port->ddps = port_msg->ddps;
   2425	port->ldps = port_msg->legacy_device_plug_status;
   2426	port->dpcd_rev = port_msg->dpcd_revision;
   2427	port->num_sdp_streams = port_msg->num_sdp_streams;
   2428	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
   2429
    2430	/* manage mstb port lists with mgr lock - take a reference
    2431	 * for this list */
   2432	if (created) {
   2433		mutex_lock(&mgr->lock);
   2434		drm_dp_mst_topology_get_port(port);
   2435		list_add(&port->next, &mstb->ports);
   2436		mstb->num_ports++;
   2437		mutex_unlock(&mgr->lock);
   2438	}
   2439
   2440	/*
    2441	 * Reprobe PBN caps both on hotplug and when re-probing the link
   2442	 * for our parent mstb
   2443	 */
   2444	if (old_ddps != port->ddps || !created) {
   2445		if (port->ddps && !port->input) {
   2446			ret = drm_dp_send_enum_path_resources(mgr, mstb,
   2447							      port);
   2448			if (ret == 1)
   2449				changed = true;
   2450		} else {
   2451			port->full_pbn = 0;
   2452		}
   2453	}
   2454
   2455	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
   2456	if (ret == 1) {
   2457		send_link_addr = true;
   2458	} else if (ret < 0) {
   2459		drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret);
   2460		goto fail;
   2461	}
   2462
   2463	/*
   2464	 * If this port wasn't just created, then we're reprobing because
   2465	 * we're coming out of suspend. In this case, always resend the link
   2466	 * address if there's an MSTB on this port
   2467	 */
   2468	if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
   2469	    port->mcs)
   2470		send_link_addr = true;
   2471
   2472	if (port->connector)
   2473		drm_modeset_unlock(&mgr->base.lock);
   2474	else if (!port->input)
   2475		drm_dp_mst_port_add_connector(mstb, port);
   2476
   2477	if (send_link_addr && port->mstb) {
   2478		ret = drm_dp_send_link_address(mgr, port->mstb);
   2479		if (ret == 1) /* MSTB below us changed */
   2480			changed = true;
   2481		else if (ret < 0)
   2482			goto fail_put;
   2483	}
   2484
   2485	/* put reference to this port */
   2486	drm_dp_mst_topology_put_port(port);
   2487	return changed;
   2488
   2489fail:
   2490	drm_dp_mst_topology_unlink_port(mgr, port);
   2491	if (port->connector)
   2492		drm_modeset_unlock(&mgr->base.lock);
   2493fail_put:
   2494	drm_dp_mst_topology_put_port(port);
   2495	return ret;
   2496}
   2497
   2498static void
   2499drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
   2500			    struct drm_dp_connection_status_notify *conn_stat)
   2501{
   2502	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   2503	struct drm_dp_mst_port *port;
   2504	int old_ddps, ret;
   2505	u8 new_pdt;
   2506	bool new_mcs;
   2507	bool dowork = false, create_connector = false;
   2508
   2509	port = drm_dp_get_port(mstb, conn_stat->port_number);
   2510	if (!port)
   2511		return;
   2512
   2513	if (port->connector) {
   2514		if (!port->input && conn_stat->input_port) {
   2515			/*
   2516			 * We can't remove a connector from an already exposed
   2517			 * port, so just throw the port out and make sure we
    2518			 * reprobe the link address of its parent MSTB
   2519			 */
   2520			drm_dp_mst_topology_unlink_port(mgr, port);
   2521			mstb->link_address_sent = false;
   2522			dowork = true;
   2523			goto out;
   2524		}
   2525
   2526		/* Locking is only needed if the port's exposed to userspace */
   2527		drm_modeset_lock(&mgr->base.lock, NULL);
   2528	} else if (port->input && !conn_stat->input_port) {
   2529		create_connector = true;
   2530		/* Reprobe link address so we get num_sdp_streams */
   2531		mstb->link_address_sent = false;
   2532		dowork = true;
   2533	}
   2534
   2535	old_ddps = port->ddps;
   2536	port->input = conn_stat->input_port;
   2537	port->ldps = conn_stat->legacy_device_plug_status;
   2538	port->ddps = conn_stat->displayport_device_plug_status;
   2539
   2540	if (old_ddps != port->ddps) {
   2541		if (port->ddps && !port->input)
   2542			drm_dp_send_enum_path_resources(mgr, mstb, port);
   2543		else
   2544			port->full_pbn = 0;
   2545	}
   2546
   2547	new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
   2548	new_mcs = conn_stat->message_capability_status;
   2549	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
   2550	if (ret == 1) {
   2551		dowork = true;
   2552	} else if (ret < 0) {
   2553		drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret);
   2554		dowork = false;
   2555	}
   2556
   2557	if (port->connector)
   2558		drm_modeset_unlock(&mgr->base.lock);
   2559	else if (create_connector)
   2560		drm_dp_mst_port_add_connector(mstb, port);
   2561
   2562out:
   2563	drm_dp_mst_topology_put_port(port);
   2564	if (dowork)
   2565		queue_work(system_long_wq, &mstb->mgr->work);
   2566}
   2567
   2568static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
   2569							       u8 lct, u8 *rad)
   2570{
   2571	struct drm_dp_mst_branch *mstb;
   2572	struct drm_dp_mst_port *port;
   2573	int i, ret;
   2574	/* find the port by iterating down */
   2575
   2576	mutex_lock(&mgr->lock);
   2577	mstb = mgr->mst_primary;
   2578
   2579	if (!mstb)
   2580		goto out;
   2581
   2582	for (i = 0; i < lct - 1; i++) {
   2583		int shift = (i % 2) ? 0 : 4;
   2584		int port_num = (rad[i / 2] >> shift) & 0xf;
   2585
   2586		list_for_each_entry(port, &mstb->ports, next) {
   2587			if (port->port_num == port_num) {
   2588				mstb = port->mstb;
   2589				if (!mstb) {
   2590					drm_err(mgr->dev,
   2591						"failed to lookup MSTB with lct %d, rad %02x\n",
   2592						lct, rad[0]);
   2593					goto out;
   2594				}
   2595
   2596				break;
   2597			}
   2598		}
   2599	}
   2600	ret = drm_dp_mst_topology_try_get_mstb(mstb);
   2601	if (!ret)
   2602		mstb = NULL;
   2603out:
   2604	mutex_unlock(&mgr->lock);
   2605	return mstb;
   2606}
   2607
   2608static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
   2609	struct drm_dp_mst_branch *mstb,
   2610	const uint8_t *guid)
   2611{
   2612	struct drm_dp_mst_branch *found_mstb;
   2613	struct drm_dp_mst_port *port;
   2614
   2615	if (memcmp(mstb->guid, guid, 16) == 0)
   2616		return mstb;
   2617
   2619	list_for_each_entry(port, &mstb->ports, next) {
   2620		if (!port->mstb)
   2621			continue;
   2622
   2623		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
   2624
   2625		if (found_mstb)
   2626			return found_mstb;
   2627	}
   2628
   2629	return NULL;
   2630}
   2631
   2632static struct drm_dp_mst_branch *
   2633drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
   2634				     const uint8_t *guid)
   2635{
   2636	struct drm_dp_mst_branch *mstb;
   2637	int ret;
   2638
   2639	/* find the port by iterating down */
   2640	mutex_lock(&mgr->lock);
   2641
   2642	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
   2643	if (mstb) {
   2644		ret = drm_dp_mst_topology_try_get_mstb(mstb);
   2645		if (!ret)
   2646			mstb = NULL;
   2647	}
   2648
   2649	mutex_unlock(&mgr->lock);
   2650	return mstb;
   2651}
   2652
   2653static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
   2654					       struct drm_dp_mst_branch *mstb)
   2655{
   2656	struct drm_dp_mst_port *port;
   2657	int ret;
   2658	bool changed = false;
   2659
   2660	if (!mstb->link_address_sent) {
   2661		ret = drm_dp_send_link_address(mgr, mstb);
   2662		if (ret == 1)
   2663			changed = true;
   2664		else if (ret < 0)
   2665			return ret;
   2666	}
   2667
   2668	list_for_each_entry(port, &mstb->ports, next) {
   2669		struct drm_dp_mst_branch *mstb_child = NULL;
   2670
   2671		if (port->input || !port->ddps)
   2672			continue;
   2673
   2674		if (port->mstb)
   2675			mstb_child = drm_dp_mst_topology_get_mstb_validated(
   2676			    mgr, port->mstb);
   2677
   2678		if (mstb_child) {
   2679			ret = drm_dp_check_and_send_link_address(mgr,
   2680								 mstb_child);
   2681			drm_dp_mst_topology_put_mstb(mstb_child);
   2682			if (ret == 1)
   2683				changed = true;
   2684			else if (ret < 0)
   2685				return ret;
   2686		}
   2687	}
   2688
   2689	return changed;
   2690}
   2691
   2692static void drm_dp_mst_link_probe_work(struct work_struct *work)
   2693{
   2694	struct drm_dp_mst_topology_mgr *mgr =
   2695		container_of(work, struct drm_dp_mst_topology_mgr, work);
   2696	struct drm_device *dev = mgr->dev;
   2697	struct drm_dp_mst_branch *mstb;
   2698	int ret;
   2699	bool clear_payload_id_table;
   2700
   2701	mutex_lock(&mgr->probe_lock);
   2702
   2703	mutex_lock(&mgr->lock);
   2704	clear_payload_id_table = !mgr->payload_id_table_cleared;
   2705	mgr->payload_id_table_cleared = true;
   2706
   2707	mstb = mgr->mst_primary;
   2708	if (mstb) {
   2709		ret = drm_dp_mst_topology_try_get_mstb(mstb);
   2710		if (!ret)
   2711			mstb = NULL;
   2712	}
   2713	mutex_unlock(&mgr->lock);
   2714	if (!mstb) {
   2715		mutex_unlock(&mgr->probe_lock);
   2716		return;
   2717	}
   2718
   2719	/*
   2720	 * Certain branch devices seem to incorrectly report an available_pbn
   2721	 * of 0 on downstream sinks, even after clearing the
   2722	 * DP_PAYLOAD_ALLOCATE_* registers in
   2723	 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
   2724	 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
   2725	 * things work again.
   2726	 */
   2727	if (clear_payload_id_table) {
   2728		drm_dbg_kms(dev, "Clearing payload ID table\n");
   2729		drm_dp_send_clear_payload_id_table(mgr, mstb);
   2730	}
   2731
   2732	ret = drm_dp_check_and_send_link_address(mgr, mstb);
   2733	drm_dp_mst_topology_put_mstb(mstb);
   2734
   2735	mutex_unlock(&mgr->probe_lock);
   2736	if (ret > 0)
   2737		drm_kms_helper_hotplug_event(dev);
   2738}
   2739
   2740static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
   2741				 u8 *guid)
   2742{
   2743	u64 salt;
   2744
   2745	if (memchr_inv(guid, 0, 16))
   2746		return true;
   2747
   2748	salt = get_jiffies_64();
   2749
   2750	memcpy(&guid[0], &salt, sizeof(u64));
   2751	memcpy(&guid[8], &salt, sizeof(u64));
   2752
   2753	return false;
   2754}
   2755
   2756static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
   2757			    u8 port_num, u32 offset, u8 num_bytes)
   2758{
   2759	struct drm_dp_sideband_msg_req_body req;
   2760
   2761	req.req_type = DP_REMOTE_DPCD_READ;
   2762	req.u.dpcd_read.port_number = port_num;
   2763	req.u.dpcd_read.dpcd_address = offset;
   2764	req.u.dpcd_read.num_bytes = num_bytes;
   2765	drm_dp_encode_sideband_req(&req, msg);
   2766}
   2767
   2768static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
   2769				    bool up, u8 *msg, int len)
   2770{
   2771	int ret;
   2772	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
   2773	int tosend, total, offset;
   2774	int retries = 0;
   2775
   2776retry:
   2777	total = len;
   2778	offset = 0;
   2779	do {
   2780		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
   2781
   2782		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
   2783					&msg[offset],
   2784					tosend);
   2785		if (ret != tosend) {
   2786			if (ret == -EIO && retries < 5) {
   2787				retries++;
   2788				goto retry;
   2789			}
   2790			drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
   2791
   2792			return -EIO;
   2793		}
   2794		offset += tosend;
   2795		total -= tosend;
   2796	} while (total > 0);
   2797	return 0;
   2798}
   2799
   2800static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
   2801				  struct drm_dp_sideband_msg_tx *txmsg)
   2802{
   2803	struct drm_dp_mst_branch *mstb = txmsg->dst;
   2804	u8 req_type;
   2805
   2806	req_type = txmsg->msg[0] & 0x7f;
   2807	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
   2808		req_type == DP_RESOURCE_STATUS_NOTIFY ||
   2809		req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
   2810		hdr->broadcast = 1;
   2811	else
   2812		hdr->broadcast = 0;
   2813	hdr->path_msg = txmsg->path_msg;
   2814	if (hdr->broadcast) {
   2815		hdr->lct = 1;
   2816		hdr->lcr = 6;
   2817	} else {
   2818		hdr->lct = mstb->lct;
   2819		hdr->lcr = mstb->lct - 1;
   2820	}
   2821
   2822	memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
   2823
   2824	return 0;
   2825}
   2826/*
   2827 * process a single block of the next message in the sideband queue
   2828 */
   2829static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
   2830				   struct drm_dp_sideband_msg_tx *txmsg,
   2831				   bool up)
   2832{
   2833	u8 chunk[48];
   2834	struct drm_dp_sideband_msg_hdr hdr;
   2835	int len, space, idx, tosend;
   2836	int ret;
   2837
   2838	if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
   2839		return 0;
   2840
   2841	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
   2842
   2843	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
   2844		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
   2845
   2846	/* make hdr from dst mst */
   2847	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
   2848	if (ret < 0)
   2849		return ret;
   2850
   2851	/* amount left to send in this message */
   2852	len = txmsg->cur_len - txmsg->cur_offset;
   2853
   2854	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
   2855	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
   2856
   2857	tosend = min(len, space);
   2858	if (len == txmsg->cur_len)
   2859		hdr.somt = 1;
   2860	if (space >= len)
   2861		hdr.eomt = 1;
   2862
   2864	hdr.msg_len = tosend + 1;
   2865	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
   2866	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
   2867	/* add crc at end */
   2868	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
   2869	idx += tosend + 1;
   2870
   2871	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
   2872	if (ret) {
   2873		if (drm_debug_enabled(DRM_UT_DP)) {
   2874			struct drm_printer p = drm_debug_printer(DBG_PREFIX);
   2875
   2876			drm_printf(&p, "sideband msg failed to send\n");
   2877			drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
   2878		}
   2879		return ret;
   2880	}
   2881
   2882	txmsg->cur_offset += tosend;
   2883	if (txmsg->cur_offset == txmsg->cur_len) {
   2884		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
   2885		return 1;
   2886	}
   2887	return 0;
   2888}
   2889
   2890static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
   2891{
   2892	struct drm_dp_sideband_msg_tx *txmsg;
   2893	int ret;
   2894
   2895	WARN_ON(!mutex_is_locked(&mgr->qlock));
   2896
   2897	/* construct a chunk from the first msg in the tx_msg queue */
   2898	if (list_empty(&mgr->tx_msg_downq))
   2899		return;
   2900
   2901	txmsg = list_first_entry(&mgr->tx_msg_downq,
   2902				 struct drm_dp_sideband_msg_tx, next);
   2903	ret = process_single_tx_qlock(mgr, txmsg, false);
   2904	if (ret < 0) {
   2905		drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret);
   2906		list_del(&txmsg->next);
   2907		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
   2908		wake_up_all(&mgr->tx_waitq);
   2909	}
   2910}
   2911
   2912static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
   2913				 struct drm_dp_sideband_msg_tx *txmsg)
   2914{
   2915	mutex_lock(&mgr->qlock);
   2916	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
   2917
   2918	if (drm_debug_enabled(DRM_UT_DP)) {
   2919		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
   2920
   2921		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
   2922	}
   2923
   2924	if (list_is_singular(&mgr->tx_msg_downq))
   2925		process_single_down_tx_qlock(mgr);
   2926	mutex_unlock(&mgr->qlock);
   2927}
   2928
   2929static void
   2930drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr,
   2931			 struct drm_dp_link_address_ack_reply *reply)
   2932{
   2933	struct drm_dp_link_addr_reply_port *port_reply;
   2934	int i;
   2935
   2936	for (i = 0; i < reply->nports; i++) {
   2937		port_reply = &reply->ports[i];
   2938		drm_dbg_kms(mgr->dev,
   2939			    "port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
   2940			    i,
   2941			    port_reply->input_port,
   2942			    port_reply->peer_device_type,
   2943			    port_reply->port_number,
   2944			    port_reply->dpcd_revision,
   2945			    port_reply->mcs,
   2946			    port_reply->ddps,
   2947			    port_reply->legacy_device_plug_status,
   2948			    port_reply->num_sdp_streams,
   2949			    port_reply->num_sdp_stream_sinks);
   2950	}
   2951}
   2952
   2953static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
   2954				     struct drm_dp_mst_branch *mstb)
   2955{
   2956	struct drm_dp_sideband_msg_tx *txmsg;
   2957	struct drm_dp_link_address_ack_reply *reply;
   2958	struct drm_dp_mst_port *port, *tmp;
   2959	int i, ret, port_mask = 0;
   2960	bool changed = false;
   2961
   2962	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   2963	if (!txmsg)
   2964		return -ENOMEM;
   2965
   2966	txmsg->dst = mstb;
   2967	build_link_address(txmsg);
   2968
   2969	mstb->link_address_sent = true;
   2970	drm_dp_queue_down_tx(mgr, txmsg);
   2971
   2972	/* FIXME: Actually do some real error handling here */
   2973	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   2974	if (ret <= 0) {
   2975		drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
   2976		goto out;
   2977	}
   2978	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   2979		drm_err(mgr->dev, "link address NAK received\n");
   2980		ret = -EIO;
   2981		goto out;
   2982	}
   2983
   2984	reply = &txmsg->reply.u.link_addr;
   2985	drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
   2986	drm_dp_dump_link_address(mgr, reply);
   2987
   2988	ret = drm_dp_check_mstb_guid(mstb, reply->guid);
   2989	if (ret) {
   2990		char buf[64];
   2991
   2992		drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
   2993		drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret);
   2994		goto out;
   2995	}
   2996
   2997	for (i = 0; i < reply->nports; i++) {
   2998		port_mask |= BIT(reply->ports[i].port_number);
   2999		ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
   3000							  &reply->ports[i]);
   3001		if (ret == 1)
   3002			changed = true;
   3003		else if (ret < 0)
   3004			goto out;
   3005	}
   3006
   3007	/* Prune any ports that are currently a part of mstb in our in-memory
   3008	 * topology, but were not seen in this link address. Usually this
   3009	 * means that they were removed while the topology was out of sync,
   3010	 * e.g. during suspend/resume
   3011	 */
   3012	mutex_lock(&mgr->lock);
   3013	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
   3014		if (port_mask & BIT(port->port_num))
   3015			continue;
   3016
   3017		drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n",
   3018			    port->port_num);
   3019		list_del(&port->next);
   3020		drm_dp_mst_topology_put_port(port);
   3021		changed = true;
   3022	}
   3023	mutex_unlock(&mgr->lock);
   3024
   3025out:
   3026	if (ret <= 0)
   3027		mstb->link_address_sent = false;
   3028	kfree(txmsg);
   3029	return ret < 0 ? ret : changed;
   3030}
   3031
   3032static void
   3033drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
   3034				   struct drm_dp_mst_branch *mstb)
   3035{
   3036	struct drm_dp_sideband_msg_tx *txmsg;
   3037	int ret;
   3038
   3039	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3040	if (!txmsg)
   3041		return;
   3042
   3043	txmsg->dst = mstb;
   3044	build_clear_payload_id_table(txmsg);
   3045
   3046	drm_dp_queue_down_tx(mgr, txmsg);
   3047
   3048	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3049	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   3050		drm_dbg_kms(mgr->dev, "clear payload table id nak received\n");
   3051
   3052	kfree(txmsg);
   3053}
   3054
   3055static int
   3056drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
   3057				struct drm_dp_mst_branch *mstb,
   3058				struct drm_dp_mst_port *port)
   3059{
   3060	struct drm_dp_enum_path_resources_ack_reply *path_res;
   3061	struct drm_dp_sideband_msg_tx *txmsg;
   3062	int ret;
   3063
   3064	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3065	if (!txmsg)
   3066		return -ENOMEM;
   3067
   3068	txmsg->dst = mstb;
   3069	build_enum_path_resources(txmsg, port->port_num);
   3070
   3071	drm_dp_queue_down_tx(mgr, txmsg);
   3072
   3073	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3074	if (ret > 0) {
   3075		ret = 0;
   3076		path_res = &txmsg->reply.u.path_resources;
   3077
   3078		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   3079			drm_dbg_kms(mgr->dev, "enum path resources nak received\n");
   3080		} else {
   3081			if (port->port_num != path_res->port_number)
   3082				DRM_ERROR("got incorrect port in response\n");
   3083
   3084			drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n",
   3085				    path_res->port_number,
   3086				    path_res->full_payload_bw_number,
   3087				    path_res->avail_payload_bw_number);
   3088
   3089			/*
   3090			 * If something changed, make sure we send a
   3091			 * hotplug
   3092			 */
   3093			if (port->full_pbn != path_res->full_payload_bw_number ||
   3094			    port->fec_capable != path_res->fec_capable)
   3095				ret = 1;
   3096
   3097			port->full_pbn = path_res->full_payload_bw_number;
   3098			port->fec_capable = path_res->fec_capable;
   3099		}
   3100	}
   3101
   3102	kfree(txmsg);
   3103	return ret;
   3104}
   3105
   3106static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
   3107{
   3108	if (!mstb->port_parent)
   3109		return NULL;
   3110
   3111	if (mstb->port_parent->mstb != mstb)
   3112		return mstb->port_parent;
   3113
   3114	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
   3115}
   3116
   3117/*
   3118 * Searches upwards in the topology starting from mstb to try to find the
   3119 * closest available parent of mstb that's still connected to the rest of the
   3120 * topology. This can be used in order to perform operations like releasing
   3121 * payloads, where the branch device which owned the payload may no longer be
   3122 * around and thus would require that the payload on the last living relative
   3123 * be freed instead.
   3124 */
   3125static struct drm_dp_mst_branch *
   3126drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
   3127					struct drm_dp_mst_branch *mstb,
   3128					int *port_num)
   3129{
   3130	struct drm_dp_mst_branch *rmstb = NULL;
   3131	struct drm_dp_mst_port *found_port;
   3132
   3133	mutex_lock(&mgr->lock);
   3134	if (!mgr->mst_primary)
   3135		goto out;
   3136
   3137	do {
   3138		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
   3139		if (!found_port)
   3140			break;
   3141
   3142		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
   3143			rmstb = found_port->parent;
   3144			*port_num = found_port->port_num;
   3145		} else {
   3146			/* Search again, starting from this parent */
   3147			mstb = found_port->parent;
   3148		}
   3149	} while (!rmstb);
   3150out:
   3151	mutex_unlock(&mgr->lock);
   3152	return rmstb;
   3153}
   3154
   3155static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
   3156				   struct drm_dp_mst_port *port,
   3157				   int id,
   3158				   int pbn)
   3159{
   3160	struct drm_dp_sideband_msg_tx *txmsg;
   3161	struct drm_dp_mst_branch *mstb;
   3162	int ret, port_num;
   3163	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
   3164	int i;
   3165
   3166	port_num = port->port_num;
   3167	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
   3168	if (!mstb) {
   3169		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
   3170							       port->parent,
   3171							       &port_num);
   3172
   3173		if (!mstb)
   3174			return -EINVAL;
   3175	}
   3176
   3177	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3178	if (!txmsg) {
   3179		ret = -ENOMEM;
   3180		goto fail_put;
   3181	}
   3182
   3183	for (i = 0; i < port->num_sdp_streams; i++)
   3184		sinks[i] = i;
   3185
   3186	txmsg->dst = mstb;
   3187	build_allocate_payload(txmsg, port_num,
   3188			       id,
   3189			       pbn, port->num_sdp_streams, sinks);
   3190
   3191	drm_dp_queue_down_tx(mgr, txmsg);
   3192
   3193	/*
   3194	 * FIXME: there is a small chance that between getting the last
   3195	 * connected mstb and sending the payload message, the last connected
   3196	 * mstb could also be removed from the topology. In the future, this
   3197	 * needs to be fixed by restarting the
   3198	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
   3199	 * timeout if the topology is still connected to the system.
   3200	 */
   3201	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3202	if (ret > 0) {
   3203		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   3204			ret = -EINVAL;
   3205		else
   3206			ret = 0;
   3207	}
   3208	kfree(txmsg);
   3209fail_put:
   3210	drm_dp_mst_topology_put_mstb(mstb);
   3211	return ret;
   3212}
   3213
   3214int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
   3215				 struct drm_dp_mst_port *port, bool power_up)
   3216{
   3217	struct drm_dp_sideband_msg_tx *txmsg;
   3218	int ret;
   3219
   3220	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   3221	if (!port)
   3222		return -EINVAL;
   3223
   3224	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3225	if (!txmsg) {
   3226		drm_dp_mst_topology_put_port(port);
   3227		return -ENOMEM;
   3228	}
   3229
   3230	txmsg->dst = port->parent;
   3231	build_power_updown_phy(txmsg, port->port_num, power_up);
   3232	drm_dp_queue_down_tx(mgr, txmsg);
   3233
   3234	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
   3235	if (ret > 0) {
   3236		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   3237			ret = -EINVAL;
   3238		else
   3239			ret = 0;
   3240	}
   3241	kfree(txmsg);
   3242	drm_dp_mst_topology_put_port(port);
   3243
   3244	return ret;
   3245}
   3246EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
   3247
   3248int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
   3249		struct drm_dp_mst_port *port,
   3250		struct drm_dp_query_stream_enc_status_ack_reply *status)
   3251{
   3252	struct drm_dp_sideband_msg_tx *txmsg;
   3253	u8 nonce[7];
   3254	int ret;
   3255
   3256	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3257	if (!txmsg)
   3258		return -ENOMEM;
   3259
   3260	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   3261	if (!port) {
   3262		ret = -EINVAL;
   3263		goto out_get_port;
   3264	}
   3265
   3266	get_random_bytes(nonce, sizeof(nonce));
   3267
   3268	/*
   3269	 * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
   3270	 *  transaction at the MST Branch device directly connected to the
   3271	 *  Source"
   3272	 */
   3273	txmsg->dst = mgr->mst_primary;
   3274
   3275	build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
   3276
   3277	drm_dp_queue_down_tx(mgr, txmsg);
   3278
   3279	ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
   3280	if (ret < 0) {
   3281		goto out;
   3282	} else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   3283		drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
   3284		ret = -ENXIO;
   3285		goto out;
   3286	}
   3287
   3288	ret = 0;
   3289	memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
   3290
   3291out:
   3292	drm_dp_mst_topology_put_port(port);
   3293out_get_port:
   3294	kfree(txmsg);
   3295	return ret;
   3296}
   3297EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
   3298
   3299static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
   3300				       int id,
   3301				       struct drm_dp_payload *payload)
   3302{
   3303	int ret;
   3304
   3305	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
   3306	if (ret < 0) {
   3307		payload->payload_state = 0;
   3308		return ret;
   3309	}
   3310	payload->payload_state = DP_PAYLOAD_LOCAL;
   3311	return 0;
   3312}
   3313
   3314static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
   3315				       struct drm_dp_mst_port *port,
   3316				       int id,
   3317				       struct drm_dp_payload *payload)
   3318{
   3319	int ret;
   3320
   3321	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
   3322	if (ret < 0)
   3323		return ret;
   3324	payload->payload_state = DP_PAYLOAD_REMOTE;
   3325	return ret;
   3326}
   3327
   3328static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
   3329					struct drm_dp_mst_port *port,
   3330					int id,
   3331					struct drm_dp_payload *payload)
   3332{
   3333	drm_dbg_kms(mgr->dev, "\n");
   3334	/* it's okay for these to fail */
   3335	if (port) {
   3336		drm_dp_payload_send_msg(mgr, port, id, 0);
   3337	}
   3338
   3339	drm_dp_dpcd_write_payload(mgr, id, payload);
   3340	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
   3341	return 0;
   3342}
   3343
   3344static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
   3345					int id,
   3346					struct drm_dp_payload *payload)
   3347{
   3348	payload->payload_state = 0;
   3349	return 0;
   3350}
   3351
   3352/**
   3353 * drm_dp_update_payload_part1() - Execute payload update part 1
   3354 * @mgr: manager to use.
    3355 * @start_slot: the first time slot to start allocating payloads from
    3356 *
    3357 * NOTE: start_slot is a temporary workaround for non-atomic drivers;
    3358 * it will be removed once the non-atomic MST helpers are moved out of the helpers
   3359 *
   3360 * This iterates over all proposed virtual channels, and tries to
   3361 * allocate space in the link for them. For 0->slots transitions,
   3362 * this step just writes the VCPI to the MST device. For slots->0
   3363 * transitions, this writes the updated VCPIs and removes the
   3364 * remote VC payloads.
   3365 *
    3366 * After calling this, the driver should generate the ACT and payload
    3367 * packets.
   3368 */
   3369int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot)
   3370{
   3371	struct drm_dp_payload req_payload;
   3372	struct drm_dp_mst_port *port;
   3373	int i, j;
   3374	int cur_slots = start_slot;
   3375	bool skip;
   3376
   3377	mutex_lock(&mgr->payload_lock);
   3378	for (i = 0; i < mgr->max_payloads; i++) {
   3379		struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
   3380		struct drm_dp_payload *payload = &mgr->payloads[i];
   3381		bool put_port = false;
   3382
   3383		/* solve the current payloads - compare to the hw ones
   3384		   - update the hw view */
   3385		req_payload.start_slot = cur_slots;
   3386		if (vcpi) {
   3387			port = container_of(vcpi, struct drm_dp_mst_port,
   3388					    vcpi);
   3389
   3390			mutex_lock(&mgr->lock);
   3391			skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
   3392			mutex_unlock(&mgr->lock);
   3393
   3394			if (skip) {
   3395				drm_dbg_kms(mgr->dev,
   3396					    "Virtual channel %d is not in current topology\n",
   3397					    i);
   3398				continue;
   3399			}
   3400			/* Validated ports don't matter if we're releasing
   3401			 * VCPI
   3402			 */
   3403			if (vcpi->num_slots) {
   3404				port = drm_dp_mst_topology_get_port_validated(
   3405				    mgr, port);
   3406				if (!port) {
   3407					if (vcpi->num_slots == payload->num_slots) {
   3408						cur_slots += vcpi->num_slots;
   3409						payload->start_slot = req_payload.start_slot;
   3410						continue;
   3411					} else {
   3412						drm_dbg_kms(mgr->dev,
   3413							    "Fail:set payload to invalid sink");
   3414						mutex_unlock(&mgr->payload_lock);
   3415						return -EINVAL;
   3416					}
   3417				}
   3418				put_port = true;
   3419			}
   3420
   3421			req_payload.num_slots = vcpi->num_slots;
   3422			req_payload.vcpi = vcpi->vcpi;
   3423		} else {
   3424			port = NULL;
   3425			req_payload.num_slots = 0;
   3426		}
   3427
   3428		payload->start_slot = req_payload.start_slot;
   3429		/* work out what is required to happen with this payload */
   3430		if (payload->num_slots != req_payload.num_slots) {
   3431
   3432			/* need to push an update for this payload */
   3433			if (req_payload.num_slots) {
   3434				drm_dp_create_payload_step1(mgr, vcpi->vcpi,
   3435							    &req_payload);
   3436				payload->num_slots = req_payload.num_slots;
   3437				payload->vcpi = req_payload.vcpi;
   3438
   3439			} else if (payload->num_slots) {
   3440				payload->num_slots = 0;
   3441				drm_dp_destroy_payload_step1(mgr, port,
   3442							     payload->vcpi,
   3443							     payload);
   3444				req_payload.payload_state =
   3445					payload->payload_state;
   3446				payload->start_slot = 0;
   3447			}
   3448			payload->payload_state = req_payload.payload_state;
   3449		}
   3450		cur_slots += req_payload.num_slots;
   3451
   3452		if (put_port)
   3453			drm_dp_mst_topology_put_port(port);
   3454	}
   3455
   3456	for (i = 0; i < mgr->max_payloads; /* do nothing */) {
   3457		if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
   3458			i++;
   3459			continue;
   3460		}
   3461
   3462		drm_dbg_kms(mgr->dev, "removing payload %d\n", i);
   3463		for (j = i; j < mgr->max_payloads - 1; j++) {
   3464			mgr->payloads[j] = mgr->payloads[j + 1];
   3465			mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
   3466
   3467			if (mgr->proposed_vcpis[j] &&
   3468			    mgr->proposed_vcpis[j]->num_slots) {
   3469				set_bit(j + 1, &mgr->payload_mask);
   3470			} else {
   3471				clear_bit(j + 1, &mgr->payload_mask);
   3472			}
   3473		}
   3474
   3475		memset(&mgr->payloads[mgr->max_payloads - 1], 0,
   3476		       sizeof(struct drm_dp_payload));
   3477		mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
   3478		clear_bit(mgr->max_payloads, &mgr->payload_mask);
   3479	}
   3480	mutex_unlock(&mgr->payload_lock);
   3481
   3482	return 0;
   3483}
   3484EXPORT_SYMBOL(drm_dp_update_payload_part1);
   3485
   3486/**
   3487 * drm_dp_update_payload_part2() - Execute payload update part 2
   3488 * @mgr: manager to use.
   3489 *
   3490 * This iterates over all proposed virtual channels, and tries to
   3491 * allocate space in the link for them. For 0->slots transitions,
   3492 * this step writes the remote VC payload commands. For slots->0
   3493 * this just resets some internal state.
   3494 */
   3495int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
   3496{
   3497	struct drm_dp_mst_port *port;
   3498	int i;
   3499	int ret = 0;
   3500	bool skip;
   3501
   3502	mutex_lock(&mgr->payload_lock);
   3503	for (i = 0; i < mgr->max_payloads; i++) {
   3504
   3505		if (!mgr->proposed_vcpis[i])
   3506			continue;
   3507
   3508		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   3509
   3510		mutex_lock(&mgr->lock);
   3511		skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
   3512		mutex_unlock(&mgr->lock);
   3513
   3514		if (skip)
   3515			continue;
   3516
   3517		drm_dbg_kms(mgr->dev, "payload %d %d\n", i, mgr->payloads[i].payload_state);
   3518		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
   3519			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
   3520		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
   3521			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
   3522		}
   3523		if (ret) {
   3524			mutex_unlock(&mgr->payload_lock);
   3525			return ret;
   3526		}
   3527	}
   3528	mutex_unlock(&mgr->payload_lock);
   3529	return 0;
   3530}
   3531EXPORT_SYMBOL(drm_dp_update_payload_part2);
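
/*
 * Illustrative call sequence for the two payload-update parts, as used by a
 * non-atomic driver during a modeset. This is a sketch only; the surrounding
 * driver code is an assumption, and start_slot = 1 matches an 8b/10b link
 * (see drm_dp_mst_update_slots() below):
 *
 *	drm_dp_update_payload_part1(mgr, 1);	// write VCPI table updates
 *	// ...trigger the ACT sequence in the source hardware here...
 *	drm_dp_check_act_status(mgr);		// wait for the hub to ack
 *	drm_dp_update_payload_part2(mgr);	// send remote payload messages
 */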
   3532
   3533static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
   3534				 struct drm_dp_mst_port *port,
   3535				 int offset, int size, u8 *bytes)
   3536{
   3537	int ret = 0;
   3538	struct drm_dp_sideband_msg_tx *txmsg;
   3539	struct drm_dp_mst_branch *mstb;
   3540
   3541	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
   3542	if (!mstb)
   3543		return -EINVAL;
   3544
   3545	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3546	if (!txmsg) {
   3547		ret = -ENOMEM;
   3548		goto fail_put;
   3549	}
   3550
   3551	build_dpcd_read(txmsg, port->port_num, offset, size);
   3552	txmsg->dst = port->parent;
   3553
   3554	drm_dp_queue_down_tx(mgr, txmsg);
   3555
   3556	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3557	if (ret < 0)
   3558		goto fail_free;
   3559
    3560	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   3561		drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
   3562			    mstb, port->port_num, offset, size);
   3563		ret = -EIO;
   3564		goto fail_free;
   3565	}
   3566
   3567	if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
   3568		ret = -EPROTO;
   3569		goto fail_free;
   3570	}
   3571
   3572	ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
   3573		    size);
   3574	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
   3575
   3576fail_free:
   3577	kfree(txmsg);
   3578fail_put:
   3579	drm_dp_mst_topology_put_mstb(mstb);
   3580
   3581	return ret;
   3582}
   3583
   3584static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
   3585				  struct drm_dp_mst_port *port,
   3586				  int offset, int size, u8 *bytes)
   3587{
   3588	int ret;
   3589	struct drm_dp_sideband_msg_tx *txmsg;
   3590	struct drm_dp_mst_branch *mstb;
   3591
   3592	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
   3593	if (!mstb)
   3594		return -EINVAL;
   3595
   3596	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3597	if (!txmsg) {
   3598		ret = -ENOMEM;
   3599		goto fail_put;
   3600	}
   3601
   3602	build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
   3603	txmsg->dst = mstb;
   3604
   3605	drm_dp_queue_down_tx(mgr, txmsg);
   3606
   3607	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3608	if (ret > 0) {
   3609		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   3610			ret = -EIO;
   3611		else
   3612			ret = size;
   3613	}
   3614
   3615	kfree(txmsg);
   3616fail_put:
   3617	drm_dp_mst_topology_put_mstb(mstb);
   3618	return ret;
   3619}
   3620
   3621static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
   3622{
   3623	struct drm_dp_sideband_msg_reply_body reply;
   3624
   3625	reply.reply_type = DP_SIDEBAND_REPLY_ACK;
   3626	reply.req_type = req_type;
   3627	drm_dp_encode_sideband_reply(&reply, msg);
   3628	return 0;
   3629}
   3630
   3631static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
   3632				    struct drm_dp_mst_branch *mstb,
   3633				    int req_type, bool broadcast)
   3634{
   3635	struct drm_dp_sideband_msg_tx *txmsg;
   3636
   3637	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3638	if (!txmsg)
   3639		return -ENOMEM;
   3640
   3641	txmsg->dst = mstb;
   3642	drm_dp_encode_up_ack_reply(txmsg, req_type);
   3643
   3644	mutex_lock(&mgr->qlock);
   3645	/* construct a chunk from the first msg in the tx_msg queue */
   3646	process_single_tx_qlock(mgr, txmsg, true);
   3647	mutex_unlock(&mgr->qlock);
   3648
   3649	kfree(txmsg);
   3650	return 0;
   3651}
   3652
   3653/**
   3654 * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
   3655 * @mgr: The &drm_dp_mst_topology_mgr to use
   3656 * @link_rate: link rate in 10kbits/s units
   3657 * @link_lane_count: lane count
   3658 *
   3659 * Calculate the total bandwidth of a MultiStream Transport link. The returned
   3660 * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
   3661 * convert the number of PBNs required for a given stream to the number of
   3662 * timeslots this stream requires in each MTP.
   3663 */
   3664int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
   3665			     int link_rate, int link_lane_count)
   3666{
   3667	if (link_rate == 0 || link_lane_count == 0)
   3668		drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
   3669			    link_rate, link_lane_count);
   3670
   3671	/* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
   3672	return link_rate * link_lane_count / 54000;
   3673}
   3674EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
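
/*
 * Worked example: an HBR2 link (5.4 Gb/s per lane, i.e. link_rate = 540000
 * in 10kb/s units) with 4 lanes yields 540000 * 4 / 54000 = 40 PBN per
 * time slot per MTP.
 */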
   3675
   3676/**
   3677 * drm_dp_read_mst_cap() - check whether or not a sink supports MST
   3678 * @aux: The DP AUX channel to use
   3679 * @dpcd: A cached copy of the DPCD capabilities for this sink
   3680 *
    3681 * Returns: %true if the sink supports MST, %false otherwise
   3682 */
   3683bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
   3684			 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
   3685{
   3686	u8 mstm_cap;
   3687
   3688	if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
   3689		return false;
   3690
   3691	if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
   3692		return false;
   3693
   3694	return mstm_cap & DP_MST_CAP;
   3695}
   3696EXPORT_SYMBOL(drm_dp_read_mst_cap);
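
/*
 * Typical detection flow (a sketch; the dpcd buffer is assumed to have been
 * cached by drm_dp_read_dpcd_caps() beforehand):
 *
 *	if (drm_dp_read_mst_cap(aux, dpcd))
 *		drm_dp_mst_topology_mgr_set_mst(mgr, true);
 */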
   3697
   3698/**
   3699 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
   3700 * @mgr: manager to set state for
   3701 * @mst_state: true to enable MST on this connector - false to disable.
   3702 *
   3703 * This is called by the driver when it detects an MST capable device plugged
   3704 * into a DP MST capable port, or when a DP MST capable device is unplugged.
   3705 */
   3706int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
   3707{
   3708	int ret = 0;
   3709	struct drm_dp_mst_branch *mstb = NULL;
   3710
   3711	mutex_lock(&mgr->payload_lock);
   3712	mutex_lock(&mgr->lock);
   3713	if (mst_state == mgr->mst_state)
   3714		goto out_unlock;
   3715
   3716	mgr->mst_state = mst_state;
   3717	/* set the device into MST mode */
   3718	if (mst_state) {
   3719		struct drm_dp_payload reset_pay;
   3720		int lane_count;
   3721		int link_rate;
   3722
   3723		WARN_ON(mgr->mst_primary);
   3724
   3725		/* get dpcd info */
   3726		ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
   3727		if (ret < 0) {
   3728			drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
   3729				    mgr->aux->name, ret);
   3730			goto out_unlock;
   3731		}
   3732
   3733		lane_count = min_t(int, mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, mgr->max_lane_count);
   3734		link_rate = min_t(int, drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->max_link_rate);
   3735		mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr,
   3736							link_rate,
   3737							lane_count);
   3738		if (mgr->pbn_div == 0) {
   3739			ret = -EINVAL;
   3740			goto out_unlock;
   3741		}
   3742
   3743		/* add initial branch device at LCT 1 */
   3744		mstb = drm_dp_add_mst_branch_device(1, NULL);
   3745		if (mstb == NULL) {
   3746			ret = -ENOMEM;
   3747			goto out_unlock;
   3748		}
   3749		mstb->mgr = mgr;
   3750
   3751		/* give this the main reference */
   3752		mgr->mst_primary = mstb;
   3753		drm_dp_mst_topology_get_mstb(mgr->mst_primary);
   3754
   3755		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   3756					 DP_MST_EN |
   3757					 DP_UP_REQ_EN |
   3758					 DP_UPSTREAM_IS_SRC);
   3759		if (ret < 0)
   3760			goto out_unlock;
   3761
   3762		reset_pay.start_slot = 0;
   3763		reset_pay.num_slots = 0x3f;
   3764		drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
   3765
   3766		queue_work(system_long_wq, &mgr->work);
   3767
   3768		ret = 0;
   3769	} else {
   3770		/* disable MST on the device */
   3771		mstb = mgr->mst_primary;
   3772		mgr->mst_primary = NULL;
   3773		/* this can fail if the device is gone */
   3774		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
   3775		ret = 0;
   3776		memset(mgr->payloads, 0,
   3777		       mgr->max_payloads * sizeof(mgr->payloads[0]));
   3778		memset(mgr->proposed_vcpis, 0,
   3779		       mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
   3780		mgr->payload_mask = 0;
   3781		set_bit(0, &mgr->payload_mask);
   3782		mgr->vcpi_mask = 0;
   3783		mgr->payload_id_table_cleared = false;
   3784	}
   3785
   3786out_unlock:
   3787	mutex_unlock(&mgr->lock);
   3788	mutex_unlock(&mgr->payload_lock);
   3789	if (mstb)
   3790		drm_dp_mst_topology_put_mstb(mstb);
   3791	return ret;
   3792
   3793}
   3794EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
   3795
   3796static void
   3797drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
   3798{
   3799	struct drm_dp_mst_port *port;
   3800
   3801	/* The link address will need to be re-sent on resume */
   3802	mstb->link_address_sent = false;
   3803
   3804	list_for_each_entry(port, &mstb->ports, next)
   3805		if (port->mstb)
   3806			drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
   3807}
   3808
   3809/**
   3810 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
   3811 * @mgr: manager to suspend
   3812 *
   3813 * This function tells the MST device that we can't handle UP messages
   3814 * anymore. This should stop it from sending any since we are suspended.
   3815 */
   3816void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
   3817{
   3818	mutex_lock(&mgr->lock);
   3819	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   3820			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
   3821	mutex_unlock(&mgr->lock);
   3822	flush_work(&mgr->up_req_work);
   3823	flush_work(&mgr->work);
   3824	flush_work(&mgr->delayed_destroy_work);
   3825
   3826	mutex_lock(&mgr->lock);
   3827	if (mgr->mst_state && mgr->mst_primary)
   3828		drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
   3829	mutex_unlock(&mgr->lock);
   3830}
   3831EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
   3832
   3833/**
   3834 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
   3835 * @mgr: manager to resume
   3836 * @sync: whether or not to perform topology reprobing synchronously
   3837 *
    3838 * This will fetch the DPCD and see if the device is still there;
    3839 * if it is, it will rewrite the MSTM control bits and return.
    3840 *
    3841 * If the device has gone away, this returns -1, and the driver should do
    3842 * a full MST reprobe, in case we were undocked.
   3843 *
   3844 * During system resume (where it is assumed that the driver will be calling
   3845 * drm_atomic_helper_resume()) this function should be called beforehand with
   3846 * @sync set to true. In contexts like runtime resume where the driver is not
   3847 * expected to be calling drm_atomic_helper_resume(), this function should be
   3848 * called with @sync set to false in order to avoid deadlocking.
   3849 *
   3850 * Returns: -1 if the MST topology was removed while we were suspended, 0
   3851 * otherwise.
   3852 */
   3853int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
   3854				   bool sync)
   3855{
   3856	int ret;
   3857	u8 guid[16];
   3858
   3859	mutex_lock(&mgr->lock);
   3860	if (!mgr->mst_primary)
   3861		goto out_fail;
   3862
   3863	ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
   3864			       DP_RECEIVER_CAP_SIZE);
   3865	if (ret != DP_RECEIVER_CAP_SIZE) {
   3866		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
   3867		goto out_fail;
   3868	}
   3869
   3870	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   3871				 DP_MST_EN |
   3872				 DP_UP_REQ_EN |
   3873				 DP_UPSTREAM_IS_SRC);
   3874	if (ret < 0) {
   3875		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
   3876		goto out_fail;
   3877	}
   3878
   3879	/* Some hubs forget their guids after they resume */
   3880	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
   3881	if (ret != 16) {
   3882		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
   3883		goto out_fail;
   3884	}
   3885
   3886	ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
   3887	if (ret) {
   3888		drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
   3889		goto out_fail;
   3890	}
   3891
   3892	/*
   3893	 * For the final step of resuming the topology, we need to bring the
   3894	 * state of our in-memory topology back into sync with reality. So,
   3895	 * restart the probing process as if we're probing a new hub
   3896	 */
   3897	queue_work(system_long_wq, &mgr->work);
   3898	mutex_unlock(&mgr->lock);
   3899
   3900	if (sync) {
   3901		drm_dbg_kms(mgr->dev,
   3902			    "Waiting for link probe work to finish re-syncing topology...\n");
   3903		flush_work(&mgr->work);
   3904	}
   3905
   3906	return 0;
   3907
   3908out_fail:
   3909	mutex_unlock(&mgr->lock);
   3910	return -1;
   3911}
   3912EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
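
/*
 * Example system-resume flow (a sketch; error handling is driver-specific),
 * called before drm_atomic_helper_resume():
 *
 *	if (drm_dp_mst_topology_mgr_resume(mgr, true) < 0) {
 *		// topology vanished while suspended - tear down and reprobe
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 *	}
 *
 * A runtime-resume path would pass sync = false instead, as described above.
 */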
   3913
   3914static bool
   3915drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
   3916		      struct drm_dp_mst_branch **mstb)
   3917{
   3918	int len;
   3919	u8 replyblock[32];
   3920	int replylen, curreply;
   3921	int ret;
   3922	u8 hdrlen;
   3923	struct drm_dp_sideband_msg_hdr hdr;
   3924	struct drm_dp_sideband_msg_rx *msg =
   3925		up ? &mgr->up_req_recv : &mgr->down_rep_recv;
   3926	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
   3927			   DP_SIDEBAND_MSG_DOWN_REP_BASE;
   3928
   3929	if (!up)
   3930		*mstb = NULL;
   3931
   3932	len = min(mgr->max_dpcd_transaction_bytes, 16);
   3933	ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
   3934	if (ret != len) {
   3935		drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
   3936		return false;
   3937	}
   3938
   3939	ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen);
    3940	if (!ret) {
   3941		print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
   3942			       1, replyblock, len, false);
   3943		drm_dbg_kms(mgr->dev, "ERROR: failed header\n");
   3944		return false;
   3945	}
   3946
   3947	if (!up) {
   3948		/* Caller is responsible for giving back this reference */
   3949		*mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
   3950		if (!*mstb) {
   3951			drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct);
   3952			return false;
   3953		}
   3954	}
   3955
   3956	if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
   3957		drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]);
   3958		return false;
   3959	}
   3960
   3961	replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
   3962	ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
   3963	if (!ret) {
   3964		drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]);
   3965		return false;
   3966	}
   3967
   3968	replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
   3969	curreply = len;
   3970	while (replylen > 0) {
   3971		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
   3972		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
   3973				    replyblock, len);
   3974		if (ret != len) {
   3975			drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
   3976				    len, ret);
   3977			return false;
   3978		}
   3979
   3980		ret = drm_dp_sideband_append_payload(msg, replyblock, len);
   3981		if (!ret) {
   3982			drm_dbg_kms(mgr->dev, "failed to build sideband msg\n");
   3983			return false;
   3984		}
   3985
   3986		curreply += len;
   3987		replylen -= len;
   3988	}
   3989	return true;
   3990}
   3991
   3992static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
   3993{
   3994	struct drm_dp_sideband_msg_tx *txmsg;
   3995	struct drm_dp_mst_branch *mstb = NULL;
   3996	struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
   3997
   3998	if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
   3999		goto out;
   4000
   4001	/* Multi-packet message transmission, don't clear the reply */
   4002	if (!msg->have_eomt)
   4003		goto out;
   4004
   4005	/* find the message */
   4006	mutex_lock(&mgr->qlock);
   4007	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
   4008					 struct drm_dp_sideband_msg_tx, next);
   4009	mutex_unlock(&mgr->qlock);
   4010
   4011	/* Were we actually expecting a response, and from this mstb? */
   4012	if (!txmsg || txmsg->dst != mstb) {
   4013		struct drm_dp_sideband_msg_hdr *hdr;
   4014
   4015		hdr = &msg->initial_hdr;
   4016		drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
   4017			    mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
   4018		goto out_clear_reply;
   4019	}
   4020
   4021	drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
   4022
   4023	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   4024		drm_dbg_kms(mgr->dev,
   4025			    "Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
   4026			    txmsg->reply.req_type,
   4027			    drm_dp_mst_req_type_str(txmsg->reply.req_type),
   4028			    txmsg->reply.u.nak.reason,
   4029			    drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
   4030			    txmsg->reply.u.nak.nak_data);
   4031	}
   4032
   4033	memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
   4034	drm_dp_mst_topology_put_mstb(mstb);
   4035
   4036	mutex_lock(&mgr->qlock);
   4037	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
   4038	list_del(&txmsg->next);
   4039	mutex_unlock(&mgr->qlock);
   4040
   4041	wake_up_all(&mgr->tx_waitq);
   4042
   4043	return 0;
   4044
   4045out_clear_reply:
   4046	memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
   4047out:
   4048	if (mstb)
   4049		drm_dp_mst_topology_put_mstb(mstb);
   4050
   4051	return 0;
   4052}
   4053
   4054static inline bool
   4055drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
   4056			  struct drm_dp_pending_up_req *up_req)
   4057{
   4058	struct drm_dp_mst_branch *mstb = NULL;
   4059	struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
   4060	struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
   4061	bool hotplug = false;
   4062
   4063	if (hdr->broadcast) {
   4064		const u8 *guid = NULL;
   4065
   4066		if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
   4067			guid = msg->u.conn_stat.guid;
   4068		else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
   4069			guid = msg->u.resource_stat.guid;
   4070
   4071		if (guid)
   4072			mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
   4073	} else {
   4074		mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
   4075	}
   4076
   4077	if (!mstb) {
   4078		drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct);
   4079		return false;
   4080	}
   4081
   4082	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
   4083	if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
   4084		drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
   4085		hotplug = true;
   4086	}
   4087
   4088	drm_dp_mst_topology_put_mstb(mstb);
   4089	return hotplug;
   4090}
   4091
   4092static void drm_dp_mst_up_req_work(struct work_struct *work)
   4093{
   4094	struct drm_dp_mst_topology_mgr *mgr =
   4095		container_of(work, struct drm_dp_mst_topology_mgr,
   4096			     up_req_work);
   4097	struct drm_dp_pending_up_req *up_req;
   4098	bool send_hotplug = false;
   4099
   4100	mutex_lock(&mgr->probe_lock);
   4101	while (true) {
   4102		mutex_lock(&mgr->up_req_lock);
   4103		up_req = list_first_entry_or_null(&mgr->up_req_list,
   4104						  struct drm_dp_pending_up_req,
   4105						  next);
   4106		if (up_req)
   4107			list_del(&up_req->next);
   4108		mutex_unlock(&mgr->up_req_lock);
   4109
   4110		if (!up_req)
   4111			break;
   4112
   4113		send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
   4114		kfree(up_req);
   4115	}
   4116	mutex_unlock(&mgr->probe_lock);
   4117
   4118	if (send_hotplug)
   4119		drm_kms_helper_hotplug_event(mgr->dev);
   4120}
   4121
   4122static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
   4123{
   4124	struct drm_dp_pending_up_req *up_req;
   4125
   4126	if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
   4127		goto out;
   4128
   4129	if (!mgr->up_req_recv.have_eomt)
   4130		return 0;
   4131
   4132	up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
   4133	if (!up_req)
   4134		return -ENOMEM;
   4135
   4136	INIT_LIST_HEAD(&up_req->next);
   4137
   4138	drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg);
   4139
   4140	if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
   4141	    up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
   4142		drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
   4143			    up_req->msg.req_type);
   4144		kfree(up_req);
   4145		goto out;
   4146	}
   4147
   4148	drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
   4149				 false);
   4150
   4151	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
   4152		const struct drm_dp_connection_status_notify *conn_stat =
   4153			&up_req->msg.u.conn_stat;
   4154
   4155		drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
   4156			    conn_stat->port_number,
   4157			    conn_stat->legacy_device_plug_status,
   4158			    conn_stat->displayport_device_plug_status,
   4159			    conn_stat->message_capability_status,
   4160			    conn_stat->input_port,
   4161			    conn_stat->peer_device_type);
   4162	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
   4163		const struct drm_dp_resource_status_notify *res_stat =
   4164			&up_req->msg.u.resource_stat;
   4165
   4166		drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n",
   4167			    res_stat->port_number,
   4168			    res_stat->available_pbn);
   4169	}
   4170
   4171	up_req->hdr = mgr->up_req_recv.initial_hdr;
   4172	mutex_lock(&mgr->up_req_lock);
   4173	list_add_tail(&up_req->next, &mgr->up_req_list);
   4174	mutex_unlock(&mgr->up_req_lock);
   4175	queue_work(system_long_wq, &mgr->up_req_work);
   4176
   4177out:
   4178	memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   4179	return 0;
   4180}
   4181
   4182/**
   4183 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
   4184 * @mgr: manager to notify irq for.
   4185 * @esi: 4 bytes from SINK_COUNT_ESI
   4186 * @handled: whether the hpd interrupt was consumed or not
   4187 *
   4188 * This should be called from the driver when it detects a short IRQ,
   4189 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
   4190 * topology manager will process the sideband messages received as a result
   4191 * of this.
   4192 */
   4193int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
   4194{
   4195	int ret = 0;
   4196	int sc;
   4197	*handled = false;
   4198	sc = DP_GET_SINK_COUNT(esi[0]);
   4199
   4200	if (sc != mgr->sink_count) {
   4201		mgr->sink_count = sc;
   4202		*handled = true;
   4203	}
   4204
   4205	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
   4206		ret = drm_dp_mst_handle_down_rep(mgr);
   4207		*handled = true;
   4208	}
   4209
   4210	if (esi[1] & DP_UP_REQ_MSG_RDY) {
   4211		ret |= drm_dp_mst_handle_up_req(mgr);
   4212		*handled = true;
   4213	}
   4214
   4215	drm_dp_mst_kick_tx(mgr);
   4216	return ret;
   4217}
   4218EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
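
/*
 * Sketch of a short-HPD handler using drm_dp_mst_hpd_irq(). The register
 * offsets are real DPCD addresses; the surrounding read/ack pattern is an
 * assumption modelled on typical drivers:
 *
 *	u8 esi[4];
 *	bool handled;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)	// ack the serviced ESI bits back to the sink
 *		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 2);
 */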
   4219
   4220/**
   4221 * drm_dp_mst_detect_port() - get connection status for an MST port
   4222 * @connector: DRM connector for this port
   4223 * @ctx: The acquisition context to use for grabbing locks
   4224 * @mgr: manager for this port
   4225 * @port: pointer to a port
   4226 *
   4227 * This returns the current connection state for a port.
   4228 */
   4229int
   4230drm_dp_mst_detect_port(struct drm_connector *connector,
   4231		       struct drm_modeset_acquire_ctx *ctx,
   4232		       struct drm_dp_mst_topology_mgr *mgr,
   4233		       struct drm_dp_mst_port *port)
   4234{
   4235	int ret;
   4236
   4237	/* we need to search for the port in the mgr in case it's gone */
   4238	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4239	if (!port)
   4240		return connector_status_disconnected;
   4241
   4242	ret = drm_modeset_lock(&mgr->base.lock, ctx);
   4243	if (ret)
   4244		goto out;
   4245
   4246	ret = connector_status_disconnected;
   4247
   4248	if (!port->ddps)
   4249		goto out;
   4250
   4251	switch (port->pdt) {
   4252	case DP_PEER_DEVICE_NONE:
   4253		break;
   4254	case DP_PEER_DEVICE_MST_BRANCHING:
   4255		if (!port->mcs)
   4256			ret = connector_status_connected;
   4257		break;
   4258
   4259	case DP_PEER_DEVICE_SST_SINK:
   4260		ret = connector_status_connected;
   4261		/* for logical ports - cache the EDID */
   4262		if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
   4263			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
   4264		break;
   4265	case DP_PEER_DEVICE_DP_LEGACY_CONV:
   4266		if (port->ldps)
   4267			ret = connector_status_connected;
   4268		break;
   4269	}
   4270out:
   4271	drm_dp_mst_topology_put_port(port);
   4272	return ret;
   4273}
   4274EXPORT_SYMBOL(drm_dp_mst_detect_port);
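
/*
 * Typical use is from a &drm_connector_helper_funcs.detect_ctx hook (a
 * sketch; the mst_connector type and to_mst_connector() are hypothetical
 * driver-side names):
 *
 *	static int mst_connector_detect_ctx(struct drm_connector *connector,
 *					    struct drm_modeset_acquire_ctx *ctx,
 *					    bool force)
 *	{
 *		struct mst_connector *conn = to_mst_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, ctx, conn->mgr,
 *					      conn->port);
 *	}
 */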
   4275
   4276/**
   4277 * drm_dp_mst_get_edid() - get EDID for an MST port
   4278 * @connector: toplevel connector to get EDID for
   4279 * @mgr: manager for this port
   4280 * @port: unverified pointer to a port.
   4281 *
    4282 * This returns an EDID for the port connected to a connector.
    4283 * It validates that the pointer still exists, so the caller doesn't need to
    4284 * hold a reference.
   4285 */
   4286struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   4287{
   4288	struct edid *edid = NULL;
   4289
   4290	/* we need to search for the port in the mgr in case it's gone */
   4291	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4292	if (!port)
   4293		return NULL;
   4294
   4295	if (port->cached_edid)
   4296		edid = drm_edid_duplicate(port->cached_edid);
    4297	else
    4298		edid = drm_get_edid(connector, &port->aux.ddc);
   4300	port->has_audio = drm_detect_monitor_audio(edid);
   4301	drm_dp_mst_topology_put_port(port);
   4302	return edid;
   4303}
   4304EXPORT_SYMBOL(drm_dp_mst_get_edid);
   4305
   4306/**
   4307 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
   4308 * @mgr: manager to use
   4309 * @pbn: payload bandwidth to convert into slots.
   4310 *
   4311 * Calculate the number of VCPI slots that will be required for the given PBN
   4312 * value. This function is deprecated, and should not be used in atomic
   4313 * drivers.
   4314 *
   4315 * RETURNS:
   4316 * The total slots required for this port, or error.
   4317 */
   4318int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
   4319			   int pbn)
   4320{
   4321	int num_slots;
   4322
   4323	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
   4324
   4325	/* max. time slots - one slot for MTP header */
   4326	if (num_slots > 63)
   4327		return -ENOSPC;
   4328	return num_slots;
   4329}
   4330EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
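
/*
 * Example: with mgr->pbn_div = 40 (an HBR2 x4 link), a 532 PBN stream needs
 * DIV_ROUND_UP(532, 40) = 14 time slots, comfortably below the 63-slot cap.
 */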
   4331
   4332static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
   4333			    struct drm_dp_vcpi *vcpi, int pbn, int slots)
   4334{
   4335	int ret;
   4336
   4337	vcpi->pbn = pbn;
   4338	vcpi->aligned_pbn = slots * mgr->pbn_div;
   4339	vcpi->num_slots = slots;
   4340
   4341	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
   4342	if (ret < 0)
   4343		return ret;
   4344	return 0;
   4345}
   4346
   4347/**
   4348 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
   4349 * @state: global atomic state
   4350 * @mgr: MST topology manager for the port
   4351 * @port: port to find vcpi slots for
   4352 * @pbn: bandwidth required for the mode in PBN
   4353 * @pbn_div: divider for DSC mode that takes FEC into account
   4354 *
   4355 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
   4356 * may have had. Any atomic drivers which support MST must call this function
   4357 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
   4358 * current VCPI allocation for the new state, but only when
   4359 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
   4360 * to ensure compatibility with userspace applications that still use the
   4361 * legacy modesetting UAPI.
   4362 *
   4363 * Allocations set by this function are not checked against the bandwidth
   4364 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
   4365 *
   4366 * Additionally, it is OK to call this function multiple times on the same
   4367 * @port as needed. It is not OK however, to call this function and
   4368 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
   4369 *
   4370 * See also:
   4371 * drm_dp_atomic_release_vcpi_slots()
   4372 * drm_dp_mst_atomic_check()
   4373 *
   4374 * Returns:
   4375 * Total slots in the atomic state assigned for this port, or a negative error
   4376 * code if the port no longer exists
   4377 */
   4378int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
   4379				  struct drm_dp_mst_topology_mgr *mgr,
   4380				  struct drm_dp_mst_port *port, int pbn,
   4381				  int pbn_div)
   4382{
   4383	struct drm_dp_mst_topology_state *topology_state;
   4384	struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
   4385	int prev_slots, prev_bw, req_slots;
   4386
   4387	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
   4388	if (IS_ERR(topology_state))
   4389		return PTR_ERR(topology_state);
   4390
   4391	/* Find the current allocation for this port, if any */
   4392	list_for_each_entry(pos, &topology_state->vcpis, next) {
   4393		if (pos->port == port) {
   4394			vcpi = pos;
   4395			prev_slots = vcpi->vcpi;
   4396			prev_bw = vcpi->pbn;
   4397
   4398			/*
   4399			 * This should never happen, unless the driver tries
   4400			 * releasing and allocating the same VCPI allocation,
   4401			 * which is an error
   4402			 */
   4403			if (WARN_ON(!prev_slots)) {
   4404				drm_err(mgr->dev,
   4405					"cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
   4406					port);
   4407				return -EINVAL;
   4408			}
   4409
   4410			break;
   4411		}
   4412	}
   4413	if (!vcpi) {
   4414		prev_slots = 0;
   4415		prev_bw = 0;
   4416	}
   4417
   4418	if (pbn_div <= 0)
   4419		pbn_div = mgr->pbn_div;
   4420
   4421	req_slots = DIV_ROUND_UP(pbn, pbn_div);
   4422
   4423	drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
   4424		       port->connector->base.id, port->connector->name,
   4425		       port, prev_slots, req_slots);
   4426	drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
   4427		       port->connector->base.id, port->connector->name,
   4428		       port, prev_bw, pbn);
   4429
   4430	/* Add the new allocation to the state */
   4431	if (!vcpi) {
   4432		vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
   4433		if (!vcpi)
   4434			return -ENOMEM;
   4435
   4436		drm_dp_mst_get_port_malloc(port);
   4437		vcpi->port = port;
   4438		list_add(&vcpi->next, &topology_state->vcpis);
   4439	}
   4440	vcpi->vcpi = req_slots;
   4441	vcpi->pbn = pbn;
   4442
   4443	return req_slots;
   4444}
   4445EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
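
/*
 * Sketch of the expected atomic_check usage (the driver-side names
 * crtc_state, bpp and mst_port are assumptions):
 *
 *	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp, false);
 *	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, mst_port, pbn, 0);
 *	if (slots < 0)
 *		return slots;
 *
 * Passing 0 for @pbn_div makes the helper fall back to mgr->pbn_div.
 */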
   4446
   4447/**
   4448 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
   4449 * @state: global atomic state
   4450 * @mgr: MST topology manager for the port
   4451 * @port: The port to release the VCPI slots from
   4452 *
   4453 * Releases any VCPI slots that have been allocated to a port in the atomic
   4454 * state. Any atomic drivers which support MST must call this function in
   4455 * their &drm_connector_helper_funcs.atomic_check() callback when the
   4456 * connector will no longer have VCPI allocated (e.g. because its CRTC was
   4457 * removed) when it had VCPI allocated in the previous atomic state.
   4458 *
   4459 * It is OK to call this even if @port has been removed from the system.
   4460 * Additionally, it is OK to call this function multiple times on the same
   4461 * @port as needed. It is not OK however, to call this function and
   4462 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
   4463 * phase.
   4464 *
   4465 * See also:
   4466 * drm_dp_atomic_find_vcpi_slots()
   4467 * drm_dp_mst_atomic_check()
   4468 *
   4469 * Returns:
   4470 * 0 if all slots for this port were added back to
   4471 * &drm_dp_mst_topology_state.avail_slots or negative error code
   4472 */
   4473int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
   4474				     struct drm_dp_mst_topology_mgr *mgr,
   4475				     struct drm_dp_mst_port *port)
   4476{
   4477	struct drm_dp_mst_topology_state *topology_state;
   4478	struct drm_dp_vcpi_allocation *pos;
   4479	bool found = false;
   4480
   4481	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
   4482	if (IS_ERR(topology_state))
   4483		return PTR_ERR(topology_state);
   4484
   4485	list_for_each_entry(pos, &topology_state->vcpis, next) {
   4486		if (pos->port == port) {
   4487			found = true;
   4488			break;
   4489		}
   4490	}
   4491	if (WARN_ON(!found)) {
   4492		drm_err(mgr->dev, "no VCPI for [MST PORT:%p] found in mst state %p\n",
   4493			port, &topology_state->base);
   4494		return -EINVAL;
   4495	}
   4496
   4497	drm_dbg_atomic(mgr->dev, "[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
   4498	if (pos->vcpi) {
   4499		drm_dp_mst_put_port_malloc(port);
   4500		pos->vcpi = 0;
   4501		pos->pbn = 0;
   4502	}
   4503
   4504	return 0;
   4505}
   4506EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
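
/*
 * Sketch of the matching release in a connector's atomic_check (the state
 * variable names are assumptions):
 *
 *	if (old_conn_state->crtc && !new_conn_state->crtc)
 *		return drm_dp_atomic_release_vcpi_slots(state, mgr, mst_port);
 */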
   4507
   4508/**
    4509 * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
    4510 * @mst_state: mst_state to update
    4511 * @link_encoding_cap: the encoding format on the link
   4512 */
   4513void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
   4514{
   4515	if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
   4516		mst_state->total_avail_slots = 64;
   4517		mst_state->start_slot = 0;
   4518	} else {
   4519		mst_state->total_avail_slots = 63;
   4520		mst_state->start_slot = 1;
   4521	}
   4522
   4523	DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
   4524		      (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",
   4525		      mst_state);
   4526}
   4527EXPORT_SYMBOL(drm_dp_mst_update_slots);
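
/*
 * For example, a driver running a DP 2.0 128b/132b (UHBR) link would call
 * (a sketch; obtaining mst_state via drm_atomic_get_mst_topology_state() is
 * assumed to have happened in the caller):
 *
 *	drm_dp_mst_update_slots(mst_state, DP_CAP_ANSI_128B132B);
 */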
   4528
   4529/**
   4530 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
   4531 * @mgr: manager for this port
   4532 * @port: port to allocate a virtual channel for.
   4533 * @pbn: payload bandwidth number to request
   4534 * @slots: returned number of slots for this PBN.
   4535 */
   4536bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
   4537			      struct drm_dp_mst_port *port, int pbn, int slots)
   4538{
   4539	int ret;
   4540
   4541	if (slots < 0)
   4542		return false;
   4543
   4544	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4545	if (!port)
   4546		return false;
   4547
   4548	if (port->vcpi.vcpi > 0) {
   4549		drm_dbg_kms(mgr->dev,
   4550			    "payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
   4551			    port->vcpi.vcpi, port->vcpi.pbn, pbn);
   4552		if (pbn == port->vcpi.pbn) {
   4553			drm_dp_mst_topology_put_port(port);
   4554			return true;
   4555		}
   4556	}
   4557
   4558	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
   4559	if (ret) {
   4560		drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d ret=%d\n",
   4561			    DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
   4562		drm_dp_mst_topology_put_port(port);
   4563		goto out;
   4564	}
   4565	drm_dbg_kms(mgr->dev, "initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots);
   4566
   4567	/* Keep port allocated until its payload has been removed */
   4568	drm_dp_mst_get_port_malloc(port);
   4569	drm_dp_mst_topology_put_port(port);
   4570	return true;
   4571out:
   4572	return false;
   4573}
   4574EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
   4575
   4576int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   4577{
   4578	int slots = 0;
   4579
   4580	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4581	if (!port)
   4582		return slots;
   4583
   4584	slots = port->vcpi.num_slots;
   4585	drm_dp_mst_topology_put_port(port);
   4586	return slots;
   4587}
   4588EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
   4589
   4590/**
   4591 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
   4592 * @mgr: manager for this port
   4593 * @port: unverified pointer to a port.
   4594 *
    4595 * This just resets the number of slots for the port's VCPI for later programming.
   4596 */
   4597void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   4598{
   4599	/*
   4600	 * A port with VCPI will remain allocated until its VCPI is
   4601	 * released, no verified ref needed
   4602	 */
   4603
   4604	port->vcpi.num_slots = 0;
   4605}
   4606EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
   4607
   4608/**
   4609 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
   4610 * @mgr: manager for this port
   4611 * @port: port to deallocate vcpi for
   4612 *
   4613 * This can be called unconditionally, regardless of whether
   4614 * drm_dp_mst_allocate_vcpi() succeeded or not.
   4615 */
   4616void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
   4617				struct drm_dp_mst_port *port)
   4618{
   4619	bool skip;
   4620
   4621	if (!port->vcpi.vcpi)
   4622		return;
   4623
   4624	mutex_lock(&mgr->lock);
   4625	skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
   4626	mutex_unlock(&mgr->lock);
   4627
   4628	if (skip)
   4629		return;
   4630
   4631	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
   4632	port->vcpi.num_slots = 0;
   4633	port->vcpi.pbn = 0;
   4634	port->vcpi.aligned_pbn = 0;
   4635	port->vcpi.vcpi = 0;
   4636	drm_dp_mst_put_port_malloc(port);
   4637}
   4638EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
   4639
   4640static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
   4641				     int id, struct drm_dp_payload *payload)
   4642{
   4643	u8 payload_alloc[3], status;
   4644	int ret;
   4645	int retries = 0;
   4646
   4647	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
   4648			   DP_PAYLOAD_TABLE_UPDATED);
   4649
   4650	payload_alloc[0] = id;
   4651	payload_alloc[1] = payload->start_slot;
   4652	payload_alloc[2] = payload->num_slots;
   4653
   4654	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
   4655	if (ret != 3) {
   4656		drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
   4657		goto fail;
   4658	}
   4659
   4660retry:
   4661	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
   4662	if (ret < 0) {
   4663		drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
   4664		goto fail;
   4665	}
   4666
   4667	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
   4668		retries++;
   4669		if (retries < 20) {
   4670			usleep_range(10000, 20000);
   4671			goto retry;
   4672		}
   4673		drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
   4674			    status);
   4675		ret = -EINVAL;
   4676		goto fail;
   4677	}
   4678	ret = 0;
   4679fail:
   4680	return ret;
   4681}
   4682
   4683static int do_get_act_status(struct drm_dp_aux *aux)
   4684{
   4685	int ret;
   4686	u8 status;
   4687
   4688	ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
   4689	if (ret < 0)
   4690		return ret;
   4691
   4692	return status;
   4693}
   4694
   4695/**
   4696 * drm_dp_check_act_status() - Polls for ACT handled status.
   4697 * @mgr: manager to use
   4698 *
    4699 * Tries waiting for the MST hub to finish updating its payload table by
    4700 * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
    4701 * take that long).
   4702 *
   4703 * Returns:
   4704 * 0 if the ACT was handled in time, negative error code on failure.
   4705 */
   4706int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
   4707{
   4708	/*
   4709	 * There doesn't seem to be any recommended retry count or timeout in
   4710	 * the MST specification. Since some hubs have been observed to take
   4711	 * over 1 second to update their payload allocations under certain
   4712	 * conditions, we use a rather large timeout value.
   4713	 */
   4714	const int timeout_ms = 3000;
   4715	int ret, status;
   4716
   4717	ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
   4718				 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
   4719				 200, timeout_ms * USEC_PER_MSEC);
   4720	if (ret < 0 && status >= 0) {
   4721		drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
   4722			timeout_ms, status);
   4723		return -EINVAL;
   4724	} else if (status < 0) {
   4725		/*
   4726		 * Failure here isn't unexpected - the hub may have
   4727		 * just been unplugged
   4728		 */
   4729		drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
   4730		return status;
   4731	}
   4732
   4733	return 0;
   4734}
   4735EXPORT_SYMBOL(drm_dp_check_act_status);
   4736
   4737/**
   4738 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
   4739 * @clock: dot clock for the mode
   4740 * @bpp: bpp for the mode.
   4741 * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
   4742 *
   4743 * This uses the formula in the spec to calculate the PBN value for a mode.
   4744 */
   4745int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
   4746{
   4747	/*
   4748	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
   4749	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
   4750	 * common multiplier to render an integer PBN for all link rate/lane
   4751	 * counts combinations
   4752	 * calculate
   4753	 * peak_kbps *= (1006/1000)
   4754	 * peak_kbps *= (64/54)
   4755	 * peak_kbps *= 8    convert to bytes
   4756	 *
   4757	 * If the bpp is in units of 1/16, further divide by 16. Put this
   4758	 * factor in the numerator rather than the denominator to avoid
   4759	 * integer overflow
   4760	 */
   4761
   4762	if (dsc)
   4763		return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
   4764					8 * 54 * 1000 * 1000);
   4765
   4766	return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
   4767				8 * 54 * 1000 * 1000);
   4768}
   4769EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
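
/*
 * Worked example: a 1920x1080@60 mode with a 148500 kHz dot clock at 24 bpp
 * (no DSC) gives DIV_ROUND_UP(148500 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000)
 * = 532 PBN.
 */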
   4770
    4771/* we want to kick the TX after we've acked the up/down IRQs. */
   4772static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
   4773{
   4774	queue_work(system_long_wq, &mgr->tx_work);
   4775}
   4776
   4777/*
   4778 * Helper function for parsing DP device types into convenient strings
   4779 * for use with dp_mst_topology
   4780 */
   4781static const char *pdt_to_string(u8 pdt)
   4782{
   4783	switch (pdt) {
   4784	case DP_PEER_DEVICE_NONE:
   4785		return "NONE";
   4786	case DP_PEER_DEVICE_SOURCE_OR_SST:
   4787		return "SOURCE OR SST";
   4788	case DP_PEER_DEVICE_MST_BRANCHING:
   4789		return "MST BRANCHING";
   4790	case DP_PEER_DEVICE_SST_SINK:
   4791		return "SST SINK";
   4792	case DP_PEER_DEVICE_DP_LEGACY_CONV:
   4793		return "DP LEGACY CONV";
   4794	default:
   4795		return "ERR";
   4796	}
   4797}
   4798
   4799static void drm_dp_mst_dump_mstb(struct seq_file *m,
   4800				 struct drm_dp_mst_branch *mstb)
   4801{
   4802	struct drm_dp_mst_port *port;
   4803	int tabs = mstb->lct;
   4804	char prefix[10];
   4805	int i;
   4806
   4807	for (i = 0; i < tabs; i++)
   4808		prefix[i] = '\t';
   4809	prefix[i] = '\0';
   4810
   4811	seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports);
   4812	list_for_each_entry(port, &mstb->ports, next) {
   4813		seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n",
   4814			   prefix,
   4815			   port->port_num,
   4816			   port,
   4817			   port->input ? "input" : "output",
   4818			   pdt_to_string(port->pdt),
   4819			   port->ddps,
   4820			   port->ldps,
   4821			   port->num_sdp_streams,
   4822			   port->num_sdp_stream_sinks,
   4823			   port->fec_capable ? "true" : "false",
   4824			   port->connector);
   4825		if (port->mstb)
   4826			drm_dp_mst_dump_mstb(m, port->mstb);
   4827	}
   4828}
   4829
   4830#define DP_PAYLOAD_TABLE_SIZE		64
   4831
   4832static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
   4833				  char *buf)
   4834{
   4835	int i;
   4836
   4837	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
   4838		if (drm_dp_dpcd_read(mgr->aux,
   4839				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
   4840				     &buf[i], 16) != 16)
   4841			return false;
   4842	}
   4843	return true;
   4844}
   4845
   4846static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
   4847			       struct drm_dp_mst_port *port, char *name,
   4848			       int namelen)
   4849{
   4850	struct edid *mst_edid;
   4851
   4852	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
   4853	drm_edid_get_monitor_name(mst_edid, name, namelen);
   4854	kfree(mst_edid);
   4855}
   4856
   4857/**
   4858 * drm_dp_mst_dump_topology(): dump topology to seq file.
   4859 * @m: seq_file to dump output to
   4860 * @mgr: manager to dump current topology for.
   4861 *
   4862 * helper to dump MST topology to a seq file for debugfs.
   4863 */
   4864void drm_dp_mst_dump_topology(struct seq_file *m,
   4865			      struct drm_dp_mst_topology_mgr *mgr)
   4866{
   4867	int i;
   4868	struct drm_dp_mst_port *port;
   4869
   4870	mutex_lock(&mgr->lock);
   4871	if (mgr->mst_primary)
   4872		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
   4873
   4874	/* dump VCPIs */
   4875	mutex_unlock(&mgr->lock);
   4876
   4877	mutex_lock(&mgr->payload_lock);
   4878	seq_printf(m, "\n*** VCPI Info ***\n");
   4879	seq_printf(m, "payload_mask: %lx, vcpi_mask: %lx, max_payloads: %d\n", mgr->payload_mask, mgr->vcpi_mask, mgr->max_payloads);
   4880
   4881	seq_printf(m, "\n|   idx   |  port # |  vcp_id | # slots |     sink name     |\n");
   4882	for (i = 0; i < mgr->max_payloads; i++) {
   4883		if (mgr->proposed_vcpis[i]) {
   4884			char name[14];
   4885
   4886			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   4887			fetch_monitor_name(mgr, port, name, sizeof(name));
   4888			seq_printf(m, "%10d%10d%10d%10d%20s\n",
   4889				   i,
   4890				   port->port_num,
   4891				   port->vcpi.vcpi,
   4892				   port->vcpi.num_slots,
   4893				   (*name != 0) ? name : "Unknown");
   4894		} else
   4895			seq_printf(m, "%6d - Unused\n", i);
   4896	}
   4897	seq_printf(m, "\n*** Payload Info ***\n");
   4898	seq_printf(m, "|   idx   |  state  |  start slot  | # slots |\n");
   4899	for (i = 0; i < mgr->max_payloads; i++) {
   4900		seq_printf(m, "%10d%10d%15d%10d\n",
   4901			   i,
   4902			   mgr->payloads[i].payload_state,
   4903			   mgr->payloads[i].start_slot,
   4904			   mgr->payloads[i].num_slots);
   4905	}
   4906	mutex_unlock(&mgr->payload_lock);
   4907
   4908	seq_printf(m, "\n*** DPCD Info ***\n");
   4909	mutex_lock(&mgr->lock);
   4910	if (mgr->mst_primary) {
   4911		u8 buf[DP_PAYLOAD_TABLE_SIZE];
   4912		int ret;
   4913
   4914		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
    4915	if (ret != DP_RECEIVER_CAP_SIZE) {
   4916			seq_printf(m, "dpcd read failed\n");
   4917			goto out;
   4918		}
   4919		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
   4920
   4921		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
    4922	if (ret != 2) {
   4923			seq_printf(m, "faux/mst read failed\n");
   4924			goto out;
   4925		}
   4926		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
   4927
   4928		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
    4929	if (ret != 1) {
   4930			seq_printf(m, "mst ctrl read failed\n");
   4931			goto out;
   4932		}
   4933		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
   4934
   4935		/* dump the standard OUI branch header */
   4936		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
    4937	if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
   4938			seq_printf(m, "branch oui read failed\n");
   4939			goto out;
   4940		}
   4941		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
   4942
   4943		for (i = 0x3; i < 0x8 && buf[i]; i++)
   4944			seq_printf(m, "%c", buf[i]);
   4945		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
   4946			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
   4947		if (dump_dp_payload_table(mgr, buf))
   4948			seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
   4949	}
   4950
   4951out:
   4952	mutex_unlock(&mgr->lock);
   4953
   4954}
   4955EXPORT_SYMBOL(drm_dp_mst_dump_topology);
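
/*
 * Editorial usage sketch, not part of the original file: a driver can expose
 * the dump above through debugfs with a seq_file show callback. The
 * "example_" names are hypothetical.
 */
static int example_mst_topology_show(struct seq_file *m, void *unused)
{
	struct drm_dp_mst_topology_mgr *mgr = m->private;

	drm_dp_mst_dump_topology(m, mgr);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_mst_topology);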
   4956
   4957static void drm_dp_tx_work(struct work_struct *work)
   4958{
   4959	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
   4960
   4961	mutex_lock(&mgr->qlock);
   4962	if (!list_empty(&mgr->tx_msg_downq))
   4963		process_single_down_tx_qlock(mgr);
   4964	mutex_unlock(&mgr->qlock);
   4965}
   4966
   4967static inline void
   4968drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
   4969{
   4970	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
   4971
   4972	if (port->connector) {
   4973		drm_connector_unregister(port->connector);
   4974		drm_connector_put(port->connector);
   4975	}
   4976
   4977	drm_dp_mst_put_port_malloc(port);
   4978}
   4979
   4980static inline void
   4981drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
   4982{
   4983	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   4984	struct drm_dp_mst_port *port, *port_tmp;
   4985	struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
   4986	bool wake_tx = false;
   4987
   4988	mutex_lock(&mgr->lock);
   4989	list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
   4990		list_del(&port->next);
   4991		drm_dp_mst_topology_put_port(port);
   4992	}
   4993	mutex_unlock(&mgr->lock);
   4994
   4995	/* drop any tx slot msg */
   4996	mutex_lock(&mstb->mgr->qlock);
   4997	list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
   4998		if (txmsg->dst != mstb)
   4999			continue;
   5000
   5001		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
   5002		list_del(&txmsg->next);
   5003		wake_tx = true;
   5004	}
   5005	mutex_unlock(&mstb->mgr->qlock);
   5006
   5007	if (wake_tx)
   5008		wake_up_all(&mstb->mgr->tx_waitq);
   5009
   5010	drm_dp_mst_put_mstb_malloc(mstb);
   5011}
   5012
   5013static void drm_dp_delayed_destroy_work(struct work_struct *work)
   5014{
   5015	struct drm_dp_mst_topology_mgr *mgr =
   5016		container_of(work, struct drm_dp_mst_topology_mgr,
   5017			     delayed_destroy_work);
   5018	bool send_hotplug = false, go_again;
   5019
   5020	/*
   5021	 * Not a regular list traverse as we have to drop the destroy
   5022	 * connector lock before destroying the mstb/port, to avoid AB->BA
   5023	 * ordering between this lock and the config mutex.
   5024	 */
   5025	do {
   5026		go_again = false;
   5027
   5028		for (;;) {
   5029			struct drm_dp_mst_branch *mstb;
   5030
   5031			mutex_lock(&mgr->delayed_destroy_lock);
   5032			mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
   5033							struct drm_dp_mst_branch,
   5034							destroy_next);
   5035			if (mstb)
   5036				list_del(&mstb->destroy_next);
   5037			mutex_unlock(&mgr->delayed_destroy_lock);
   5038
   5039			if (!mstb)
   5040				break;
   5041
   5042			drm_dp_delayed_destroy_mstb(mstb);
   5043			go_again = true;
   5044		}
   5045
   5046		for (;;) {
   5047			struct drm_dp_mst_port *port;
   5048
   5049			mutex_lock(&mgr->delayed_destroy_lock);
   5050			port = list_first_entry_or_null(&mgr->destroy_port_list,
   5051							struct drm_dp_mst_port,
   5052							next);
   5053			if (port)
   5054				list_del(&port->next);
   5055			mutex_unlock(&mgr->delayed_destroy_lock);
   5056
   5057			if (!port)
   5058				break;
   5059
   5060			drm_dp_delayed_destroy_port(port);
   5061			send_hotplug = true;
   5062			go_again = true;
   5063		}
   5064	} while (go_again);
   5065
   5066	if (send_hotplug)
   5067		drm_kms_helper_hotplug_event(mgr->dev);
   5068}
   5069
   5070static struct drm_private_state *
   5071drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
   5072{
   5073	struct drm_dp_mst_topology_state *state, *old_state =
   5074		to_dp_mst_topology_state(obj->state);
   5075	struct drm_dp_vcpi_allocation *pos, *vcpi;
   5076
   5077	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
   5078	if (!state)
   5079		return NULL;
   5080
   5081	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
   5082
   5083	INIT_LIST_HEAD(&state->vcpis);
   5084
   5085	list_for_each_entry(pos, &old_state->vcpis, next) {
   5086		/* Prune leftover freed VCPI allocations */
   5087		if (!pos->vcpi)
   5088			continue;
   5089
   5090		vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
   5091		if (!vcpi)
   5092			goto fail;
   5093
   5094		drm_dp_mst_get_port_malloc(vcpi->port);
   5095		list_add(&vcpi->next, &state->vcpis);
   5096	}
   5097
   5098	return &state->base;
   5099
   5100fail:
   5101	list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
   5102		drm_dp_mst_put_port_malloc(pos->port);
   5103		kfree(pos);
   5104	}
   5105	kfree(state);
   5106
   5107	return NULL;
   5108}
   5109
   5110static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
   5111				     struct drm_private_state *state)
   5112{
   5113	struct drm_dp_mst_topology_state *mst_state =
   5114		to_dp_mst_topology_state(state);
   5115	struct drm_dp_vcpi_allocation *pos, *tmp;
   5116
   5117	list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
   5118		/* We only keep references to ports with non-zero VCPIs */
   5119		if (pos->vcpi)
   5120			drm_dp_mst_put_port_malloc(pos->port);
   5121		kfree(pos);
   5122	}
   5123
   5124	kfree(mst_state);
   5125}
   5126
   5127static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
   5128						 struct drm_dp_mst_branch *branch)
   5129{
   5130	while (port->parent) {
   5131		if (port->parent == branch)
   5132			return true;
   5133
   5134		if (port->parent->port_parent)
   5135			port = port->parent->port_parent;
   5136		else
   5137			break;
   5138	}
   5139	return false;
   5140}
   5141
   5142static int
   5143drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
   5144				      struct drm_dp_mst_topology_state *state);
   5145
   5146static int
   5147drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
   5148				      struct drm_dp_mst_topology_state *state)
   5149{
   5150	struct drm_dp_vcpi_allocation *vcpi;
   5151	struct drm_dp_mst_port *port;
   5152	int pbn_used = 0, ret;
   5153	bool found = false;
   5154
   5155	/* Check that we have at least one port in our state that's downstream
   5156	 * of this branch, otherwise we can skip this branch
   5157	 */
   5158	list_for_each_entry(vcpi, &state->vcpis, next) {
   5159		if (!vcpi->pbn ||
   5160		    !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
   5161			continue;
   5162
   5163		found = true;
   5164		break;
   5165	}
   5166	if (!found)
   5167		return 0;
   5168
   5169	if (mstb->port_parent)
   5170		drm_dbg_atomic(mstb->mgr->dev,
   5171			       "[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
   5172			       mstb->port_parent->parent, mstb->port_parent, mstb);
   5173	else
   5174		drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);
   5175
   5176	list_for_each_entry(port, &mstb->ports, next) {
   5177		ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
   5178		if (ret < 0)
   5179			return ret;
   5180
   5181		pbn_used += ret;
   5182	}
   5183
   5184	return pbn_used;
   5185}
   5186
   5187static int
   5188drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
   5189				      struct drm_dp_mst_topology_state *state)
   5190{
   5191	struct drm_dp_vcpi_allocation *vcpi;
   5192	int pbn_used = 0;
   5193
   5194	if (port->pdt == DP_PEER_DEVICE_NONE)
   5195		return 0;
   5196
   5197	if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
   5198		bool found = false;
   5199
   5200		list_for_each_entry(vcpi, &state->vcpis, next) {
   5201			if (vcpi->port != port)
   5202				continue;
   5203			if (!vcpi->pbn)
   5204				return 0;
   5205
   5206			found = true;
   5207			break;
   5208		}
   5209		if (!found)
   5210			return 0;
   5211
   5212		/*
   5213		 * This could happen if the sink deasserted its HPD line, but
   5214		 * the branch device still reports it as attached (PDT != NONE).
   5215		 */
   5216		if (!port->full_pbn) {
   5217			drm_dbg_atomic(port->mgr->dev,
   5218				       "[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
   5219				       port->parent, port);
   5220			return -EINVAL;
   5221		}
   5222
   5223		pbn_used = vcpi->pbn;
   5224	} else {
   5225		pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
   5226								 state);
   5227		if (pbn_used <= 0)
   5228			return pbn_used;
   5229	}
   5230
   5231	if (pbn_used > port->full_pbn) {
   5232		drm_dbg_atomic(port->mgr->dev,
   5233			       "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
   5234			       port->parent, port, pbn_used, port->full_pbn);
   5235		return -ENOSPC;
   5236	}
   5237
   5238	drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
   5239		       port->parent, port, pbn_used, port->full_pbn);
   5240
   5241	return pbn_used;
   5242}
   5243
   5244static inline int
   5245drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
   5246					 struct drm_dp_mst_topology_state *mst_state)
   5247{
   5248	struct drm_dp_vcpi_allocation *vcpi;
   5249	int avail_slots = mst_state->total_avail_slots, payload_count = 0;
   5250
   5251	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
    5252		/* Releasing VCPI is always OK, even if the port is gone */
   5253		if (!vcpi->vcpi) {
   5254			drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all VCPI slots\n",
   5255				       vcpi->port);
   5256			continue;
   5257		}
   5258
   5259		drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d vcpi slots\n",
   5260			       vcpi->port, vcpi->vcpi);
   5261
   5262		avail_slots -= vcpi->vcpi;
   5263		if (avail_slots < 0) {
   5264			drm_dbg_atomic(mgr->dev,
   5265				       "[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
   5266				       vcpi->port, mst_state, avail_slots + vcpi->vcpi);
   5267			return -ENOSPC;
   5268		}
   5269
   5270		if (++payload_count > mgr->max_payloads) {
   5271			drm_dbg_atomic(mgr->dev,
   5272				       "[MST MGR:%p] state %p has too many payloads (max=%d)\n",
   5273				       mgr, mst_state, mgr->max_payloads);
   5274			return -EINVAL;
   5275		}
   5276	}
   5277	drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
   5278		       mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots);
   5279
   5280	return 0;
   5281}
   5282
   5283/**
    5284	 * drm_dp_mst_add_affected_dsc_crtcs() - flag all affected CRTCs for a modeset
    5285	 * @state: Pointer to the new &struct drm_atomic_state
    5286	 * @mgr: MST topology manager
    5287	 *
    5288	 * Whenever there is a change in the MST topology, the DSC
    5289	 * configuration has to be recalculated, and therefore we need
    5290	 * to trigger a modeset on all affected CRTCs in that
    5291	 * topology.
   5292 *
   5293 * See also:
   5294 * drm_dp_mst_atomic_enable_dsc()
   5295 */
   5296int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
   5297{
   5298	struct drm_dp_mst_topology_state *mst_state;
   5299	struct drm_dp_vcpi_allocation *pos;
   5300	struct drm_connector *connector;
   5301	struct drm_connector_state *conn_state;
   5302	struct drm_crtc *crtc;
   5303	struct drm_crtc_state *crtc_state;
   5304
   5305	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
   5306
   5307	if (IS_ERR(mst_state))
   5308		return -EINVAL;
   5309
   5310	list_for_each_entry(pos, &mst_state->vcpis, next) {
   5311
   5312		connector = pos->port->connector;
   5313
   5314		if (!connector)
   5315			return -EINVAL;
   5316
   5317		conn_state = drm_atomic_get_connector_state(state, connector);
   5318
   5319		if (IS_ERR(conn_state))
   5320			return PTR_ERR(conn_state);
   5321
   5322		crtc = conn_state->crtc;
   5323
   5324		if (!crtc)
   5325			continue;
   5326
   5327		if (!drm_dp_mst_dsc_aux_for_port(pos->port))
   5328			continue;
   5329
   5330		crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
   5331
   5332		if (IS_ERR(crtc_state))
   5333			return PTR_ERR(crtc_state);
   5334
   5335		drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
   5336			       mgr, crtc);
   5337
   5338		crtc_state->mode_changed = true;
   5339	}
   5340	return 0;
   5341}
   5342EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
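
/*
 * Editorial sketch with hypothetical names: drivers with DSC support would
 * typically call the helper above early in their
 * &drm_mode_config_funcs.atomic_check hook, once per topology manager,
 * before recomputing any DSC parameters:
 *
 *	ret = drm_dp_mst_add_affected_dsc_crtcs(state, &connector->mst_mgr);
 *	if (ret)
 *		return ret;
 */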
   5343
   5344/**
   5345 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
   5346 * @state: Pointer to the new drm_atomic_state
   5347 * @port: Pointer to the affected MST Port
   5348 * @pbn: Newly recalculated bw required for link with DSC enabled
   5349 * @pbn_div: Divider to calculate correct number of pbn per slot
   5350 * @enable: Boolean flag to enable or disable DSC on the port
   5351 *
    5352	 * This function enables or disables DSC on the given port by
    5353	 * recalculating its VCPI allocation from the provided PBN, and
    5354	 * sets the dsc_enabled flag to keep track of which ports have
    5355	 * DSC enabled.
   5356 *
   5357 */
   5358int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
   5359				 struct drm_dp_mst_port *port,
   5360				 int pbn, int pbn_div,
   5361				 bool enable)
   5362{
   5363	struct drm_dp_mst_topology_state *mst_state;
   5364	struct drm_dp_vcpi_allocation *pos;
   5365	bool found = false;
   5366	int vcpi = 0;
   5367
   5368	mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
   5369
   5370	if (IS_ERR(mst_state))
   5371		return PTR_ERR(mst_state);
   5372
   5373	list_for_each_entry(pos, &mst_state->vcpis, next) {
   5374		if (pos->port == port) {
   5375			found = true;
   5376			break;
   5377		}
   5378	}
   5379
   5380	if (!found) {
   5381		drm_dbg_atomic(state->dev,
   5382			       "[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
   5383			       port, mst_state);
   5384		return -EINVAL;
   5385	}
   5386
   5387	if (pos->dsc_enabled == enable) {
   5388		drm_dbg_atomic(state->dev,
   5389			       "[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
   5390			       port, enable, pos->vcpi);
   5391		vcpi = pos->vcpi;
   5392	}
   5393
   5394	if (enable) {
   5395		vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
   5396		drm_dbg_atomic(state->dev,
   5397			       "[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
   5398			       port, vcpi);
   5399		if (vcpi < 0)
   5400			return -EINVAL;
   5401	}
   5402
   5403	pos->dsc_enabled = enable;
   5404
   5405	return vcpi;
   5406}
   5407EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
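
/*
 * Editorial sketch, assuming "dsc_pbn" and "pbn_div" were recomputed by the
 * caller (e.g. via drm_dp_calc_pbn_mode(..., true)); all names here are
 * hypothetical:
 *
 *	slots = drm_dp_mst_atomic_enable_dsc(state, port, dsc_pbn,
 *					     pbn_div, true);
 *	if (slots < 0)
 *		return slots;
 */
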
   5408/**
   5409 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
   5410 * atomic update is valid
   5411 * @state: Pointer to the new &struct drm_dp_mst_topology_state
   5412 *
   5413 * Checks the given topology state for an atomic update to ensure that it's
   5414 * valid. This includes checking whether there's enough bandwidth to support
   5415 * the new VCPI allocations in the atomic update.
   5416 *
   5417 * Any atomic drivers supporting DP MST must make sure to call this after
   5418 * checking the rest of their state in their
   5419 * &drm_mode_config_funcs.atomic_check() callback.
   5420 *
   5421 * See also:
   5422 * drm_dp_atomic_find_vcpi_slots()
   5423 * drm_dp_atomic_release_vcpi_slots()
   5424 *
   5425 * Returns:
   5426 *
   5427 * 0 if the new state is valid, negative error code otherwise.
   5428 */
   5429int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
   5430{
   5431	struct drm_dp_mst_topology_mgr *mgr;
   5432	struct drm_dp_mst_topology_state *mst_state;
   5433	int i, ret = 0;
   5434
   5435	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
   5436		if (!mgr->mst_state)
   5437			continue;
   5438
   5439		ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
   5440		if (ret)
   5441			break;
   5442
   5443		mutex_lock(&mgr->lock);
   5444		ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
   5445							    mst_state);
   5446		mutex_unlock(&mgr->lock);
   5447		if (ret < 0)
   5448			break;
   5449		else
   5450			ret = 0;
   5451	}
   5452
   5453	return ret;
   5454}
   5455EXPORT_SYMBOL(drm_dp_mst_atomic_check);
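
/*
 * Editorial usage sketch, not part of the original file: an atomic driver
 * calls drm_dp_mst_atomic_check() after validating the rest of its state.
 * The "example_" name is hypothetical.
 */
static inline int example_atomic_check(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return drm_dp_mst_atomic_check(state);
}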
   5456
   5457const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
   5458	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
   5459	.atomic_destroy_state = drm_dp_mst_destroy_state,
   5460};
   5461EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
   5462
   5463/**
   5464 * drm_atomic_get_mst_topology_state: get MST topology state
   5465 *
   5466 * @state: global atomic state
   5467 * @mgr: MST topology manager, also the private object in this case
   5468 *
    5469	 * This function wraps drm_atomic_get_private_obj_state(), passing in the
    5470	 * MST atomic state vtable so that the private object state returned is that
    5471	 * of an MST topology object. Also, the caller is expected to take care of
    5472	 * the locking, so we warn if the connection_mutex is not held.
   5473 *
   5474 * RETURNS:
   5475 *
   5476 * The MST topology state or error pointer.
   5477 */
   5478struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
   5479								    struct drm_dp_mst_topology_mgr *mgr)
   5480{
   5481	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
   5482}
   5483EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
   5484
   5485/**
   5486 * drm_dp_mst_topology_mgr_init - initialise a topology manager
   5487 * @mgr: manager struct to initialise
   5488 * @dev: device providing this structure - for i2c addition.
   5489 * @aux: DP helper aux channel to talk to this device
   5490 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
   5491 * @max_payloads: maximum number of payloads this GPU can source
   5492 * @max_lane_count: maximum number of lanes this GPU supports
   5493 * @max_link_rate: maximum link rate per lane this GPU supports in kHz
   5494 * @conn_base_id: the connector object ID the MST device is connected to.
   5495 *
   5496 * Return 0 for success, or negative error code on failure
   5497 */
   5498int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
   5499				 struct drm_device *dev, struct drm_dp_aux *aux,
   5500				 int max_dpcd_transaction_bytes, int max_payloads,
   5501				 int max_lane_count, int max_link_rate,
   5502				 int conn_base_id)
   5503{
   5504	struct drm_dp_mst_topology_state *mst_state;
   5505
   5506	mutex_init(&mgr->lock);
   5507	mutex_init(&mgr->qlock);
   5508	mutex_init(&mgr->payload_lock);
   5509	mutex_init(&mgr->delayed_destroy_lock);
   5510	mutex_init(&mgr->up_req_lock);
   5511	mutex_init(&mgr->probe_lock);
   5512#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
   5513	mutex_init(&mgr->topology_ref_history_lock);
   5514	stack_depot_init();
   5515#endif
   5516	INIT_LIST_HEAD(&mgr->tx_msg_downq);
   5517	INIT_LIST_HEAD(&mgr->destroy_port_list);
   5518	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
   5519	INIT_LIST_HEAD(&mgr->up_req_list);
   5520
   5521	/*
   5522	 * delayed_destroy_work will be queued on a dedicated WQ, so that any
    5523	 * requeued work is also flushed when deinitializing the topology manager.
   5524	 */
   5525	mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
   5526	if (mgr->delayed_destroy_wq == NULL)
   5527		return -ENOMEM;
   5528
   5529	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
   5530	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
   5531	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
   5532	INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
   5533	init_waitqueue_head(&mgr->tx_waitq);
   5534	mgr->dev = dev;
   5535	mgr->aux = aux;
   5536	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
   5537	mgr->max_payloads = max_payloads;
   5538	mgr->max_lane_count = max_lane_count;
   5539	mgr->max_link_rate = max_link_rate;
   5540	mgr->conn_base_id = conn_base_id;
   5541	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
   5542	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
   5543		return -EINVAL;
   5544	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
   5545	if (!mgr->payloads)
   5546		return -ENOMEM;
   5547	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
   5548	if (!mgr->proposed_vcpis)
   5549		return -ENOMEM;
   5550	set_bit(0, &mgr->payload_mask);
   5551
   5552	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
   5553	if (mst_state == NULL)
   5554		return -ENOMEM;
   5555
   5556	mst_state->total_avail_slots = 63;
   5557	mst_state->start_slot = 1;
   5558
   5559	mst_state->mgr = mgr;
   5560	INIT_LIST_HEAD(&mst_state->vcpis);
   5561
   5562	drm_atomic_private_obj_init(dev, &mgr->base,
   5563				    &mst_state->base,
   5564				    &drm_dp_mst_topology_state_funcs);
   5565
   5566	return 0;
   5567}
   5568EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
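
/*
 * Editorial usage sketch with illustrative limits (16-byte DPCD
 * transactions, 4 payloads, 4 lanes, HBR3 = 810000 kHz); the "example_"
 * name is hypothetical.
 */
static inline int example_mst_mgr_setup(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_device *dev,
					struct drm_dp_aux *aux,
					int conn_base_id)
{
	return drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 4, 4, 810000,
					    conn_base_id);
}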
   5569
   5570/**
   5571 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
   5572 * @mgr: manager to destroy
   5573 */
   5574void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
   5575{
   5576	drm_dp_mst_topology_mgr_set_mst(mgr, false);
   5577	flush_work(&mgr->work);
   5578	/* The following will also drain any requeued work on the WQ. */
   5579	if (mgr->delayed_destroy_wq) {
   5580		destroy_workqueue(mgr->delayed_destroy_wq);
   5581		mgr->delayed_destroy_wq = NULL;
   5582	}
   5583	mutex_lock(&mgr->payload_lock);
   5584	kfree(mgr->payloads);
   5585	mgr->payloads = NULL;
   5586	kfree(mgr->proposed_vcpis);
   5587	mgr->proposed_vcpis = NULL;
   5588	mutex_unlock(&mgr->payload_lock);
   5589	mgr->dev = NULL;
   5590	mgr->aux = NULL;
   5591	drm_atomic_private_obj_fini(&mgr->base);
   5592	mgr->funcs = NULL;
   5593
   5594	mutex_destroy(&mgr->delayed_destroy_lock);
   5595	mutex_destroy(&mgr->payload_lock);
   5596	mutex_destroy(&mgr->qlock);
   5597	mutex_destroy(&mgr->lock);
   5598	mutex_destroy(&mgr->up_req_lock);
   5599	mutex_destroy(&mgr->probe_lock);
   5600#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
   5601	mutex_destroy(&mgr->topology_ref_history_lock);
   5602#endif
   5603}
   5604EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
   5605
   5606static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
   5607{
   5608	int i;
   5609
   5610	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
   5611		return false;
   5612
   5613	for (i = 0; i < num - 1; i++) {
   5614		if (msgs[i].flags & I2C_M_RD ||
   5615		    msgs[i].len > 0xff)
   5616			return false;
   5617	}
   5618
   5619	return msgs[num - 1].flags & I2C_M_RD &&
   5620		msgs[num - 1].len <= 0xff;
   5621}
   5622
   5623static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
   5624{
   5625	int i;
   5626
   5627	for (i = 0; i < num - 1; i++) {
   5628		if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
   5629		    msgs[i].len > 0xff)
   5630			return false;
   5631	}
   5632
   5633	return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
   5634}
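
/*
 * Editorial illustration: the classic two-message EDID fetch is exactly the
 * shape remote_i2c_read_ok() accepts, i.e. num - 1 short writes followed by
 * a single read, each no longer than 0xff bytes:
 *
 *	u8 offset = 0, edid[128];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
 *	};
 */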
   5635
   5636static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
   5637			       struct drm_dp_mst_port *port,
   5638			       struct i2c_msg *msgs, int num)
   5639{
   5640	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   5641	unsigned int i;
   5642	struct drm_dp_sideband_msg_req_body msg;
   5643	struct drm_dp_sideband_msg_tx *txmsg = NULL;
   5644	int ret;
   5645
   5646	memset(&msg, 0, sizeof(msg));
   5647	msg.req_type = DP_REMOTE_I2C_READ;
   5648	msg.u.i2c_read.num_transactions = num - 1;
   5649	msg.u.i2c_read.port_number = port->port_num;
   5650	for (i = 0; i < num - 1; i++) {
   5651		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
   5652		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
   5653		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
   5654		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
   5655	}
   5656	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
   5657	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
   5658
   5659	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   5660	if (!txmsg) {
   5661		ret = -ENOMEM;
   5662		goto out;
   5663	}
   5664
   5665	txmsg->dst = mstb;
   5666	drm_dp_encode_sideband_req(&msg, txmsg);
   5667
   5668	drm_dp_queue_down_tx(mgr, txmsg);
   5669
   5670	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   5671	if (ret > 0) {
   5672
   5673		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   5674			ret = -EREMOTEIO;
   5675			goto out;
   5676		}
   5677		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
   5678			ret = -EIO;
   5679			goto out;
   5680		}
   5681		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
   5682		ret = num;
   5683	}
   5684out:
   5685	kfree(txmsg);
   5686	return ret;
   5687}
   5688
   5689static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
   5690				struct drm_dp_mst_port *port,
   5691				struct i2c_msg *msgs, int num)
   5692{
   5693	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   5694	unsigned int i;
   5695	struct drm_dp_sideband_msg_req_body msg;
   5696	struct drm_dp_sideband_msg_tx *txmsg = NULL;
   5697	int ret;
   5698
   5699	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   5700	if (!txmsg) {
   5701		ret = -ENOMEM;
   5702		goto out;
   5703	}
   5704	for (i = 0; i < num; i++) {
   5705		memset(&msg, 0, sizeof(msg));
   5706		msg.req_type = DP_REMOTE_I2C_WRITE;
   5707		msg.u.i2c_write.port_number = port->port_num;
   5708		msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
   5709		msg.u.i2c_write.num_bytes = msgs[i].len;
   5710		msg.u.i2c_write.bytes = msgs[i].buf;
   5711
   5712		memset(txmsg, 0, sizeof(*txmsg));
   5713		txmsg->dst = mstb;
   5714
   5715		drm_dp_encode_sideband_req(&msg, txmsg);
   5716		drm_dp_queue_down_tx(mgr, txmsg);
   5717
   5718		ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   5719		if (ret > 0) {
   5720			if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   5721				ret = -EREMOTEIO;
   5722				goto out;
   5723			}
   5724		} else {
   5725			goto out;
   5726		}
   5727	}
   5728	ret = num;
   5729out:
   5730	kfree(txmsg);
   5731	return ret;
   5732}
   5733
   5734/* I2C device */
   5735static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
   5736			       struct i2c_msg *msgs, int num)
   5737{
   5738	struct drm_dp_aux *aux = adapter->algo_data;
   5739	struct drm_dp_mst_port *port =
   5740		container_of(aux, struct drm_dp_mst_port, aux);
   5741	struct drm_dp_mst_branch *mstb;
   5742	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   5743	int ret;
   5744
   5745	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
   5746	if (!mstb)
   5747		return -EREMOTEIO;
   5748
   5749	if (remote_i2c_read_ok(msgs, num)) {
   5750		ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
   5751	} else if (remote_i2c_write_ok(msgs, num)) {
   5752		ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
   5753	} else {
   5754		drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
   5755		ret = -EIO;
   5756	}
   5757
   5758	drm_dp_mst_topology_put_mstb(mstb);
   5759	return ret;
   5760}
   5761
   5762static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
   5763{
   5764	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
   5765	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
   5766	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
   5767	       I2C_FUNC_10BIT_ADDR;
   5768}
   5769
   5770static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
   5771	.functionality = drm_dp_mst_i2c_functionality,
   5772	.master_xfer = drm_dp_mst_i2c_xfer,
   5773};
   5774
   5775/**
   5776 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
   5777 * @port: The port to add the I2C bus on
   5778 *
   5779 * Returns 0 on success or a negative error code on failure.
   5780 */
   5781static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
   5782{
   5783	struct drm_dp_aux *aux = &port->aux;
   5784	struct device *parent_dev = port->mgr->dev->dev;
   5785
   5786	aux->ddc.algo = &drm_dp_mst_i2c_algo;
   5787	aux->ddc.algo_data = aux;
   5788	aux->ddc.retries = 3;
   5789
   5790	aux->ddc.class = I2C_CLASS_DDC;
   5791	aux->ddc.owner = THIS_MODULE;
   5792	/* FIXME: set the kdev of the port's connector as parent */
   5793	aux->ddc.dev.parent = parent_dev;
   5794	aux->ddc.dev.of_node = parent_dev->of_node;
   5795
   5796	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
   5797		sizeof(aux->ddc.name));
   5798
   5799	return i2c_add_adapter(&aux->ddc);
   5800}
   5801
   5802/**
   5803 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
   5804 * @port: The port to remove the I2C bus from
   5805 */
   5806static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
   5807{
   5808	i2c_del_adapter(&port->aux.ddc);
   5809}
   5810
   5811/**
   5812 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
   5813 * @port: The port to check
   5814 *
   5815 * A single physical MST hub object can be represented in the topology
   5816 * by multiple branches, with virtual ports between those branches.
   5817 *
    5818	 * As of DP 1.4, an MST hub with internal (virtual) ports must expose
    5819	 * certain DPCD registers over those ports. See sections 2.6.1.1.1
    5820	 * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
   5821 *
   5822 * May acquire mgr->lock
   5823 *
   5824 * Returns:
   5825 * true if the port is a virtual DP peer device, false otherwise
   5826 */
   5827static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
   5828{
   5829	struct drm_dp_mst_port *downstream_port;
   5830
   5831	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
   5832		return false;
   5833
   5834	/* Virtual DP Sink (Internal Display Panel) */
   5835	if (port->port_num >= 8)
   5836		return true;
   5837
   5838	/* DP-to-HDMI Protocol Converter */
   5839	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
   5840	    !port->mcs &&
   5841	    port->ldps)
   5842		return true;
   5843
   5844	/* DP-to-DP */
   5845	mutex_lock(&port->mgr->lock);
   5846	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
   5847	    port->mstb &&
   5848	    port->mstb->num_ports == 2) {
   5849		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
   5850			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
   5851			    !downstream_port->input) {
   5852				mutex_unlock(&port->mgr->lock);
   5853				return true;
   5854			}
   5855		}
   5856	}
   5857	mutex_unlock(&port->mgr->lock);
   5858
   5859	return false;
   5860}
   5861
   5862/**
   5863 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
   5864 * @port: The port to check. A leaf of the MST tree with an attached display.
   5865 *
   5866 * Depending on the situation, DSC may be enabled via the endpoint aux,
   5867 * the immediately upstream aux, or the connector's physical aux.
   5868 *
   5869 * This is both the correct aux to read DSC_CAPABILITY and the
   5870 * correct aux to write DSC_ENABLED.
   5871 *
   5872 * This operation can be expensive (up to four aux reads), so
   5873 * the caller should cache the return.
   5874 *
   5875 * Returns:
   5876 * NULL if DSC cannot be enabled on this port, otherwise the aux device
   5877 */
   5878struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
   5879{
   5880	struct drm_dp_mst_port *immediate_upstream_port;
   5881	struct drm_dp_mst_port *fec_port;
   5882	struct drm_dp_desc desc = {};
   5883	u8 endpoint_fec;
   5884	u8 endpoint_dsc;
   5885
   5886	if (!port)
   5887		return NULL;
   5888
   5889	if (port->parent->port_parent)
   5890		immediate_upstream_port = port->parent->port_parent;
   5891	else
   5892		immediate_upstream_port = NULL;
   5893
   5894	fec_port = immediate_upstream_port;
   5895	while (fec_port) {
   5896		/*
   5897		 * Each physical link (i.e. not a virtual port) between the
   5898		 * output and the primary device must support FEC
   5899		 */
   5900		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
   5901		    !fec_port->fec_capable)
   5902			return NULL;
   5903
   5904		fec_port = fec_port->parent->port_parent;
   5905	}
   5906
   5907	/* DP-to-DP peer device */
   5908	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
   5909		u8 upstream_dsc;
   5910
   5911		if (drm_dp_dpcd_read(&port->aux,
   5912				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
   5913			return NULL;
   5914		if (drm_dp_dpcd_read(&port->aux,
   5915				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
   5916			return NULL;
   5917		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
   5918				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
   5919			return NULL;
   5920
    5921		/* Endpoint decompression with DP-to-DP peer device */
   5922		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
   5923		    (endpoint_fec & DP_FEC_CAPABLE) &&
    5924		    (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED))
   5925			return &port->aux;
   5926
   5927		/* Virtual DPCD decompression with DP-to-DP peer device */
   5928		return &immediate_upstream_port->aux;
   5929	}
   5930
   5931	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
   5932	if (drm_dp_mst_is_virtual_dpcd(port))
   5933		return &port->aux;
   5934
   5935	/*
   5936	 * Synaptics quirk
   5937	 * Applies to ports for which:
   5938	 * - Physical aux has Synaptics OUI
   5939	 * - DPv1.4 or higher
   5940	 * - Port is on primary branch device
   5941	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
   5942	 */
   5943	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
   5944		return NULL;
   5945
   5946	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
   5947	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
   5948	    port->parent == port->mgr->mst_primary) {
   5949		u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
   5950
   5951		if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0)
   5952			return NULL;
   5953
   5954		if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
   5955		    ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
   5956		     != DP_DWN_STRM_PORT_TYPE_ANALOG))
   5957			return port->mgr->aux;
   5958	}
   5959
   5960	/*
   5961	 * The check below verifies if the MST sink
   5962	 * connected to the GPU is capable of DSC -
   5963	 * therefore the endpoint needs to be
   5964	 * both DSC and FEC capable.
   5965	 */
   5966	if (drm_dp_dpcd_read(&port->aux,
   5967	   DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
   5968		return NULL;
   5969	if (drm_dp_dpcd_read(&port->aux,
   5970	   DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
   5971		return NULL;
   5972	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
   5973	   (endpoint_fec & DP_FEC_CAPABLE))
   5974		return &port->aux;
   5975
   5976	return NULL;
   5977}
   5978EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
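
/*
 * Editorial usage sketch: once the caller has cached the returned aux, DSC
 * can be switched on by writing DP_DSC_ENABLE through it, e.g.:
 *
 *	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *
 *	if (dsc_aux)
 *		drm_dp_dpcd_writeb(dsc_aux, DP_DSC_ENABLE, DP_DECOMPRESSION_EN);
 */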