cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_hdcp.c (73699B)


      1/* SPDX-License-Identifier: MIT */
      2/*
      3 * Copyright (C) 2017 Google, Inc.
      4 * Copyright © 2017-2019, Intel Corporation.
      5 *
      6 * Authors:
      7 * Sean Paul <seanpaul@chromium.org>
      8 * Ramalingam C <ramalingam.c@intel.com>
      9 */
     10
     11#include <linux/component.h>
     12#include <linux/i2c.h>
     13#include <linux/random.h>
     14
     15#include <drm/display/drm_hdcp_helper.h>
     16#include <drm/i915_component.h>
     17
     18#include "i915_drv.h"
     19#include "i915_reg.h"
     20#include "intel_connector.h"
     21#include "intel_de.h"
     22#include "intel_display_power.h"
     23#include "intel_display_power_well.h"
     24#include "intel_display_types.h"
     25#include "intel_hdcp.h"
     26#include "intel_pcode.h"
     27
     28#define KEY_LOAD_TRIES	5
     29#define HDCP2_LC_RETRY_CNT			3
     30
     31static int intel_conn_to_vcpi(struct intel_connector *connector)
     32{
     33	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
     34	return connector->port	? connector->port->vcpi.vcpi : 0;
     35}
     36
     37/*
      38 * intel_hdcp_required_content_stream selects the highest common HDCP
      39 * content_type for all streams in a DP MST topology, because the security
      40 * f/w has no provision to mark the content_type of each stream separately:
      41 * it marks all available streams with the content_type provided at the time
      42 * of port authentication. This may prevent userspace from using type1
      43 * content on an HDCP 2.2 capable sink when other sinks in the DP MST
      44 * topology are not HDCP 2.2 capable. Though it is not compulsory, the
      45 * security fw should change its policy to mark different content_types
      45 * for different streams.
     46 */
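        /*
         * For example: a topology with one HDCP 2.2 sink and one HDCP 1.4-only
         * sink on the same MST port gets Type 0 on both streams, since the f/w
         * applies a single content_type to the whole port.
         */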
     47static int
     48intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
     49{
     50	struct drm_connector_list_iter conn_iter;
     51	struct intel_digital_port *conn_dig_port;
     52	struct intel_connector *connector;
     53	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
     54	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
     55	bool enforce_type0 = false;
     56	int k;
     57
     58	data->k = 0;
     59
     60	if (dig_port->hdcp_auth_status)
     61		return 0;
     62
     63	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
     64	for_each_intel_connector_iter(connector, &conn_iter) {
     65		if (connector->base.status == connector_status_disconnected)
     66			continue;
     67
     68		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
     69			continue;
     70
     71		conn_dig_port = intel_attached_dig_port(connector);
     72		if (conn_dig_port != dig_port)
     73			continue;
     74
     75		if (!enforce_type0 && !dig_port->hdcp_mst_type1_capable)
     76			enforce_type0 = true;
     77
     78		data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
     79		data->k++;
     80
     81		/* if there is only one active stream */
     82		if (dig_port->dp.active_mst_links <= 1)
     83			break;
     84	}
     85	drm_connector_list_iter_end(&conn_iter);
     86
     87	if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
     88		return -EINVAL;
     89
     90	/*
     91	 * Apply common protection level across all streams in DP MST Topology.
     92	 * Use highest supported content type for all streams in DP MST Topology.
     93	 */
     94	for (k = 0; k < data->k; k++)
     95		data->streams[k].stream_type =
     96			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
     97
     98	return 0;
     99}
    100
    101static int intel_hdcp_prepare_streams(struct intel_connector *connector)
    102{
    103	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
    104	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
    105	struct intel_hdcp *hdcp = &connector->hdcp;
    106	int ret;
    107
    108	if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
    109		data->k = 1;
    110		data->streams[0].stream_type = hdcp->content_type;
    111	} else {
    112		ret = intel_hdcp_required_content_stream(dig_port);
    113		if (ret)
    114			return ret;
    115	}
    116
    117	return 0;
    118}
    119
    120static
    121bool intel_hdcp_is_ksv_valid(u8 *ksv)
    122{
    123	int i, ones = 0;
    124	/* KSV has 20 1's and 20 0's */
    125	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
    126		ones += hweight8(ksv[i]);
    127	if (ones != 20)
    128		return false;
    129
    130	return true;
    131}
    132
    133static
    134int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
    135			       const struct intel_hdcp_shim *shim, u8 *bksv)
    136{
    137	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
    138	int ret, i, tries = 2;
    139
    140	/* HDCP spec states that we must retry the bksv if it is invalid */
    141	for (i = 0; i < tries; i++) {
    142		ret = shim->read_bksv(dig_port, bksv);
    143		if (ret)
    144			return ret;
    145		if (intel_hdcp_is_ksv_valid(bksv))
    146			break;
    147	}
    148	if (i == tries) {
    149		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
    150		return -ENODEV;
    151	}
    152
    153	return 0;
    154}
    155
    156/* Is HDCP1.4 capable on Platform and Sink */
    157bool intel_hdcp_capable(struct intel_connector *connector)
    158{
    159	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
    160	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
    161	bool capable = false;
    162	u8 bksv[5];
    163
    164	if (!shim)
    165		return capable;
    166
    167	if (shim->hdcp_capable) {
    168		shim->hdcp_capable(dig_port, &capable);
    169	} else {
    170		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
    171			capable = true;
    172	}
    173
    174	return capable;
    175}
    176
    177/* Is HDCP2.2 capable on Platform and Sink */
    178bool intel_hdcp2_capable(struct intel_connector *connector)
    179{
    180	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
    181	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
    182	struct intel_hdcp *hdcp = &connector->hdcp;
    183	bool capable = false;
    184
    185	/* I915 support for HDCP2.2 */
    186	if (!hdcp->hdcp2_supported)
    187		return false;
    188
    189	/* MEI interface is solid */
    190	mutex_lock(&dev_priv->hdcp_comp_mutex);
    191	if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
    192		mutex_unlock(&dev_priv->hdcp_comp_mutex);
    193		return false;
    194	}
    195	mutex_unlock(&dev_priv->hdcp_comp_mutex);
    196
    197	/* Sink's capability for HDCP2.2 */
    198	hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
    199
    200	return capable;
    201}
    202
    203static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
    204			      enum transcoder cpu_transcoder, enum port port)
    205{
    206	return intel_de_read(dev_priv,
    207	                     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
    208	       HDCP_STATUS_ENC;
    209}
    210
    211static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
    212			       enum transcoder cpu_transcoder, enum port port)
    213{
    214	return intel_de_read(dev_priv,
    215	                     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
    216	       LINK_ENCRYPTION_STATUS;
    217}
    218
    219static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
    220				    const struct intel_hdcp_shim *shim)
    221{
    222	int ret, read_ret;
    223	bool ksv_ready;
    224
    225	/* Poll for ksv list ready (spec says max time allowed is 5s) */
    226	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
    227							 &ksv_ready),
    228			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
    229			 100 * 1000);
    230	if (ret)
    231		return ret;
    232	if (read_ret)
    233		return read_ret;
    234	if (!ksv_ready)
    235		return -ETIMEDOUT;
    236
    237	return 0;
    238}
    239
    240static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
    241{
    242	enum i915_power_well_id id;
    243	intel_wakeref_t wakeref;
    244	bool enabled = false;
    245
    246	/*
    247	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
    248	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
    249	 */
    250	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
    251		id = HSW_DISP_PW_GLOBAL;
    252	else
    253		id = SKL_DISP_PW_1;
    254
    255	/* PG1 (power well #1) needs to be enabled */
    256	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
    257		enabled = intel_display_power_well_is_enabled(dev_priv, id);
    258
    259	/*
     260	 * Another requirement for HDCP key loadability is that the cdclk
     261	 * PLL is enabled. Without an active crtc we won't land here, so we
     262	 * assume that cdclk is already on.
    263	 */
    264
    265	return enabled;
    266}
    267
    268static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
    269{
    270	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
    271	intel_de_write(dev_priv, HDCP_KEY_STATUS,
    272		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
    273}
    274
    275static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
    276{
    277	int ret;
    278	u32 val;
    279
    280	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
    281	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
    282		return 0;
    283
    284	/*
    285	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
     286	 * out of reset. So if the Key is not already loaded, it's an error state.
    287	 */
    288	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
    289		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
    290			return -ENXIO;
    291
    292	/*
    293	 * Initiate loading the HDCP key from fuses.
    294	 *
     295	 * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only
     296	 * display version 9 platforms (minus BXT) differ in the key load
     297	 * trigger process from other platforms: they use the GT Driver
     298	 * Mailbox interface.
    299	 */
    300	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
    301		ret = snb_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1);
    302		if (ret) {
    303			drm_err(&dev_priv->drm,
    304				"Failed to initiate HDCP key load (%d)\n",
    305				ret);
    306			return ret;
    307		}
    308	} else {
    309		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
    310	}
    311
    312	/* Wait for the keys to load (500us) */
    313	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
    314					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
    315					10, 1, &val);
    316	if (ret)
    317		return ret;
    318	else if (!(val & HDCP_KEY_LOAD_STATUS))
    319		return -ENXIO;
    320
    321	/* Send Aksv over to PCH display for use in authentication */
    322	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
    323
    324	return 0;
    325}
    326
     327/* Write 32 bits of the SHA-1 text stream; returns 0 or a negative error code */
    328static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
    329{
    330	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
    331	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
    332		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
    333		return -ETIMEDOUT;
    334	}
    335	return 0;
    336}
    337
    338static
    339u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
    340				enum transcoder cpu_transcoder, enum port port)
    341{
    342	if (DISPLAY_VER(dev_priv) >= 12) {
    343		switch (cpu_transcoder) {
    344		case TRANSCODER_A:
    345			return HDCP_TRANSA_REP_PRESENT |
    346			       HDCP_TRANSA_SHA1_M0;
    347		case TRANSCODER_B:
    348			return HDCP_TRANSB_REP_PRESENT |
    349			       HDCP_TRANSB_SHA1_M0;
    350		case TRANSCODER_C:
    351			return HDCP_TRANSC_REP_PRESENT |
    352			       HDCP_TRANSC_SHA1_M0;
    353		case TRANSCODER_D:
    354			return HDCP_TRANSD_REP_PRESENT |
    355			       HDCP_TRANSD_SHA1_M0;
    356		default:
    357			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
    358				cpu_transcoder);
    359			return -EINVAL;
    360		}
    361	}
    362
    363	switch (port) {
    364	case PORT_A:
    365		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
    366	case PORT_B:
    367		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
    368	case PORT_C:
    369		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
    370	case PORT_D:
    371		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
    372	case PORT_E:
    373		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
    374	default:
    375		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
    376		return -EINVAL;
    377	}
    378}
    379
    380static
    381int intel_hdcp_validate_v_prime(struct intel_connector *connector,
    382				const struct intel_hdcp_shim *shim,
    383				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
    384{
    385	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
    386	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
    387	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
    388	enum port port = dig_port->base.port;
    389	u32 vprime, sha_text, sha_leftovers, rep_ctl;
    390	int ret, i, j, sha_idx;
    391
    392	/* Process V' values from the receiver */
    393	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
    394		ret = shim->read_v_prime_part(dig_port, i, &vprime);
    395		if (ret)
    396			return ret;
    397		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
    398	}
    399
    400	/*
    401	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
    402	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
    403	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
    404	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
    405	 * index will keep track of our progress through the 64 bytes as well as
    406	 * helping us work the 40-bit KSVs through our 32-bit register.
    407	 *
    408	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
    409	 */
    410	sha_idx = 0;
    411	sha_text = 0;
    412	sha_leftovers = 0;
    413	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
    414	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
    415	for (i = 0; i < num_downstream; i++) {
    416		unsigned int sha_empty;
    417		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
    418
    419		/* Fill up the empty slots in sha_text and write it out */
    420		sha_empty = sizeof(sha_text) - sha_leftovers;
    421		for (j = 0; j < sha_empty; j++) {
    422			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
    423			sha_text |= ksv[j] << off;
    424		}
    425
    426		ret = intel_write_sha_text(dev_priv, sha_text);
    427		if (ret < 0)
    428			return ret;
    429
    430		/* Programming guide writes this every 64 bytes */
    431		sha_idx += sizeof(sha_text);
    432		if (!(sha_idx % 64))
    433			intel_de_write(dev_priv, HDCP_REP_CTL,
    434				       rep_ctl | HDCP_SHA1_TEXT_32);
    435
    436		/* Store the leftover bytes from the ksv in sha_text */
    437		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
    438		sha_text = 0;
    439		for (j = 0; j < sha_leftovers; j++)
    440			sha_text |= ksv[sha_empty + j] <<
    441					((sizeof(sha_text) - j - 1) * 8);
    442
    443		/*
    444		 * If we still have room in sha_text for more data, continue.
    445		 * Otherwise, write it out immediately.
    446		 */
    447		if (sizeof(sha_text) > sha_leftovers)
    448			continue;
    449
    450		ret = intel_write_sha_text(dev_priv, sha_text);
    451		if (ret < 0)
    452			return ret;
    453		sha_leftovers = 0;
    454		sha_text = 0;
    455		sha_idx += sizeof(sha_text);
    456	}
    457
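        	/*
        	 * Each KSV is 5 bytes pushed through a 4-byte register, so at this
        	 * point sha_leftovers can only be 0, 1, 2 or 3; those are exactly
        	 * the four cases handled below.
        	 */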
    458	/*
    459	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
    460	 * bytes are leftover from the last ksv, we might be able to fit them
    461	 * all in sha_text (first 2 cases), or we might need to split them up
    462	 * into 2 writes (last 2 cases).
    463	 */
    464	if (sha_leftovers == 0) {
    465		/* Write 16 bits of text, 16 bits of M0 */
    466		intel_de_write(dev_priv, HDCP_REP_CTL,
    467			       rep_ctl | HDCP_SHA1_TEXT_16);
    468		ret = intel_write_sha_text(dev_priv,
    469					   bstatus[0] << 8 | bstatus[1]);
    470		if (ret < 0)
    471			return ret;
    472		sha_idx += sizeof(sha_text);
    473
    474		/* Write 32 bits of M0 */
    475		intel_de_write(dev_priv, HDCP_REP_CTL,
    476			       rep_ctl | HDCP_SHA1_TEXT_0);
    477		ret = intel_write_sha_text(dev_priv, 0);
    478		if (ret < 0)
    479			return ret;
    480		sha_idx += sizeof(sha_text);
    481
    482		/* Write 16 bits of M0 */
    483		intel_de_write(dev_priv, HDCP_REP_CTL,
    484			       rep_ctl | HDCP_SHA1_TEXT_16);
    485		ret = intel_write_sha_text(dev_priv, 0);
    486		if (ret < 0)
    487			return ret;
    488		sha_idx += sizeof(sha_text);
    489
    490	} else if (sha_leftovers == 1) {
    491		/* Write 24 bits of text, 8 bits of M0 */
    492		intel_de_write(dev_priv, HDCP_REP_CTL,
    493			       rep_ctl | HDCP_SHA1_TEXT_24);
    494		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
    495		/* Only 24-bits of data, must be in the LSB */
    496		sha_text = (sha_text & 0xffffff00) >> 8;
    497		ret = intel_write_sha_text(dev_priv, sha_text);
    498		if (ret < 0)
    499			return ret;
    500		sha_idx += sizeof(sha_text);
    501
    502		/* Write 32 bits of M0 */
    503		intel_de_write(dev_priv, HDCP_REP_CTL,
    504			       rep_ctl | HDCP_SHA1_TEXT_0);
    505		ret = intel_write_sha_text(dev_priv, 0);
    506		if (ret < 0)
    507			return ret;
    508		sha_idx += sizeof(sha_text);
    509
    510		/* Write 24 bits of M0 */
    511		intel_de_write(dev_priv, HDCP_REP_CTL,
    512			       rep_ctl | HDCP_SHA1_TEXT_8);
    513		ret = intel_write_sha_text(dev_priv, 0);
    514		if (ret < 0)
    515			return ret;
    516		sha_idx += sizeof(sha_text);
    517
    518	} else if (sha_leftovers == 2) {
    519		/* Write 32 bits of text */
    520		intel_de_write(dev_priv, HDCP_REP_CTL,
    521			       rep_ctl | HDCP_SHA1_TEXT_32);
    522		sha_text |= bstatus[0] << 8 | bstatus[1];
    523		ret = intel_write_sha_text(dev_priv, sha_text);
    524		if (ret < 0)
    525			return ret;
    526		sha_idx += sizeof(sha_text);
    527
    528		/* Write 64 bits of M0 */
    529		intel_de_write(dev_priv, HDCP_REP_CTL,
    530			       rep_ctl | HDCP_SHA1_TEXT_0);
    531		for (i = 0; i < 2; i++) {
    532			ret = intel_write_sha_text(dev_priv, 0);
    533			if (ret < 0)
    534				return ret;
    535			sha_idx += sizeof(sha_text);
    536		}
    537
    538		/*
    539		 * Terminate the SHA-1 stream by hand. For the other leftover
    540		 * cases this is appended by the hardware.
    541		 */
    542		intel_de_write(dev_priv, HDCP_REP_CTL,
    543			       rep_ctl | HDCP_SHA1_TEXT_32);
    544		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
    545		ret = intel_write_sha_text(dev_priv, sha_text);
    546		if (ret < 0)
    547			return ret;
    548		sha_idx += sizeof(sha_text);
    549	} else if (sha_leftovers == 3) {
    550		/* Write 32 bits of text (filled from LSB) */
    551		intel_de_write(dev_priv, HDCP_REP_CTL,
    552			       rep_ctl | HDCP_SHA1_TEXT_32);
    553		sha_text |= bstatus[0];
    554		ret = intel_write_sha_text(dev_priv, sha_text);
    555		if (ret < 0)
    556			return ret;
    557		sha_idx += sizeof(sha_text);
    558
    559		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
    560		intel_de_write(dev_priv, HDCP_REP_CTL,
    561			       rep_ctl | HDCP_SHA1_TEXT_8);
    562		ret = intel_write_sha_text(dev_priv, bstatus[1]);
    563		if (ret < 0)
    564			return ret;
    565		sha_idx += sizeof(sha_text);
    566
    567		/* Write 32 bits of M0 */
    568		intel_de_write(dev_priv, HDCP_REP_CTL,
    569			       rep_ctl | HDCP_SHA1_TEXT_0);
    570		ret = intel_write_sha_text(dev_priv, 0);
    571		if (ret < 0)
    572			return ret;
    573		sha_idx += sizeof(sha_text);
    574
    575		/* Write 8 bits of M0 */
    576		intel_de_write(dev_priv, HDCP_REP_CTL,
    577			       rep_ctl | HDCP_SHA1_TEXT_24);
    578		ret = intel_write_sha_text(dev_priv, 0);
    579		if (ret < 0)
    580			return ret;
    581		sha_idx += sizeof(sha_text);
    582	} else {
    583		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
    584			    sha_leftovers);
    585		return -EINVAL;
    586	}
    587
    588	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
    589	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
    590	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
    591		ret = intel_write_sha_text(dev_priv, 0);
    592		if (ret < 0)
    593			return ret;
    594		sha_idx += sizeof(sha_text);
    595	}
    596
    597	/*
    598	 * Last write gets the length of the concatenation in bits. That is:
    599	 *  - 5 bytes per device
    600	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
    601	 */
    602	sha_text = (num_downstream * 5 + 10) * 8;
    603	ret = intel_write_sha_text(dev_priv, sha_text);
    604	if (ret < 0)
    605		return ret;
    606
    607	/* Tell the HW we're done with the hash and wait for it to ACK */
    608	intel_de_write(dev_priv, HDCP_REP_CTL,
    609		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
    610	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
    611				  HDCP_SHA1_COMPLETE, 1)) {
    612		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
    613		return -ETIMEDOUT;
    614	}
    615	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
    616		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
    617		return -ENXIO;
    618	}
    619
    620	return 0;
    621}
    622
    623/* Implements Part 2 of the HDCP authorization procedure */
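        /*
         * Part 2 (repeater authentication): wait for the sink's KSV FIFO to
         * become ready, read the downstream KSV list and BSTATUS, and validate
         * the repeater's V' against the SHA-1 hash computed by the hardware.
         */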
    624static
    625int intel_hdcp_auth_downstream(struct intel_connector *connector)
    626{
    627	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
    628	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
    629	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
    630	u8 bstatus[2], num_downstream, *ksv_fifo;
    631	int ret, i, tries = 3;
    632
    633	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
    634	if (ret) {
    635		drm_dbg_kms(&dev_priv->drm,
    636			    "KSV list failed to become ready (%d)\n", ret);
    637		return ret;
    638	}
    639
    640	ret = shim->read_bstatus(dig_port, bstatus);
    641	if (ret)
    642		return ret;
    643
    644	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
    645	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
    646		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
    647		return -EPERM;
    648	}
    649
    650	/*
     651	 * When a repeater reports a device count of 0, the HDCP1.4 spec
     652	 * allows disabling the HDCP encryption. That implies the repeater
     653	 * can't have its own display. As there is no consumer of encrypted
     654	 * content in a repeater with 0 downstream devices, we fail the
     655	 * authentication.
    656	 */
    657	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
    658	if (num_downstream == 0) {
    659		drm_dbg_kms(&dev_priv->drm,
    660			    "Repeater with zero downstream devices\n");
    661		return -EINVAL;
    662	}
    663
    664	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
    665	if (!ksv_fifo) {
    666		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
    667		return -ENOMEM;
    668	}
    669
    670	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
    671	if (ret)
    672		goto err;
    673
    674	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
    675					num_downstream) > 0) {
    676		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
    677		ret = -EPERM;
    678		goto err;
    679	}
    680
    681	/*
     682	 * When V prime mismatches, the DP spec mandates re-reading
     683	 * V prime at least twice.
    684	 */
    685	for (i = 0; i < tries; i++) {
    686		ret = intel_hdcp_validate_v_prime(connector, shim,
    687						  ksv_fifo, num_downstream,
    688						  bstatus);
    689		if (!ret)
    690			break;
    691	}
    692
    693	if (i == tries) {
    694		drm_dbg_kms(&dev_priv->drm,
    695			    "V Prime validation failed.(%d)\n", ret);
    696		goto err;
    697	}
    698
    699	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
    700		    num_downstream);
    701	ret = 0;
    702err:
    703	kfree(ksv_fifo);
    704	return ret;
    705}
    706
    707/* Implements Part 1 of the HDCP authorization procedure */
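        /*
         * Part 1: generate An, send An/Aksv to the sink, read back a valid
         * Bksv, enable authentication/encryption and compare R0 with the
         * sink's R0'. If a repeater is present, Part 2 follows.
         */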
    708static int intel_hdcp_auth(struct intel_connector *connector)
    709{
    710	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
    711	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
    712	struct intel_hdcp *hdcp = &connector->hdcp;
    713	const struct intel_hdcp_shim *shim = hdcp->shim;
    714	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
    715	enum port port = dig_port->base.port;
    716	unsigned long r0_prime_gen_start;
    717	int ret, i, tries = 2;
    718	union {
    719		u32 reg[2];
    720		u8 shim[DRM_HDCP_AN_LEN];
    721	} an;
    722	union {
    723		u32 reg[2];
    724		u8 shim[DRM_HDCP_KSV_LEN];
    725	} bksv;
    726	union {
    727		u32 reg;
    728		u8 shim[DRM_HDCP_RI_LEN];
    729	} ri;
    730	bool repeater_present, hdcp_capable;
    731
    732	/*
    733	 * Detects whether the display is HDCP capable. Although we check for
    734	 * valid Bksv below, the HDCP over DP spec requires that we check
    735	 * whether the display supports HDCP before we write An. For HDMI
    736	 * displays, this is not necessary.
    737	 */
    738	if (shim->hdcp_capable) {
    739		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
    740		if (ret)
    741			return ret;
    742		if (!hdcp_capable) {
    743			drm_dbg_kms(&dev_priv->drm,
    744				    "Panel is not HDCP capable\n");
    745			return -EINVAL;
    746		}
    747	}
    748
    749	/* Initialize An with 2 random values and acquire it */
    750	for (i = 0; i < 2; i++)
    751		intel_de_write(dev_priv,
    752			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
    753			       get_random_u32());
    754	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
    755		       HDCP_CONF_CAPTURE_AN);
    756
    757	/* Wait for An to be acquired */
    758	if (intel_de_wait_for_set(dev_priv,
    759				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
    760				  HDCP_STATUS_AN_READY, 1)) {
    761		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
    762		return -ETIMEDOUT;
    763	}
    764
    765	an.reg[0] = intel_de_read(dev_priv,
    766				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
    767	an.reg[1] = intel_de_read(dev_priv,
    768				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
    769	ret = shim->write_an_aksv(dig_port, an.shim);
    770	if (ret)
    771		return ret;
    772
    773	r0_prime_gen_start = jiffies;
    774
    775	memset(&bksv, 0, sizeof(bksv));
    776
    777	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
    778	if (ret < 0)
    779		return ret;
    780
    781	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
    782		drm_err(&dev_priv->drm, "BKSV is revoked\n");
    783		return -EPERM;
    784	}
    785
    786	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
    787		       bksv.reg[0]);
    788	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
    789		       bksv.reg[1]);
    790
    791	ret = shim->repeater_present(dig_port, &repeater_present);
    792	if (ret)
    793		return ret;
    794	if (repeater_present)
    795		intel_de_write(dev_priv, HDCP_REP_CTL,
    796			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
    797
    798	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
    799	if (ret)
    800		return ret;
    801
    802	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
    803		       HDCP_CONF_AUTH_AND_ENC);
    804
    805	/* Wait for R0 ready */
    806	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
    807		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
    808		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
    809		return -ETIMEDOUT;
    810	}
    811
    812	/*
    813	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
    814	 * some monitors can take longer than this. We'll set the timeout at
    815	 * 300ms just to be sure.
    816	 *
    817	 * On DP, there's an R0_READY bit available but no such bit
    818	 * exists on HDMI. Since the upper-bound is the same, we'll just do
    819	 * the stupid thing instead of polling on one and not the other.
    820	 */
    821	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
    822
    823	tries = 3;
    824
    825	/*
     826	 * The DP HDCP spec mandates two more attempts to read R0, in case
     827	 * of an R0 mismatch.
    828	 */
    829	for (i = 0; i < tries; i++) {
    830		ri.reg = 0;
    831		ret = shim->read_ri_prime(dig_port, ri.shim);
    832		if (ret)
    833			return ret;
    834		intel_de_write(dev_priv,
    835			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
    836			       ri.reg);
    837
    838		/* Wait for Ri prime match */
    839		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
    840			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
    841			break;
    842	}
    843
    844	if (i == tries) {
    845		drm_dbg_kms(&dev_priv->drm,
    846			    "Timed out waiting for Ri prime match (%x)\n",
    847			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
    848					  cpu_transcoder, port)));
    849		return -ETIMEDOUT;
    850	}
    851
    852	/* Wait for encryption confirmation */
    853	if (intel_de_wait_for_set(dev_priv,
    854				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
    855				  HDCP_STATUS_ENC,
    856				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
    857		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
    858		return -ETIMEDOUT;
    859	}
    860
    861	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
    862	if (shim->stream_encryption) {
    863		ret = shim->stream_encryption(connector, true);
    864		if (ret) {
    865			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
    866				connector->base.name, connector->base.base.id);
    867			return ret;
    868		}
    869		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
    870			    transcoder_name(hdcp->stream_transcoder));
    871	}
    872
    873	if (repeater_present)
    874		return intel_hdcp_auth_downstream(connector);
    875
    876	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
    877	return 0;
    878}
    879
    880static int _intel_hdcp_disable(struct intel_connector *connector)
    881{
    882	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
    883	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
    884	struct intel_hdcp *hdcp = &connector->hdcp;
    885	enum port port = dig_port->base.port;
    886	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
    887	u32 repeater_ctl;
    888	int ret;
    889
    890	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
    891		    connector->base.name, connector->base.base.id);
    892
    893	if (hdcp->shim->stream_encryption) {
    894		ret = hdcp->shim->stream_encryption(connector, false);
    895		if (ret) {
    896			drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
    897				connector->base.name, connector->base.base.id);
    898			return ret;
    899		}
    900		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
    901			    transcoder_name(hdcp->stream_transcoder));
    902		/*
    903		 * If there are other connectors on this port using HDCP,
     904		 * don't disable it until HDCP encryption has been disabled
     905		 * for all connectors in the MST topology.
    906		 */
    907		if (dig_port->num_hdcp_streams > 0)
    908			return 0;
    909	}
    910
    911	hdcp->hdcp_encrypted = false;
    912	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
    913	if (intel_de_wait_for_clear(dev_priv,
    914				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
    915				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
    916		drm_err(&dev_priv->drm,
    917			"Failed to disable HDCP, timeout clearing status\n");
    918		return -ETIMEDOUT;
    919	}
    920
    921	repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
    922						   port);
    923	intel_de_write(dev_priv, HDCP_REP_CTL,
    924		       intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
    925
    926	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
    927	if (ret) {
    928		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
    929		return ret;
    930	}
    931
    932	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
    933	return 0;
    934}
    935
    936static int _intel_hdcp_enable(struct intel_connector *connector)
    937{
    938	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
    939	struct intel_hdcp *hdcp = &connector->hdcp;
    940	int i, ret, tries = 3;
    941
    942	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
    943		    connector->base.name, connector->base.base.id);
    944
    945	if (!hdcp_key_loadable(dev_priv)) {
    946		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
    947		return -ENXIO;
    948	}
    949
    950	for (i = 0; i < KEY_LOAD_TRIES; i++) {
    951		ret = intel_hdcp_load_keys(dev_priv);
    952		if (!ret)
    953			break;
    954		intel_hdcp_clear_keys(dev_priv);
    955	}
    956	if (ret) {
    957		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
    958			ret);
    959		return ret;
    960	}
    961
     962	/* In case of authentication failures, the HDCP spec expects reauth. */
    963	for (i = 0; i < tries; i++) {
    964		ret = intel_hdcp_auth(connector);
    965		if (!ret) {
    966			hdcp->hdcp_encrypted = true;
    967			return 0;
    968		}
    969
    970		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
    971
    972		/* Ensuring HDCP encryption and signalling are stopped. */
    973		_intel_hdcp_disable(connector);
    974	}
    975
    976	drm_dbg_kms(&dev_priv->drm,
    977		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
    978	return ret;
    979}
    980
    981static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
    982{
    983	return container_of(hdcp, struct intel_connector, hdcp);
    984}
    985
    986static void intel_hdcp_update_value(struct intel_connector *connector,
    987				    u64 value, bool update_property)
    988{
    989	struct drm_device *dev = connector->base.dev;
    990	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
    991	struct intel_hdcp *hdcp = &connector->hdcp;
    992
    993	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
    994
    995	if (hdcp->value == value)
    996		return;
    997
    998	drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));
    999
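        	/*
        	 * Keep dig_port->num_hdcp_streams in sync with the number of
        	 * connectors on this port that are currently ENABLED; when MST
        	 * stream encryption is in use, _intel_hdcp_disable() checks it
        	 * before tearing down port-level encryption.
        	 */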
   1000	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
   1001		if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
   1002			dig_port->num_hdcp_streams--;
   1003	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
   1004		dig_port->num_hdcp_streams++;
   1005	}
   1006
   1007	hdcp->value = value;
   1008	if (update_property) {
   1009		drm_connector_get(&connector->base);
   1010		schedule_work(&hdcp->prop_work);
   1011	}
   1012}
   1013
   1014/* Implements Part 3 of the HDCP authorization procedure */
   1015static int intel_hdcp_check_link(struct intel_connector *connector)
   1016{
   1017	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1018	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1019	struct intel_hdcp *hdcp = &connector->hdcp;
   1020	enum port port = dig_port->base.port;
   1021	enum transcoder cpu_transcoder;
   1022	int ret = 0;
   1023
   1024	mutex_lock(&hdcp->mutex);
   1025	mutex_lock(&dig_port->hdcp_mutex);
   1026
   1027	cpu_transcoder = hdcp->cpu_transcoder;
   1028
   1029	/* Check_link valid only when HDCP1.4 is enabled */
   1030	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
   1031	    !hdcp->hdcp_encrypted) {
   1032		ret = -EINVAL;
   1033		goto out;
   1034	}
   1035
   1036	if (drm_WARN_ON(&dev_priv->drm,
   1037			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
   1038		drm_err(&dev_priv->drm,
   1039			"%s:%d HDCP link stopped encryption,%x\n",
   1040			connector->base.name, connector->base.base.id,
   1041			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
   1042		ret = -ENXIO;
   1043		intel_hdcp_update_value(connector,
   1044					DRM_MODE_CONTENT_PROTECTION_DESIRED,
   1045					true);
   1046		goto out;
   1047	}
   1048
   1049	if (hdcp->shim->check_link(dig_port, connector)) {
   1050		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
   1051			intel_hdcp_update_value(connector,
   1052				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
   1053		}
   1054		goto out;
   1055	}
   1056
   1057	drm_dbg_kms(&dev_priv->drm,
   1058		    "[%s:%d] HDCP link failed, retrying authentication\n",
   1059		    connector->base.name, connector->base.base.id);
   1060
   1061	ret = _intel_hdcp_disable(connector);
   1062	if (ret) {
   1063		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
   1064		intel_hdcp_update_value(connector,
   1065					DRM_MODE_CONTENT_PROTECTION_DESIRED,
   1066					true);
   1067		goto out;
   1068	}
   1069
   1070	ret = _intel_hdcp_enable(connector);
   1071	if (ret) {
   1072		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
   1073		intel_hdcp_update_value(connector,
   1074					DRM_MODE_CONTENT_PROTECTION_DESIRED,
   1075					true);
   1076		goto out;
   1077	}
   1078
   1079out:
   1080	mutex_unlock(&dig_port->hdcp_mutex);
   1081	mutex_unlock(&hdcp->mutex);
   1082	return ret;
   1083}
   1084
   1085static void intel_hdcp_prop_work(struct work_struct *work)
   1086{
   1087	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
   1088					       prop_work);
   1089	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
   1090	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1091
   1092	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
   1093	mutex_lock(&hdcp->mutex);
   1094
   1095	/*
    1096	 * This worker is only used to flip between ENABLED/DESIRED. Transitions
    1097	 * from either of those to UNDESIRED are handled by the core. If value ==
    1098	 * UNDESIRED, we're running just after hdcp has been disabled, so just exit.
   1099	 */
   1100	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
   1101		drm_hdcp_update_content_protection(&connector->base,
   1102						   hdcp->value);
   1103
   1104	mutex_unlock(&hdcp->mutex);
   1105	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
   1106
   1107	drm_connector_put(&connector->base);
   1108}
   1109
   1110bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
   1111{
   1112	return INTEL_INFO(dev_priv)->display.has_hdcp &&
   1113			(DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
   1114}
   1115
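        /*
         * All HDCP2.2 crypto operations below are delegated to the ME firmware
         * through the i915/mei_hdcp component (comp->ops). comp is only valid
         * while the component is bound, hence hdcp_comp_mutex is held around
         * every call.
         */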
   1116static int
   1117hdcp2_prepare_ake_init(struct intel_connector *connector,
   1118		       struct hdcp2_ake_init *ake_data)
   1119{
   1120	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1121	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   1122	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1123	struct i915_hdcp_comp_master *comp;
   1124	int ret;
   1125
   1126	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1127	comp = dev_priv->hdcp_master;
   1128
   1129	if (!comp || !comp->ops) {
   1130		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1131		return -EINVAL;
   1132	}
   1133
   1134	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
   1135	if (ret)
   1136		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
   1137			    ret);
   1138	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1139
   1140	return ret;
   1141}
   1142
   1143static int
   1144hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
   1145				struct hdcp2_ake_send_cert *rx_cert,
   1146				bool *paired,
   1147				struct hdcp2_ake_no_stored_km *ek_pub_km,
   1148				size_t *msg_sz)
   1149{
   1150	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1151	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   1152	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1153	struct i915_hdcp_comp_master *comp;
   1154	int ret;
   1155
   1156	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1157	comp = dev_priv->hdcp_master;
   1158
   1159	if (!comp || !comp->ops) {
   1160		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1161		return -EINVAL;
   1162	}
   1163
   1164	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
   1165							 rx_cert, paired,
   1166							 ek_pub_km, msg_sz);
   1167	if (ret < 0)
   1168		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
   1169			    ret);
   1170	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1171
   1172	return ret;
   1173}
   1174
   1175static int hdcp2_verify_hprime(struct intel_connector *connector,
   1176			       struct hdcp2_ake_send_hprime *rx_hprime)
   1177{
   1178	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1179	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   1180	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1181	struct i915_hdcp_comp_master *comp;
   1182	int ret;
   1183
   1184	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1185	comp = dev_priv->hdcp_master;
   1186
   1187	if (!comp || !comp->ops) {
   1188		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1189		return -EINVAL;
   1190	}
   1191
   1192	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
   1193	if (ret < 0)
   1194		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
   1195	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1196
   1197	return ret;
   1198}
   1199
   1200static int
   1201hdcp2_store_pairing_info(struct intel_connector *connector,
   1202			 struct hdcp2_ake_send_pairing_info *pairing_info)
   1203{
   1204	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1205	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   1206	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1207	struct i915_hdcp_comp_master *comp;
   1208	int ret;
   1209
   1210	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1211	comp = dev_priv->hdcp_master;
   1212
   1213	if (!comp || !comp->ops) {
   1214		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1215		return -EINVAL;
   1216	}
   1217
   1218	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
   1219	if (ret < 0)
   1220		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
   1221			    ret);
   1222	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1223
   1224	return ret;
   1225}
   1226
   1227static int
   1228hdcp2_prepare_lc_init(struct intel_connector *connector,
   1229		      struct hdcp2_lc_init *lc_init)
   1230{
   1231	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1232	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   1233	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1234	struct i915_hdcp_comp_master *comp;
   1235	int ret;
   1236
   1237	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1238	comp = dev_priv->hdcp_master;
   1239
   1240	if (!comp || !comp->ops) {
   1241		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1242		return -EINVAL;
   1243	}
   1244
   1245	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
   1246	if (ret < 0)
   1247		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
   1248			    ret);
   1249	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1250
   1251	return ret;
   1252}
   1253
   1254static int
   1255hdcp2_verify_lprime(struct intel_connector *connector,
   1256		    struct hdcp2_lc_send_lprime *rx_lprime)
   1257{
   1258	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1259	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   1260	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1261	struct i915_hdcp_comp_master *comp;
   1262	int ret;
   1263
   1264	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1265	comp = dev_priv->hdcp_master;
   1266
   1267	if (!comp || !comp->ops) {
   1268		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1269		return -EINVAL;
   1270	}
   1271
   1272	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
   1273	if (ret < 0)
   1274		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
   1275			    ret);
   1276	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1277
   1278	return ret;
   1279}
   1280
   1281static int hdcp2_prepare_skey(struct intel_connector *connector,
   1282			      struct hdcp2_ske_send_eks *ske_data)
   1283{
   1284	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1285	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   1286	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1287	struct i915_hdcp_comp_master *comp;
   1288	int ret;
   1289
   1290	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1291	comp = dev_priv->hdcp_master;
   1292
   1293	if (!comp || !comp->ops) {
   1294		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1295		return -EINVAL;
   1296	}
   1297
   1298	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
   1299	if (ret < 0)
   1300		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
   1301			    ret);
   1302	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1303
   1304	return ret;
   1305}
   1306
   1307static int
   1308hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
   1309				      struct hdcp2_rep_send_receiverid_list
   1310								*rep_topology,
   1311				      struct hdcp2_rep_send_ack *rep_send_ack)
   1312{
   1313	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1314	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   1315	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1316	struct i915_hdcp_comp_master *comp;
   1317	int ret;
   1318
   1319	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1320	comp = dev_priv->hdcp_master;
   1321
   1322	if (!comp || !comp->ops) {
   1323		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1324		return -EINVAL;
   1325	}
   1326
   1327	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
   1328							 rep_topology,
   1329							 rep_send_ack);
   1330	if (ret < 0)
   1331		drm_dbg_kms(&dev_priv->drm,
   1332			    "Verify rep topology failed. %d\n", ret);
   1333	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1334
   1335	return ret;
   1336}
   1337
   1338static int
   1339hdcp2_verify_mprime(struct intel_connector *connector,
   1340		    struct hdcp2_rep_stream_ready *stream_ready)
   1341{
   1342	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1343	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   1344	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1345	struct i915_hdcp_comp_master *comp;
   1346	int ret;
   1347
   1348	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1349	comp = dev_priv->hdcp_master;
   1350
   1351	if (!comp || !comp->ops) {
   1352		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1353		return -EINVAL;
   1354	}
   1355
   1356	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
   1357	if (ret < 0)
   1358		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
   1359	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1360
   1361	return ret;
   1362}
   1363
   1364static int hdcp2_authenticate_port(struct intel_connector *connector)
   1365{
   1366	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1367	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   1368	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1369	struct i915_hdcp_comp_master *comp;
   1370	int ret;
   1371
   1372	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1373	comp = dev_priv->hdcp_master;
   1374
   1375	if (!comp || !comp->ops) {
   1376		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1377		return -EINVAL;
   1378	}
   1379
   1380	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
   1381	if (ret < 0)
   1382		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
   1383			    ret);
   1384	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1385
   1386	return ret;
   1387}
   1388
   1389static int hdcp2_close_mei_session(struct intel_connector *connector)
   1390{
   1391	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1392	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1393	struct i915_hdcp_comp_master *comp;
   1394	int ret;
   1395
   1396	mutex_lock(&dev_priv->hdcp_comp_mutex);
   1397	comp = dev_priv->hdcp_master;
   1398
   1399	if (!comp || !comp->ops) {
   1400		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1401		return -EINVAL;
   1402	}
   1403
   1404	ret = comp->ops->close_hdcp_session(comp->mei_dev,
   1405					     &dig_port->hdcp_port_data);
   1406	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   1407
   1408	return ret;
   1409}
   1410
   1411static int hdcp2_deauthenticate_port(struct intel_connector *connector)
   1412{
   1413	return hdcp2_close_mei_session(connector);
   1414}
   1415
   1416/* Authentication flow starts from here */
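        /*
         * AKE message sequence: AKE_Init -> AKE_Send_Cert ->
         * AKE_No_Stored_Km/AKE_Stored_Km -> AKE_Send_H_prime
         * (-> AKE_Send_Pairing_Info when pairing is still needed).
         */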
   1417static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
   1418{
   1419	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1420	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1421	struct intel_hdcp *hdcp = &connector->hdcp;
   1422	union {
   1423		struct hdcp2_ake_init ake_init;
   1424		struct hdcp2_ake_send_cert send_cert;
   1425		struct hdcp2_ake_no_stored_km no_stored_km;
   1426		struct hdcp2_ake_send_hprime send_hprime;
   1427		struct hdcp2_ake_send_pairing_info pairing_info;
   1428	} msgs;
   1429	const struct intel_hdcp_shim *shim = hdcp->shim;
   1430	size_t size;
   1431	int ret;
   1432
   1433	/* Init for seq_num */
   1434	hdcp->seq_num_v = 0;
   1435	hdcp->seq_num_m = 0;
   1436
   1437	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
   1438	if (ret < 0)
   1439		return ret;
   1440
   1441	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
   1442				  sizeof(msgs.ake_init));
   1443	if (ret < 0)
   1444		return ret;
   1445
   1446	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
   1447				 &msgs.send_cert, sizeof(msgs.send_cert));
   1448	if (ret < 0)
   1449		return ret;
   1450
   1451	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
   1452		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
   1453		return -EINVAL;
   1454	}
   1455
   1456	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
   1457
   1458	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
   1459					msgs.send_cert.cert_rx.receiver_id,
   1460					1) > 0) {
   1461		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
   1462		return -EPERM;
   1463	}
   1464
   1465	/*
    1466	 * Here msgs.no_stored_km also holds the message for the stored-km
    1467	 * case; the firmware prepares the appropriate one and returns its size.
   1468	 */
   1469	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
   1470					      &hdcp->is_paired,
   1471					      &msgs.no_stored_km, &size);
   1472	if (ret < 0)
   1473		return ret;
   1474
   1475	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
   1476	if (ret < 0)
   1477		return ret;
   1478
   1479	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
   1480				 &msgs.send_hprime, sizeof(msgs.send_hprime));
   1481	if (ret < 0)
   1482		return ret;
   1483
   1484	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
   1485	if (ret < 0)
   1486		return ret;
   1487
   1488	if (!hdcp->is_paired) {
   1489		/* Pairing is required */
   1490		ret = shim->read_2_2_msg(dig_port,
   1491					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
   1492					 &msgs.pairing_info,
   1493					 sizeof(msgs.pairing_info));
   1494		if (ret < 0)
   1495			return ret;
   1496
   1497		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
   1498		if (ret < 0)
   1499			return ret;
   1500		hdcp->is_paired = true;
   1501	}
   1502
   1503	return 0;
   1504}
   1505
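        /*
         * Locality check: LC_Init carries a nonce and the receiver must return
         * L' (an HMAC derived from it) within the time limit mandated by the
         * HDCP 2.2 spec; L' itself is verified by the ME firmware. The exchange
         * is retried up to HDCP2_LC_RETRY_CNT times.
         */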
   1506static int hdcp2_locality_check(struct intel_connector *connector)
   1507{
   1508	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1509	struct intel_hdcp *hdcp = &connector->hdcp;
   1510	union {
   1511		struct hdcp2_lc_init lc_init;
   1512		struct hdcp2_lc_send_lprime send_lprime;
   1513	} msgs;
   1514	const struct intel_hdcp_shim *shim = hdcp->shim;
   1515	int tries = HDCP2_LC_RETRY_CNT, ret, i;
   1516
   1517	for (i = 0; i < tries; i++) {
   1518		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
   1519		if (ret < 0)
   1520			continue;
   1521
   1522		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
   1523				      sizeof(msgs.lc_init));
   1524		if (ret < 0)
   1525			continue;
   1526
   1527		ret = shim->read_2_2_msg(dig_port,
   1528					 HDCP_2_2_LC_SEND_LPRIME,
   1529					 &msgs.send_lprime,
   1530					 sizeof(msgs.send_lprime));
   1531		if (ret < 0)
   1532			continue;
   1533
   1534		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
   1535		if (!ret)
   1536			break;
   1537	}
   1538
   1539	return ret;
   1540}
   1541
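        /*
         * SKE: the ME firmware hands back SKE_Send_Eks (the encrypted session
         * key Edkey(ks) plus riv); the driver only relays it to the sink, the
         * plaintext session key never leaves the firmware.
         */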
   1542static int hdcp2_session_key_exchange(struct intel_connector *connector)
   1543{
   1544	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1545	struct intel_hdcp *hdcp = &connector->hdcp;
   1546	struct hdcp2_ske_send_eks send_eks;
   1547	int ret;
   1548
   1549	ret = hdcp2_prepare_skey(connector, &send_eks);
   1550	if (ret < 0)
   1551		return ret;
   1552
   1553	ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
   1554					sizeof(send_eks));
   1555	if (ret < 0)
   1556		return ret;
   1557
   1558	return 0;
   1559}
   1560
   1561static
   1562int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
   1563{
   1564	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1565	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   1566	struct intel_hdcp *hdcp = &connector->hdcp;
   1567	union {
   1568		struct hdcp2_rep_stream_manage stream_manage;
   1569		struct hdcp2_rep_stream_ready stream_ready;
   1570	} msgs;
   1571	const struct intel_hdcp_shim *shim = hdcp->shim;
   1572	int ret, streams_size_delta, i;
   1573
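        	/*
        	 * seq_num_m is a 24-bit counter (see drm_hdcp_cpu_to_be24 below);
        	 * refuse to let it wrap past HDCP_2_2_SEQ_NUM_MAX.
        	 */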
   1574	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
   1575		return -ERANGE;
   1576
   1577	/* Prepare RepeaterAuth_Stream_Manage msg */
   1578	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
   1579	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
   1580
   1581	msgs.stream_manage.k = cpu_to_be16(data->k);
   1582
   1583	for (i = 0; i < data->k; i++) {
   1584		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
   1585		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
   1586	}
   1587
   1588	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
   1589				sizeof(struct hdcp2_streamid_type);
   1590	/* Send it to Repeater */
   1591	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
   1592				  sizeof(msgs.stream_manage) - streams_size_delta);
   1593	if (ret < 0)
   1594		goto out;
   1595
   1596	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
   1597				 &msgs.stream_ready, sizeof(msgs.stream_ready));
   1598	if (ret < 0)
   1599		goto out;
   1600
   1601	data->seq_num_m = hdcp->seq_num_m;
   1602
   1603	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
   1604
   1605out:
   1606	hdcp->seq_num_m++;
   1607
   1608	return ret;
   1609}
   1610
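        /*
         * Read the repeater's receiver id list, validate topology limits,
         * seq_num_v and revocation status, then send RepeaterAuth_Send_Ack.
         */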
   1611static
   1612int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
   1613{
   1614	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1615	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1616	struct intel_hdcp *hdcp = &connector->hdcp;
   1617	union {
   1618		struct hdcp2_rep_send_receiverid_list recvid_list;
   1619		struct hdcp2_rep_send_ack rep_ack;
   1620	} msgs;
   1621	const struct intel_hdcp_shim *shim = hdcp->shim;
   1622	u32 seq_num_v, device_cnt;
   1623	u8 *rx_info;
   1624	int ret;
   1625
   1626	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
   1627				 &msgs.recvid_list, sizeof(msgs.recvid_list));
   1628	if (ret < 0)
   1629		return ret;
   1630
   1631	rx_info = msgs.recvid_list.rx_info;
   1632
   1633	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
   1634	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
   1635		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
   1636		return -EINVAL;
   1637	}
   1638
   1639	/*
   1640	 * MST topology is not Type 1 capable if it contains a downstream
   1641	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
   1642	 */
   1643	dig_port->hdcp_mst_type1_capable =
   1644		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
   1645		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
   1646
    1647	/* Convert and store the seq_num_v in a local variable as a DWORD */
   1648	seq_num_v =
   1649		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
   1650
   1651	if (!hdcp->hdcp2_encrypted && seq_num_v) {
   1652		drm_dbg_kms(&dev_priv->drm,
   1653			    "Non zero Seq_num_v at first RecvId_List msg\n");
   1654		return -EINVAL;
   1655	}
   1656
   1657	if (seq_num_v < hdcp->seq_num_v) {
   1658		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
   1659		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
   1660		return -EINVAL;
   1661	}
   1662
   1663	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
   1664		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
   1665	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
   1666					msgs.recvid_list.receiver_ids,
   1667					device_cnt) > 0) {
   1668		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
   1669		return -EPERM;
   1670	}
   1671
   1672	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
   1673						    &msgs.recvid_list,
   1674						    &msgs.rep_ack);
   1675	if (ret < 0)
   1676		return ret;
   1677
   1678	hdcp->seq_num_v = seq_num_v;
   1679	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
   1680				  sizeof(msgs.rep_ack));
   1681	if (ret < 0)
   1682		return ret;
   1683
   1684	return 0;
   1685}
   1686
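        /*
         * HDCP 2.2 authentication sequence: AKE, locality check, SKE,
         * optional stream type configuration and, for repeaters, topology
         * authentication.
         */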
   1687static int hdcp2_authenticate_sink(struct intel_connector *connector)
   1688{
   1689	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1690	struct drm_i915_private *i915 = to_i915(connector->base.dev);
   1691	struct intel_hdcp *hdcp = &connector->hdcp;
   1692	const struct intel_hdcp_shim *shim = hdcp->shim;
   1693	int ret;
   1694
   1695	ret = hdcp2_authentication_key_exchange(connector);
   1696	if (ret < 0) {
   1697		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
   1698		return ret;
   1699	}
   1700
   1701	ret = hdcp2_locality_check(connector);
   1702	if (ret < 0) {
   1703		drm_dbg_kms(&i915->drm,
   1704			    "Locality Check failed. Err : %d\n", ret);
   1705		return ret;
   1706	}
   1707
   1708	ret = hdcp2_session_key_exchange(connector);
   1709	if (ret < 0) {
   1710		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
   1711		return ret;
   1712	}
   1713
   1714	if (shim->config_stream_type) {
   1715		ret = shim->config_stream_type(dig_port,
   1716					       hdcp->is_repeater,
   1717					       hdcp->content_type);
   1718		if (ret < 0)
   1719			return ret;
   1720	}
   1721
   1722	if (hdcp->is_repeater) {
   1723		ret = hdcp2_authenticate_repeater_topology(connector);
   1724		if (ret < 0) {
   1725			drm_dbg_kms(&i915->drm,
   1726				    "Repeater Auth Failed. Err: %d\n", ret);
   1727			return ret;
   1728		}
   1729	}
   1730
   1731	return ret;
   1732}
   1733
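        /*
         * Enable per-stream HDCP 2.2 encryption once link encryption is up;
         * if the link is not encrypted, deauthenticate the port instead.
         */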
   1734static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
   1735{
   1736	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1737	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1738	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   1739	struct intel_hdcp *hdcp = &connector->hdcp;
   1740	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
   1741	enum port port = dig_port->base.port;
   1742	int ret = 0;
   1743
   1744	if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
   1745			    LINK_ENCRYPTION_STATUS)) {
   1746		drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
   1747			connector->base.name, connector->base.base.id);
   1748		ret = -EPERM;
   1749		goto link_recover;
   1750	}
   1751
   1752	if (hdcp->shim->stream_2_2_encryption) {
   1753		ret = hdcp->shim->stream_2_2_encryption(connector, true);
   1754		if (ret) {
   1755			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
   1756				connector->base.name, connector->base.base.id);
   1757			return ret;
   1758		}
   1759		drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
   1760			    transcoder_name(hdcp->stream_transcoder));
   1761	}
   1762
   1763	return 0;
   1764
   1765link_recover:
   1766	if (hdcp2_deauthenticate_port(connector) < 0)
   1767		drm_dbg_kms(&dev_priv->drm, "Port deauth failed.\n");
   1768
   1769	dig_port->hdcp_auth_status = false;
   1770	data->k = 0;
   1771
   1772	return ret;
   1773}
   1774
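        /*
         * Request link encryption on an authenticated HDCP 2.2 link and wait
         * for LINK_ENCRYPTION_STATUS to be set.
         */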
   1775static int hdcp2_enable_encryption(struct intel_connector *connector)
   1776{
   1777	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1778	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1779	struct intel_hdcp *hdcp = &connector->hdcp;
   1780	enum port port = dig_port->base.port;
   1781	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
   1782	int ret;
   1783
   1784	drm_WARN_ON(&dev_priv->drm,
   1785		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
   1786		    LINK_ENCRYPTION_STATUS);
   1787	if (hdcp->shim->toggle_signalling) {
   1788		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
   1789						    true);
   1790		if (ret) {
   1791			drm_err(&dev_priv->drm,
   1792				"Failed to enable HDCP signalling. %d\n",
   1793				ret);
   1794			return ret;
   1795		}
   1796	}
   1797
   1798	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
   1799	    LINK_AUTH_STATUS) {
   1800		/* Link is Authenticated. Now set for Encryption */
   1801		intel_de_write(dev_priv,
   1802			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
   1803			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
   1804	}
   1805
   1806	ret = intel_de_wait_for_set(dev_priv,
   1807				    HDCP2_STATUS(dev_priv, cpu_transcoder,
   1808						 port),
   1809				    LINK_ENCRYPTION_STATUS,
   1810				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
   1811	dig_port->hdcp_auth_status = true;
   1812
   1813	return ret;
   1814}
   1815
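        /*
         * Clear the link encryption request, wait for LINK_ENCRYPTION_STATUS
         * to clear and disable HDCP signalling where the shim provides a hook.
         */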
   1816static int hdcp2_disable_encryption(struct intel_connector *connector)
   1817{
   1818	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1819	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   1820	struct intel_hdcp *hdcp = &connector->hdcp;
   1821	enum port port = dig_port->base.port;
   1822	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
   1823	int ret;
   1824
   1825	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
   1826				      LINK_ENCRYPTION_STATUS));
   1827
   1828	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
   1829		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
   1830
   1831	ret = intel_de_wait_for_clear(dev_priv,
   1832				      HDCP2_STATUS(dev_priv, cpu_transcoder,
   1833						   port),
   1834				      LINK_ENCRYPTION_STATUS,
   1835				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
   1836	if (ret == -ETIMEDOUT)
    1837		drm_dbg_kms(&dev_priv->drm, "Disable encryption timed out\n");
   1838
   1839	if (hdcp->shim->toggle_signalling) {
   1840		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
   1841						    false);
   1842		if (ret) {
   1843			drm_err(&dev_priv->drm,
   1844				"Failed to disable HDCP signalling. %d\n",
   1845				ret);
   1846			return ret;
   1847		}
   1848	}
   1849
   1850	return ret;
   1851}
   1852
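        /*
         * For repeaters, retry stream management up to three times, aborting
         * early on seq_num_m roll over.
         */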
   1853static int
   1854hdcp2_propagate_stream_management_info(struct intel_connector *connector)
   1855{
   1856	struct drm_i915_private *i915 = to_i915(connector->base.dev);
   1857	int i, tries = 3, ret;
   1858
   1859	if (!connector->hdcp.is_repeater)
   1860		return 0;
   1861
   1862	for (i = 0; i < tries; i++) {
   1863		ret = _hdcp2_propagate_stream_management_info(connector);
   1864		if (!ret)
   1865			break;
   1866
    1867		/* Let's restart the auth in case of seq_num_m roll over */
   1868		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
   1869			drm_dbg_kms(&i915->drm,
   1870				    "seq_num_m roll over.(%d)\n", ret);
   1871			break;
   1872		}
   1873
   1874		drm_dbg_kms(&i915->drm,
   1875			    "HDCP2 stream management %d of %d Failed.(%d)\n",
   1876			    i + 1, tries, ret);
   1877	}
   1878
   1879	return ret;
   1880}
   1881
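        /*
         * Authenticate the sink (up to three attempts, deauthenticating the
         * port after each failure), then enable link and stream encryption.
         */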
   1882static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
   1883{
   1884	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1885	struct drm_i915_private *i915 = to_i915(connector->base.dev);
   1886	int ret = 0, i, tries = 3;
   1887
   1888	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
   1889		ret = hdcp2_authenticate_sink(connector);
   1890		if (!ret) {
   1891			ret = intel_hdcp_prepare_streams(connector);
   1892			if (ret) {
   1893				drm_dbg_kms(&i915->drm,
   1894					    "Prepare streams failed.(%d)\n",
   1895					    ret);
   1896				break;
   1897			}
   1898
   1899			ret = hdcp2_propagate_stream_management_info(connector);
   1900			if (ret) {
   1901				drm_dbg_kms(&i915->drm,
   1902					    "Stream management failed.(%d)\n",
   1903					    ret);
   1904				break;
   1905			}
   1906
   1907			ret = hdcp2_authenticate_port(connector);
   1908			if (!ret)
   1909				break;
   1910			drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
   1911				    ret);
   1912		}
   1913
    1914		/* Clear the MEI HDCP session */
   1915		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
   1916			    i + 1, tries, ret);
   1917		if (hdcp2_deauthenticate_port(connector) < 0)
   1918			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
   1919	}
   1920
   1921	if (!ret && !dig_port->hdcp_auth_status) {
   1922		/*
    1923		 * Ensure the required minimum interval of 200 ms between
    1924		 * Session Key Exchange and enabling encryption.
   1925		 */
   1926		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
   1927		ret = hdcp2_enable_encryption(connector);
   1928		if (ret < 0) {
   1929			drm_dbg_kms(&i915->drm,
   1930				    "Encryption Enable Failed.(%d)\n", ret);
   1931			if (hdcp2_deauthenticate_port(connector) < 0)
   1932				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
   1933		}
   1934	}
   1935
   1936	if (!ret)
   1937		ret = hdcp2_enable_stream_encryption(connector);
   1938
   1939	return ret;
   1940}
   1941
   1942static int _intel_hdcp2_enable(struct intel_connector *connector)
   1943{
   1944	struct drm_i915_private *i915 = to_i915(connector->base.dev);
   1945	struct intel_hdcp *hdcp = &connector->hdcp;
   1946	int ret;
   1947
   1948	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
   1949		    connector->base.name, connector->base.base.id,
   1950		    hdcp->content_type);
   1951
   1952	ret = hdcp2_authenticate_and_encrypt(connector);
   1953	if (ret) {
    1954		drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
   1955			    hdcp->content_type, ret);
   1956		return ret;
   1957	}
   1958
   1959	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
   1960		    connector->base.name, connector->base.base.id,
   1961		    hdcp->content_type);
   1962
   1963	hdcp->hdcp2_encrypted = true;
   1964	return 0;
   1965}
   1966
   1967static int
   1968_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
   1969{
   1970	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   1971	struct drm_i915_private *i915 = to_i915(connector->base.dev);
   1972	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   1973	struct intel_hdcp *hdcp = &connector->hdcp;
   1974	int ret;
   1975
   1976	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
   1977		    connector->base.name, connector->base.base.id);
   1978
   1979	if (hdcp->shim->stream_2_2_encryption) {
   1980		ret = hdcp->shim->stream_2_2_encryption(connector, false);
   1981		if (ret) {
   1982			drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
   1983				connector->base.name, connector->base.base.id);
   1984			return ret;
   1985		}
   1986		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
   1987			    transcoder_name(hdcp->stream_transcoder));
   1988
   1989		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
   1990			return 0;
   1991	}
   1992
   1993	ret = hdcp2_disable_encryption(connector);
   1994
   1995	if (hdcp2_deauthenticate_port(connector) < 0)
   1996		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
   1997
   1998	connector->hdcp.hdcp2_encrypted = false;
   1999	dig_port->hdcp_auth_status = false;
   2000	data->k = 0;
   2001
   2002	return ret;
   2003}
   2004
   2005/* Implements the Link Integrity Check for HDCP2.2 */
   2006static int intel_hdcp2_check_link(struct intel_connector *connector)
   2007{
   2008	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   2009	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   2010	struct intel_hdcp *hdcp = &connector->hdcp;
   2011	enum port port = dig_port->base.port;
   2012	enum transcoder cpu_transcoder;
   2013	int ret = 0;
   2014
   2015	mutex_lock(&hdcp->mutex);
   2016	mutex_lock(&dig_port->hdcp_mutex);
   2017	cpu_transcoder = hdcp->cpu_transcoder;
   2018
    2019	/* hdcp2_check_link is expected to run only when HDCP2.2 is enabled */
   2020	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
   2021	    !hdcp->hdcp2_encrypted) {
   2022		ret = -EINVAL;
   2023		goto out;
   2024	}
   2025
   2026	if (drm_WARN_ON(&dev_priv->drm,
   2027			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
   2028		drm_err(&dev_priv->drm,
   2029			"HDCP2.2 link stopped the encryption, %x\n",
   2030			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
   2031		ret = -ENXIO;
   2032		_intel_hdcp2_disable(connector, true);
   2033		intel_hdcp_update_value(connector,
   2034					DRM_MODE_CONTENT_PROTECTION_DESIRED,
   2035					true);
   2036		goto out;
   2037	}
   2038
   2039	ret = hdcp->shim->check_2_2_link(dig_port, connector);
   2040	if (ret == HDCP_LINK_PROTECTED) {
   2041		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
   2042			intel_hdcp_update_value(connector,
   2043					DRM_MODE_CONTENT_PROTECTION_ENABLED,
   2044					true);
   2045		}
   2046		goto out;
   2047	}
   2048
   2049	if (ret == HDCP_TOPOLOGY_CHANGE) {
   2050		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
   2051			goto out;
   2052
   2053		drm_dbg_kms(&dev_priv->drm,
   2054			    "HDCP2.2 Downstream topology change\n");
   2055		ret = hdcp2_authenticate_repeater_topology(connector);
   2056		if (!ret) {
   2057			intel_hdcp_update_value(connector,
   2058					DRM_MODE_CONTENT_PROTECTION_ENABLED,
   2059					true);
   2060			goto out;
   2061		}
   2062		drm_dbg_kms(&dev_priv->drm,
   2063			    "[%s:%d] Repeater topology auth failed.(%d)\n",
   2064			    connector->base.name, connector->base.base.id,
   2065			    ret);
   2066	} else {
   2067		drm_dbg_kms(&dev_priv->drm,
   2068			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
   2069			    connector->base.name, connector->base.base.id);
   2070	}
   2071
   2072	ret = _intel_hdcp2_disable(connector, true);
   2073	if (ret) {
   2074		drm_err(&dev_priv->drm,
   2075			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
   2076			connector->base.name, connector->base.base.id, ret);
   2077		intel_hdcp_update_value(connector,
   2078				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
   2079		goto out;
   2080	}
   2081
   2082	ret = _intel_hdcp2_enable(connector);
   2083	if (ret) {
   2084		drm_dbg_kms(&dev_priv->drm,
   2085			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
   2086			    connector->base.name, connector->base.base.id,
   2087			    ret);
   2088		intel_hdcp_update_value(connector,
   2089					DRM_MODE_CONTENT_PROTECTION_DESIRED,
   2090					true);
   2091		goto out;
   2092	}
   2093
   2094out:
   2095	mutex_unlock(&dig_port->hdcp_mutex);
   2096	mutex_unlock(&hdcp->mutex);
   2097	return ret;
   2098}
   2099
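        /*
         * Periodic worker: run the HDCP 2.2 link check first, fall back to the
         * HDCP 1.4 check, and re-arm with the matching check period.
         */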
   2100static void intel_hdcp_check_work(struct work_struct *work)
   2101{
   2102	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
   2103					       struct intel_hdcp,
   2104					       check_work);
   2105	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
   2106
   2107	if (drm_connector_is_unregistered(&connector->base))
   2108		return;
   2109
   2110	if (!intel_hdcp2_check_link(connector))
   2111		schedule_delayed_work(&hdcp->check_work,
   2112				      DRM_HDCP2_CHECK_PERIOD_MS);
   2113	else if (!intel_hdcp_check_link(connector))
   2114		schedule_delayed_work(&hdcp->check_work,
   2115				      DRM_HDCP_CHECK_PERIOD_MS);
   2116}
   2117
   2118static int i915_hdcp_component_bind(struct device *i915_kdev,
   2119				    struct device *mei_kdev, void *data)
   2120{
   2121	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
   2122
   2123	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
   2124	mutex_lock(&dev_priv->hdcp_comp_mutex);
   2125	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
   2126	dev_priv->hdcp_master->mei_dev = mei_kdev;
   2127	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   2128
   2129	return 0;
   2130}
   2131
   2132static void i915_hdcp_component_unbind(struct device *i915_kdev,
   2133				       struct device *mei_kdev, void *data)
   2134{
   2135	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
   2136
   2137	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
   2138	mutex_lock(&dev_priv->hdcp_comp_mutex);
   2139	dev_priv->hdcp_master = NULL;
   2140	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   2141}
   2142
   2143static const struct component_ops i915_hdcp_component_ops = {
   2144	.bind   = i915_hdcp_component_bind,
   2145	.unbind = i915_hdcp_component_unbind,
   2146};
   2147
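        /* Map an i915 port to the DDI index expected by the MEI firmware. */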
   2148static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
   2149{
   2150	switch (port) {
   2151	case PORT_A:
   2152		return MEI_DDI_A;
   2153	case PORT_B ... PORT_F:
   2154		return (enum mei_fw_ddi)port;
   2155	default:
   2156		return MEI_DDI_INVALID_PORT;
   2157	}
   2158}
   2159
   2160static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
   2161{
   2162	switch (cpu_transcoder) {
   2163	case TRANSCODER_A ... TRANSCODER_D:
   2164		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
    2165	default: /* eDP, DSI transcoders are not HDCP capable */
   2166		return MEI_INVALID_TRANSCODER;
   2167	}
   2168}
   2169
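        /*
         * Fill the hdcp_port_data shared with the security firmware and
         * allocate the per-pipe stream array; stream 0 is pre-filled for SST.
         */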
   2170static int initialize_hdcp_port_data(struct intel_connector *connector,
   2171				     struct intel_digital_port *dig_port,
   2172				     const struct intel_hdcp_shim *shim)
   2173{
   2174	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   2175	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
   2176	struct intel_hdcp *hdcp = &connector->hdcp;
   2177	enum port port = dig_port->base.port;
   2178
   2179	if (DISPLAY_VER(dev_priv) < 12)
   2180		data->fw_ddi = intel_get_mei_fw_ddi_index(port);
   2181	else
   2182		/*
    2183		 * As per the ME FW API expectation, for GEN 12+, fw_ddi is filled
    2184		 * with zero (INVALID PORT index).
   2185		 */
   2186		data->fw_ddi = MEI_DDI_INVALID_PORT;
   2187
   2188	/*
    2189	 * The associated transcoder is set and modified at modeset time, so
    2190	 * fw_tc is initialized here to zero (invalid transcoder index). It is
    2191	 * retained as such forever on platforms older than Gen12.
   2192	 */
   2193	data->fw_tc = MEI_INVALID_TRANSCODER;
   2194
   2195	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
   2196	data->protocol = (u8)shim->protocol;
   2197
   2198	if (!data->streams)
   2199		data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
   2200					sizeof(struct hdcp2_streamid_type),
   2201					GFP_KERNEL);
   2202	if (!data->streams) {
   2203		drm_err(&dev_priv->drm, "Out of Memory\n");
   2204		return -ENOMEM;
   2205	}
   2206	/* For SST */
   2207	data->streams[0].stream_id = 0;
   2208	data->streams[0].stream_type = hdcp->content_type;
   2209
   2210	return 0;
   2211}
   2212
   2213static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
   2214{
   2215	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
   2216		return false;
   2217
   2218	return (DISPLAY_VER(dev_priv) >= 10 ||
   2219		IS_KABYLAKE(dev_priv) ||
   2220		IS_COFFEELAKE(dev_priv) ||
   2221		IS_COMETLAKE(dev_priv));
   2222}
   2223
   2224void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
   2225{
   2226	int ret;
   2227
   2228	if (!is_hdcp2_supported(dev_priv))
   2229		return;
   2230
   2231	mutex_lock(&dev_priv->hdcp_comp_mutex);
   2232	drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
   2233
   2234	dev_priv->hdcp_comp_added = true;
   2235	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   2236	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
   2237				  I915_COMPONENT_HDCP);
   2238	if (ret < 0) {
   2239		drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
   2240			    ret);
   2241		mutex_lock(&dev_priv->hdcp_comp_mutex);
   2242		dev_priv->hdcp_comp_added = false;
   2243		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   2244		return;
   2245	}
   2246}
   2247
   2248static void intel_hdcp2_init(struct intel_connector *connector,
   2249			     struct intel_digital_port *dig_port,
   2250			     const struct intel_hdcp_shim *shim)
   2251{
   2252	struct drm_i915_private *i915 = to_i915(connector->base.dev);
   2253	struct intel_hdcp *hdcp = &connector->hdcp;
   2254	int ret;
   2255
   2256	ret = initialize_hdcp_port_data(connector, dig_port, shim);
   2257	if (ret) {
   2258		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
   2259		return;
   2260	}
   2261
   2262	hdcp->hdcp2_supported = true;
   2263}
   2264
   2265int intel_hdcp_init(struct intel_connector *connector,
   2266		    struct intel_digital_port *dig_port,
   2267		    const struct intel_hdcp_shim *shim)
   2268{
   2269	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   2270	struct intel_hdcp *hdcp = &connector->hdcp;
   2271	int ret;
   2272
   2273	if (!shim)
   2274		return -EINVAL;
   2275
   2276	if (is_hdcp2_supported(dev_priv))
   2277		intel_hdcp2_init(connector, dig_port, shim);
   2278
   2279	ret =
   2280	drm_connector_attach_content_protection_property(&connector->base,
   2281							 hdcp->hdcp2_supported);
   2282	if (ret) {
   2283		hdcp->hdcp2_supported = false;
   2284		kfree(dig_port->hdcp_port_data.streams);
   2285		return ret;
   2286	}
   2287
   2288	hdcp->shim = shim;
   2289	mutex_init(&hdcp->mutex);
   2290	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
   2291	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
   2292	init_waitqueue_head(&hdcp->cp_irq_queue);
   2293
   2294	return 0;
   2295}
   2296
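        /*
         * Enable HDCP on the connector: pick the (stream) transcoder from the
         * pipe config, try HDCP 2.2 first when the sink is capable, fall back
         * to HDCP 1.4 for non-Type1 content and schedule the link check work
         * on success.
         */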
   2297int intel_hdcp_enable(struct intel_connector *connector,
   2298		      const struct intel_crtc_state *pipe_config, u8 content_type)
   2299{
   2300	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
   2301	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   2302	struct intel_hdcp *hdcp = &connector->hdcp;
   2303	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
   2304	int ret = -EINVAL;
   2305
   2306	if (!hdcp->shim)
   2307		return -ENOENT;
   2308
   2309	if (!connector->encoder) {
   2310		drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
   2311			connector->base.name, connector->base.base.id);
   2312		return -ENODEV;
   2313	}
   2314
   2315	mutex_lock(&hdcp->mutex);
   2316	mutex_lock(&dig_port->hdcp_mutex);
   2317	drm_WARN_ON(&dev_priv->drm,
   2318		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
   2319	hdcp->content_type = content_type;
   2320
   2321	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
   2322		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
   2323		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
   2324	} else {
   2325		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
   2326		hdcp->stream_transcoder = INVALID_TRANSCODER;
   2327	}
   2328
   2329	if (DISPLAY_VER(dev_priv) >= 12)
   2330		dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
   2331
   2332	/*
    2333	 * HDCP2.2 is more secure than HDCP1.4, so if the setup is capable of
    2334	 * HDCP2.2, it is preferred to use HDCP2.2.
   2335	 */
   2336	if (intel_hdcp2_capable(connector)) {
   2337		ret = _intel_hdcp2_enable(connector);
   2338		if (!ret)
   2339			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
   2340	}
   2341
   2342	/*
   2343	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
   2344	 * be attempted.
   2345	 */
   2346	if (ret && intel_hdcp_capable(connector) &&
   2347	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
   2348		ret = _intel_hdcp_enable(connector);
   2349	}
   2350
   2351	if (!ret) {
   2352		schedule_delayed_work(&hdcp->check_work, check_link_interval);
   2353		intel_hdcp_update_value(connector,
   2354					DRM_MODE_CONTENT_PROTECTION_ENABLED,
   2355					true);
   2356	}
   2357
   2358	mutex_unlock(&dig_port->hdcp_mutex);
   2359	mutex_unlock(&hdcp->mutex);
   2360	return ret;
   2361}
   2362
   2363int intel_hdcp_disable(struct intel_connector *connector)
   2364{
   2365	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
   2366	struct intel_hdcp *hdcp = &connector->hdcp;
   2367	int ret = 0;
   2368
   2369	if (!hdcp->shim)
   2370		return -ENOENT;
   2371
   2372	mutex_lock(&hdcp->mutex);
   2373	mutex_lock(&dig_port->hdcp_mutex);
   2374
   2375	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
   2376		goto out;
   2377
   2378	intel_hdcp_update_value(connector,
   2379				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
   2380	if (hdcp->hdcp2_encrypted)
   2381		ret = _intel_hdcp2_disable(connector, false);
   2382	else if (hdcp->hdcp_encrypted)
   2383		ret = _intel_hdcp_disable(connector);
   2384
   2385out:
   2386	mutex_unlock(&dig_port->hdcp_mutex);
   2387	mutex_unlock(&hdcp->mutex);
   2388	cancel_delayed_work_sync(&hdcp->check_work);
   2389	return ret;
   2390}
   2391
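        /*
         * Handle content protection property changes in an atomic commit:
         * disable HDCP on UNDESIRED or on a content type change, then
         * (re)enable it when the property is DESIRED.
         */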
   2392void intel_hdcp_update_pipe(struct intel_atomic_state *state,
   2393			    struct intel_encoder *encoder,
   2394			    const struct intel_crtc_state *crtc_state,
   2395			    const struct drm_connector_state *conn_state)
   2396{
   2397	struct intel_connector *connector =
   2398				to_intel_connector(conn_state->connector);
   2399	struct intel_hdcp *hdcp = &connector->hdcp;
   2400	bool content_protection_type_changed, desired_and_not_enabled = false;
   2401
   2402	if (!connector->hdcp.shim)
   2403		return;
   2404
   2405	content_protection_type_changed =
   2406		(conn_state->hdcp_content_type != hdcp->content_type &&
   2407		 conn_state->content_protection !=
   2408		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
   2409
   2410	/*
    2411	 * If a content type change is requested during an active HDCP
    2412	 * encryption session, disable HDCP and re-enable it with the new type.
   2413	 */
   2414	if (conn_state->content_protection ==
   2415	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
   2416	    content_protection_type_changed)
   2417		intel_hdcp_disable(connector);
   2418
   2419	/*
    2420	 * Mark the HDCP state as DESIRED after the HDCP disable done for the
    2421	 * type change procedure.
   2422	 */
   2423	if (content_protection_type_changed) {
   2424		mutex_lock(&hdcp->mutex);
   2425		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
   2426		drm_connector_get(&connector->base);
   2427		schedule_work(&hdcp->prop_work);
   2428		mutex_unlock(&hdcp->mutex);
   2429	}
   2430
   2431	if (conn_state->content_protection ==
   2432	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
   2433		mutex_lock(&hdcp->mutex);
    2434		/* Avoid enabling HDCP if it is already ENABLED */
   2435		desired_and_not_enabled =
   2436			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
   2437		mutex_unlock(&hdcp->mutex);
   2438		/*
    2439		 * If HDCP is already ENABLED and the CP property is DESIRED, schedule
    2440		 * prop_work to report the correct CP property to userspace.
   2441		 */
   2442		if (!desired_and_not_enabled && !content_protection_type_changed) {
   2443			drm_connector_get(&connector->base);
   2444			schedule_work(&hdcp->prop_work);
   2445		}
   2446	}
   2447
   2448	if (desired_and_not_enabled || content_protection_type_changed)
   2449		intel_hdcp_enable(connector,
   2450				  crtc_state,
   2451				  (u8)conn_state->hdcp_content_type);
   2452}
   2453
   2454void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
   2455{
   2456	mutex_lock(&dev_priv->hdcp_comp_mutex);
   2457	if (!dev_priv->hdcp_comp_added) {
   2458		mutex_unlock(&dev_priv->hdcp_comp_mutex);
   2459		return;
   2460	}
   2461
   2462	dev_priv->hdcp_comp_added = false;
   2463	mutex_unlock(&dev_priv->hdcp_comp_mutex);
   2464
   2465	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
   2466}
   2467
   2468void intel_hdcp_cleanup(struct intel_connector *connector)
   2469{
   2470	struct intel_hdcp *hdcp = &connector->hdcp;
   2471
   2472	if (!hdcp->shim)
   2473		return;
   2474
   2475	/*
   2476	 * If the connector is registered, it's possible userspace could kick
   2477	 * off another HDCP enable, which would re-spawn the workers.
   2478	 */
   2479	drm_WARN_ON(connector->base.dev,
   2480		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
   2481
   2482	/*
   2483	 * Now that the connector is not registered, check_work won't be run,
   2484	 * but cancel any outstanding instances of it
   2485	 */
   2486	cancel_delayed_work_sync(&hdcp->check_work);
   2487
   2488	/*
   2489	 * We don't cancel prop_work in the same way as check_work since it
   2490	 * requires connection_mutex which could be held while calling this
   2491	 * function. Instead, we rely on the connector references grabbed before
   2492	 * scheduling prop_work to ensure the connector is alive when prop_work
   2493	 * is run. So if we're in the destroy path (which is where this
   2494	 * function should be called), we're "guaranteed" that prop_work is not
   2495	 * active (tl;dr This Should Never Happen).
   2496	 */
   2497	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
   2498
   2499	mutex_lock(&hdcp->mutex);
   2500	hdcp->shim = NULL;
   2501	mutex_unlock(&hdcp->mutex);
   2502}
   2503
   2504void intel_hdcp_atomic_check(struct drm_connector *connector,
   2505			     struct drm_connector_state *old_state,
   2506			     struct drm_connector_state *new_state)
   2507{
   2508	u64 old_cp = old_state->content_protection;
   2509	u64 new_cp = new_state->content_protection;
   2510	struct drm_crtc_state *crtc_state;
   2511
   2512	if (!new_state->crtc) {
   2513		/*
   2514		 * If the connector is being disabled with CP enabled, mark it
   2515		 * desired so it's re-enabled when the connector is brought back
   2516		 */
   2517		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
   2518			new_state->content_protection =
   2519				DRM_MODE_CONTENT_PROTECTION_DESIRED;
   2520		return;
   2521	}
   2522
   2523	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
   2524						   new_state->crtc);
   2525	/*
   2526	 * Fix the HDCP uapi content protection state in case of modeset.
    2527	 * FIXME: As per the HDCP content protection property uapi doc, a uevent()
    2528	 * needs to be sent if there is a transition from ENABLED->DESIRED.
   2529	 */
   2530	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
   2531	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
   2532	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
   2533		new_state->content_protection =
   2534			DRM_MODE_CONTENT_PROTECTION_DESIRED;
   2535
   2536	/*
    2537	 * Nothing to do if the state didn't change, or HDCP was activated since
    2538	 * the last commit, and the HDCP content type is also unchanged.
   2539	 */
   2540	if (old_cp == new_cp ||
   2541	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
   2542	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
   2543		if (old_state->hdcp_content_type ==
   2544				new_state->hdcp_content_type)
   2545			return;
   2546	}
   2547
   2548	crtc_state->mode_changed = true;
   2549}
   2550
   2551/* Handles the CP_IRQ raised from the DP HDCP sink */
   2552void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
   2553{
   2554	struct intel_hdcp *hdcp = &connector->hdcp;
   2555
   2556	if (!hdcp->shim)
   2557		return;
   2558
   2559	atomic_inc(&connector->hdcp.cp_irq_count);
   2560	wake_up_all(&connector->hdcp.cp_irq_queue);
   2561
   2562	schedule_delayed_work(&hdcp->check_work, 0);
   2563}