cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pci.c (11075B)


// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core pci
 *
 * Compute Express Link protocols are layered on top of PCIe. CXL core provides
 * a set of helpers for CXL interactions which occur via PCIe.
 */

static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");
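
/*
 * Added note (not in the original file): assuming this file builds into
 * cxl_core.ko, the timeout above can be set at boot via the kernel command
 * line, e.g. "cxl_core.media_ready_timeout=120", or changed at runtime
 * (perms 0644) through /sys/module/cxl_core/parameters/media_ready_timeout.
 */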

struct cxl_walk_context {
	struct pci_bus *bus;
	struct cxl_port *port;
	int type;
	int error;
	int count;
};

/* pci_walk_bus() callback: register each matching PCIe port as a CXL dport */
static int match_add_dports(struct pci_dev *pdev, void *data)
{
	struct cxl_walk_context *ctx = data;
	struct cxl_port *port = ctx->port;
	int type = pci_pcie_type(pdev);
	struct cxl_register_map map;
	struct cxl_dport *dport;
	u32 lnkcap, port_num;
	int rc;

	if (pdev->bus != ctx->bus)
		return 0;
	if (!pci_is_pcie(pdev))
		return 0;
	if (type != ctx->type)
		return 0;
	if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
				  &lnkcap))
		return 0;

	rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	if (rc)
		dev_dbg(&port->dev, "failed to find component registers\n");

	port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
	dport = devm_cxl_add_dport(port, &pdev->dev, port_num,
				   cxl_regmap_to_base(pdev, &map));
	if (IS_ERR(dport)) {
		ctx->error = PTR_ERR(dport);
		return PTR_ERR(dport);
	}
	ctx->count++;

	dev_dbg(&port->dev, "add dport%d: %s\n", port_num, dev_name(&pdev->dev));

	return 0;
}

/**
 * devm_cxl_port_enumerate_dports - enumerate downstream ports of the upstream port
 * @port: cxl_port whose ->uport is the upstream of dports to be enumerated
 *
 * Returns a positive number of dports enumerated or a negative error
 * code.
 */
int devm_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct pci_bus *bus = cxl_port_to_pci_bus(port);
	struct cxl_walk_context ctx;
	int type;

	if (!bus)
		return -ENXIO;

	if (pci_is_root_bus(bus))
		type = PCI_EXP_TYPE_ROOT_PORT;
	else
		type = PCI_EXP_TYPE_DOWNSTREAM;

	ctx = (struct cxl_walk_context) {
		.port = port,
		.bus = bus,
		.type = type,
	};
	pci_walk_bus(bus, match_add_dports, &ctx);

	if (ctx.count == 0)
		return -ENODEV;
	if (ctx.error)
		return ctx.error;
	return ctx.count;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);
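
/*
 * Example (added for illustration; not part of the original file): a
 * hypothetical caller invoking the enumeration helper on a freshly added
 * port. The function name here is an assumption.
 */
static int __maybe_unused example_enumerate_dports(struct cxl_port *port)
{
	int rc = devm_cxl_port_enumerate_dports(port);

	if (rc < 0)
		return rc;	/* no PCI bus, no dports, or dport add failure */

	dev_dbg(&port->dev, "enumerated %d dports\n", rc);
	return 0;
}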

/*
 * Wait up to @media_ready_timeout for the device to report memory
 * active.
 */
int cxl_await_media_ready(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	bool active = false;
	u64 md_status;
	int rc, i;

	for (i = media_ready_timeout; i; i--) {
		u32 temp;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
		if (rc)
			return rc;

		active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp);
		if (active)
			break;
		msleep(1000);
	}

	if (!active) {
		dev_err(&pdev->dev,
			"timeout awaiting memory active after %d seconds\n",
			media_ready_timeout);
		return -ETIMEDOUT;
	}

	md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!CXLMDEV_READY(md_status))
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
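
/*
 * Example (added for illustration; not part of the original file): a
 * hypothetical endpoint probe step gating further setup on media
 * readiness. The function name is an assumption.
 */
static int __maybe_unused example_await_media(struct cxl_dev_state *cxlds)
{
	int rc;

	rc = cxl_await_media_ready(cxlds);
	if (rc)
		dev_err(cxlds->dev, "media not ready: %d\n", rc);

	return rc;
}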

static int wait_for_valid(struct cxl_dev_state *cxlds)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec, rc;
	u32 val;

	/*
	 * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size
	 * High and Size Low registers are valid. Must be set within 1 second
	 * of deassertion of reset to the CXL device. Likely it is already set
	 * by the time this runs, but otherwise give a 1.5 second timeout in
	 * case of clock skew.
	 */
	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	msleep(1500);

	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
	if (rc)
		return rc;

	if (val & CXL_DVSEC_MEM_INFO_VALID)
		return 0;

	return -ETIMEDOUT;
}
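
/*
 * Added annotation (not in the original file): wait_for_valid() reads the
 * flag once before sleeping so the common case, where firmware has already
 * set Memory_Info_Valid, costs a single config read; only a still-clear
 * flag pays the 1.5 second sleep and re-read.
 */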

/*
 * Read-modify-write of Mem_Enable in the DVSEC CXL Control register.
 * Returns 1 if the bit already matched @val, 0 on a successful update,
 * or a negative errno on config-space access failure.
 */
static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int d = cxlds->cxl_dvsec;
	u16 ctrl;
	int rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc < 0)
		return rc;

	if ((ctrl & CXL_DVSEC_MEM_ENABLE) == val)
		return 1;
	ctrl &= ~CXL_DVSEC_MEM_ENABLE;
	ctrl |= val;

	rc = pci_write_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, ctrl);
	if (rc < 0)
		return rc;

	return 0;
}

static void clear_mem_enable(void *cxlds)
{
	cxl_set_mem_enable(cxlds, 0);
}

static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
{
	int rc;

	rc = cxl_set_mem_enable(cxlds, CXL_DVSEC_MEM_ENABLE);
	if (rc < 0)
		return rc;
	if (rc > 0)
		return 0;
	return devm_add_action_or_reset(host, clear_mem_enable, cxlds);
}
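
/*
 * Aside (illustrative; not part of the original file): devm_cxl_enable_mem()
 * uses the standard devm idiom of pairing an enable with an automatic
 * disable on driver teardown. A minimal sketch of the same shape, with
 * hypothetical names:
 */
static void __maybe_unused example_disable(void *data)
{
	/* undo whatever example_enable() set up */
}

static int __maybe_unused example_enable(struct device *host)
{
	/* ... enable the resource here ... */
	return devm_add_action_or_reset(host, example_disable, NULL);
}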

/* true if @r2 lies entirely within @r1 */
static bool range_contains(struct range *r1, struct range *r2)
{
	return r1->start <= r2->start && r1->end >= r2->end;
}
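
/*
 * Worked example (added; not in the original file): with
 * r1 = [0x100000000, 0x1ffffffff] and r2 = [0x140000000, 0x17fffffff],
 * range_contains(&r1, &r2) is true because both endpoints of r2 fall
 * inside r1.
 */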

/* require dvsec ranges to be covered by a locked platform window */
static int dvsec_range_allowed(struct device *dev, void *arg)
{
	struct range *dev_range = arg;
	struct cxl_decoder *cxld;
	struct range root_range;

	if (!is_root_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	if (!(cxld->flags & CXL_DECODER_F_LOCK))
		return 0;
	if (!(cxld->flags & CXL_DECODER_F_RAM))
		return 0;

	root_range = (struct range) {
		.start = cxld->platform_res.start,
		.end = cxld->platform_res.end,
	};

	return range_contains(&root_range, dev_range);
}

/* clear the global enable bit, stopping HDM decoder operation */
static void disable_hdm(void *_cxlhdm)
{
	u32 global_ctrl;
	struct cxl_hdm *cxlhdm = _cxlhdm;
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	writel(global_ctrl & ~CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);
}

/* set the global enable bit and arrange for it to be cleared on teardown */
static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	u32 global_ctrl;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
	writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
	       hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}

/*
 * Decide whether the endpoint will operate via HDM decoders or stay on
 * legacy DVSEC range decode, and enable CXL.mem accordingly.
 */
static bool __cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
				  struct cxl_hdm *cxlhdm,
				  struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	struct device *dev = cxlds->dev;
	struct cxl_port *root;
	int i, rc, allowed;
	u32 global_ctrl;

	global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);

	/*
	 * If the HDM Decoder Capability is already enabled then assume
	 * that some other agent like platform firmware set it up.
	 */
	if (global_ctrl & CXL_HDM_DECODER_ENABLE) {
		rc = devm_cxl_enable_mem(&port->dev, cxlds);
		if (rc)
			return false;
		return true;
	}

	root = to_cxl_port(port->dev.parent);
	while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
		root = to_cxl_port(root->dev.parent);
	if (!is_cxl_root(root)) {
		dev_err(dev, "Failed to acquire root port for HDM enable\n");
		return false;
	}

	for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) {
		struct device *cxld_dev;

		cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
					     dvsec_range_allowed);
		if (!cxld_dev) {
			dev_dbg(dev, "DVSEC Range%d denied by platform\n", i);
			continue;
		}
		dev_dbg(dev, "DVSEC Range%d allowed by platform\n", i);
		put_device(cxld_dev);
		allowed++;
	}

	if (!allowed) {
		cxl_set_mem_enable(cxlds, 0);
		info->mem_enabled = 0;
	}

	/*
	 * Per CXL 2.0 Sections 8.1.3.8.3 and 8.1.3.8.4 (DVSEC CXL Range 1
	 * Base [High, Low]), the device ignores the range register values
	 * once HDM operation is enabled, but the spec also recommends
	 * matching DVSEC Ranges 1 and 2 to HDM Decoder Ranges 0 and 1. So
	 * non-zero info->ranges are expected even though Linux does not
	 * require or maintain that match. If at least one DVSEC range is
	 * enabled and allowed, skip HDM Decoder Capability Enable.
	 */
	if (info->mem_enabled)
		return false;

	rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
	if (rc)
		return false;

	rc = devm_cxl_enable_mem(&port->dev, cxlds);
	if (rc)
		return false;

	return true;
}
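
/*
 * Added annotation (not in the original file): outcomes of
 * __cxl_hdm_decode_init() at a glance:
 *
 *	HDM decode already enabled  -> keep the firmware setup, set Mem_Enable
 *	no CXL root found           -> false (platform windows unverifiable)
 *	allowed DVSEC range in use  -> false (legacy range decode wins)
 *	otherwise                   -> enable HDM decode, then Mem_Enable
 */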

/**
 * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
 * @cxlds: Device state
 * @cxlhdm: Mapped HDM decoder Capability
 *
 * Try to enable the endpoint's HDM Decoder Capability
 */
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	struct cxl_endpoint_dvsec_info info = { 0 };
	int hdm_count, rc, i, ranges = 0;
	struct device *dev = &pdev->dev;
	int d = cxlds->cxl_dvsec;
	u16 cap, ctrl;

	if (!d) {
		dev_dbg(dev, "No DVSEC Capability\n");
		return -ENXIO;
	}

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
	if (rc)
		return rc;

	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
	if (rc)
		return rc;

	if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
		dev_dbg(dev, "Not MEM Capable\n");
		return -ENXIO;
	}

	/*
	 * The spec does not allow MEM.capable to be set alongside zero legacy
	 * HDM decoders (values above 2 are likewise undefined as of CXL 2.0).
	 * As this driver is for a spec-defined class code which must be
	 * CXL.mem capable, there is no point in continuing to enable CXL.mem.
	 */
	hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
	if (!hdm_count || hdm_count > 2)
		return -EINVAL;

	rc = wait_for_valid(cxlds);
	if (rc) {
		dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
		return rc;
	}

	/*
	 * The current DVSEC values are moot if the memory capability is
	 * disabled, and they will remain moot after the HDM Decoder
	 * capability is enabled.
	 */
	info.mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
	if (!info.mem_enabled)
		goto hdm_init;

	/* reassemble each 64-bit range size and base from two 32-bit reads */
	for (i = 0; i < hdm_count; i++) {
		u64 base, size;
		u32 temp;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
		if (rc)
			return rc;

		size = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
		if (rc)
			return rc;

		size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
		if (rc)
			return rc;

		base = (u64)temp << 32;

		rc = pci_read_config_dword(
			pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
		if (rc)
			return rc;

		base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;

		info.dvsec_range[i] = (struct range) {
			.start = base,
			.end = base + size - 1
		};

		if (size)
			ranges++;
	}

	info.ranges = ranges;

	/*
	 * If DVSEC ranges are being used instead of HDM decoder registers,
	 * there is no use in trying to manage those.
	 */
hdm_init:
	if (!__cxl_hdm_decode_init(cxlds, cxlhdm, &info)) {
		dev_err(dev,
			"Legacy range registers configuration prevents HDM operation.\n");
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
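
/*
 * Example (added for illustration; not part of the original file): the rough
 * order in which an endpoint driver might consume the exports above during
 * probe. The function name and call site are assumptions, not the upstream
 * cxl_pci driver.
 */
static int __maybe_unused example_endpoint_setup(struct cxl_dev_state *cxlds,
						 struct cxl_hdm *cxlhdm)
{
	int rc;

	/* block until the device reports its memory as usable */
	rc = cxl_await_media_ready(cxlds);
	if (rc)
		return rc;

	/* then pick HDM decoder vs. legacy DVSEC range operation */
	return cxl_hdm_decode_init(cxlds, cxlhdm);
}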