cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

dw-edma-pcie.c (10835B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
      4 * Synopsys DesignWare eDMA PCIe driver
      5 *
      6 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
      7 */
      8
      9#include <linux/kernel.h>
     10#include <linux/module.h>
     11#include <linux/pci.h>
     12#include <linux/device.h>
     13#include <linux/dma/edma.h>
     14#include <linux/pci-epf.h>
     15#include <linux/msi.h>
     16#include <linux/bitfield.h>
     17
     18#include "dw-edma-core.h"
     19
/* Fields of the Synopsys vendor-specific extended capability (VSEC) */
#define DW_PCIE_VSEC_DMA_ID			0x6
#define DW_PCIE_VSEC_DMA_BAR			GENMASK(10, 8)
#define DW_PCIE_VSEC_DMA_MAP			GENMASK(2, 0)
#define DW_PCIE_VSEC_DMA_WR_CH			GENMASK(9, 0)
#define DW_PCIE_VSEC_DMA_RD_CH			GENMASK(25, 16)

/* Shorthand initializer for one dw_edma_block table entry */
#define DW_BLOCK(a, b, c) \
	{ \
		.bar = a, \
		.off = b, \
		.sz = c, \
	},

/* Location of one memory region inside a PCI BAR */
struct dw_edma_block {
	enum pci_barno			bar;	/* BAR the region lives in */
	off_t				off;	/* byte offset within the BAR */
	size_t				sz;	/* region size in bytes */
};
     38
/*
 * Per-device layout description: where the eDMA registers, linked lists
 * and data buffers sit inside the device's BARs, plus channel/IRQ counts.
 * A const instance hangs off the PCI id table and may be overridden at
 * probe time by VSEC data read from the device.
 */
struct dw_edma_pcie_data {
	/* eDMA registers location */
	struct dw_edma_block		rg;
	/* eDMA memory linked list location */
	struct dw_edma_block		ll_wr[EDMA_MAX_WR_CH];
	struct dw_edma_block		ll_rd[EDMA_MAX_RD_CH];
	/* eDMA memory data location */
	struct dw_edma_block		dt_wr[EDMA_MAX_WR_CH];
	struct dw_edma_block		dt_rd[EDMA_MAX_RD_CH];
	/* Other */
	enum dw_edma_map_format		mf;	/* register mapping format */
	u8				irqs;	/* max IRQ vectors to request */
	u16				wr_ch_cnt;	/* write channels in use */
	u16				rd_ch_cnt;	/* read channels in use */
};
     54
/*
 * Default layout for the Synopsys EDDA reference endpoint: registers in
 * BAR 0, linked lists and data buffers carved out of BAR 2, two channels
 * in each direction.  Values may be narrowed/overridden at probe time by
 * dw_edma_pcie_get_vsec_dma_data().
 */
static const struct dw_edma_pcie_data snps_edda_data = {
	/* eDMA registers location */
	.rg.bar				= BAR_0,
	.rg.off				= 0x00001000,	/*  4 Kbytes */
	.rg.sz				= 0x00002000,	/*  8 Kbytes */
	/* eDMA memory linked list location */
	.ll_wr = {
		/* Channel 0 - BAR 2, offset 0 Mbytes, size 2 Kbytes */
		DW_BLOCK(BAR_2, 0x00000000, 0x00000800)
		/* Channel 1 - BAR 2, offset 2 Mbytes, size 2 Kbytes */
		DW_BLOCK(BAR_2, 0x00200000, 0x00000800)
	},
	.ll_rd = {
		/* Channel 0 - BAR 2, offset 4 Mbytes, size 2 Kbytes */
		DW_BLOCK(BAR_2, 0x00400000, 0x00000800)
		/* Channel 1 - BAR 2, offset 6 Mbytes, size 2 Kbytes */
		DW_BLOCK(BAR_2, 0x00600000, 0x00000800)
	},
	/* eDMA memory data location */
	.dt_wr = {
		/* Channel 0 - BAR 2, offset 8 Mbytes, size 2 Kbytes */
		DW_BLOCK(BAR_2, 0x00800000, 0x00000800)
		/* Channel 1 - BAR 2, offset 9 Mbytes, size 2 Kbytes */
		DW_BLOCK(BAR_2, 0x00900000, 0x00000800)
	},
	.dt_rd = {
		/* Channel 0 - BAR 2, offset 10 Mbytes, size 2 Kbytes */
		DW_BLOCK(BAR_2, 0x00a00000, 0x00000800)
		/* Channel 1 - BAR 2, offset 11 Mbytes, size 2 Kbytes */
		DW_BLOCK(BAR_2, 0x00b00000, 0x00000800)
	},
	/* Other */
	.mf				= EDMA_MF_EDMA_UNROLL,
	.irqs				= 1,
	.wr_ch_cnt			= 2,
	.rd_ch_cnt			= 2,
};
     92
     93static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
     94{
     95	return pci_irq_vector(to_pci_dev(dev), nr);
     96}
     97
/* Callbacks handed to the eDMA core so it can resolve IRQ vectors */
static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
	.irq_vector = dw_edma_pcie_irq_vector,
};
    101
/*
 * Override the compiled-in eDMA layout in @pdata with values read from the
 * device's Synopsys vendor-specific extended capability (VSEC), when one is
 * present.  @pdata is left untouched if no matching VSEC is found, if the
 * capability revision/length are not the expected ones, or if the advertised
 * mapping format is unknown.
 */
static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
					   struct dw_edma_pcie_data *pdata)
{
	u32 val, map;
	u16 vsec;
	u64 off;

	vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_SYNOPSYS,
					DW_PCIE_VSEC_DMA_ID);
	if (!vsec)
		return;

	/* Only revision 0 with a 0x18-byte payload is understood */
	pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
	if (PCI_VNDR_HEADER_REV(val) != 0x00 ||
	    PCI_VNDR_HEADER_LEN(val) != 0x18)
		return;

	pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability DMA\n");
	pci_read_config_dword(pdev, vsec + 0x8, &val);
	map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val);
	if (map != EDMA_MF_EDMA_LEGACY &&
	    map != EDMA_MF_EDMA_UNROLL &&
	    map != EDMA_MF_HDMA_COMPAT)
		return;

	pdata->mf = map;
	pdata->rg.bar = FIELD_GET(DW_PCIE_VSEC_DMA_BAR, val);

	/* Never exceed the channel counts the defaults were sized for */
	pci_read_config_dword(pdev, vsec + 0xc, &val);
	pdata->wr_ch_cnt = min_t(u16, pdata->wr_ch_cnt,
				 FIELD_GET(DW_PCIE_VSEC_DMA_WR_CH, val));
	pdata->rd_ch_cnt = min_t(u16, pdata->rd_ch_cnt,
				 FIELD_GET(DW_PCIE_VSEC_DMA_RD_CH, val));

	/* 64-bit register offset: high dword at +0x14, low dword at +0x10 */
	pci_read_config_dword(pdev, vsec + 0x14, &val);
	off = val;
	pci_read_config_dword(pdev, vsec + 0x10, &val);
	off <<= 32;
	off |= val;
	pdata->rg.off = off;
}
    143
    144static int dw_edma_pcie_probe(struct pci_dev *pdev,
    145			      const struct pci_device_id *pid)
    146{
    147	struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
    148	struct dw_edma_pcie_data vsec_data;
    149	struct device *dev = &pdev->dev;
    150	struct dw_edma_chip *chip;
    151	struct dw_edma *dw;
    152	int err, nr_irqs;
    153	int i, mask;
    154
    155	/* Enable PCI device */
    156	err = pcim_enable_device(pdev);
    157	if (err) {
    158		pci_err(pdev, "enabling device failed\n");
    159		return err;
    160	}
    161
    162	memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
    163
    164	/*
    165	 * Tries to find if exists a PCIe Vendor-Specific Extended Capability
    166	 * for the DMA, if one exists, then reconfigures it.
    167	 */
    168	dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data);
    169
    170	/* Mapping PCI BAR regions */
    171	mask = BIT(vsec_data.rg.bar);
    172	for (i = 0; i < vsec_data.wr_ch_cnt; i++) {
    173		mask |= BIT(vsec_data.ll_wr[i].bar);
    174		mask |= BIT(vsec_data.dt_wr[i].bar);
    175	}
    176	for (i = 0; i < vsec_data.rd_ch_cnt; i++) {
    177		mask |= BIT(vsec_data.ll_rd[i].bar);
    178		mask |= BIT(vsec_data.dt_rd[i].bar);
    179	}
    180	err = pcim_iomap_regions(pdev, mask, pci_name(pdev));
    181	if (err) {
    182		pci_err(pdev, "eDMA BAR I/O remapping failed\n");
    183		return err;
    184	}
    185
    186	pci_set_master(pdev);
    187
    188	/* DMA configuration */
    189	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    190	if (err) {
    191		pci_err(pdev, "DMA mask 64 set failed\n");
    192		return err;
    193	}
    194
    195	/* Data structure allocation */
    196	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
    197	if (!chip)
    198		return -ENOMEM;
    199
    200	dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
    201	if (!dw)
    202		return -ENOMEM;
    203
    204	/* IRQs allocation */
    205	nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
    206					PCI_IRQ_MSI | PCI_IRQ_MSIX);
    207	if (nr_irqs < 1) {
    208		pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n",
    209			nr_irqs);
    210		return -EPERM;
    211	}
    212
    213	/* Data structure initialization */
    214	chip->dw = dw;
    215	chip->dev = dev;
    216	chip->id = pdev->devfn;
    217	chip->irq = pdev->irq;
    218
    219	dw->mf = vsec_data.mf;
    220	dw->nr_irqs = nr_irqs;
    221	dw->ops = &dw_edma_pcie_core_ops;
    222	dw->wr_ch_cnt = vsec_data.wr_ch_cnt;
    223	dw->rd_ch_cnt = vsec_data.rd_ch_cnt;
    224
    225	dw->rg_region.vaddr = pcim_iomap_table(pdev)[vsec_data.rg.bar];
    226	if (!dw->rg_region.vaddr)
    227		return -ENOMEM;
    228
    229	dw->rg_region.vaddr += vsec_data.rg.off;
    230	dw->rg_region.paddr = pdev->resource[vsec_data.rg.bar].start;
    231	dw->rg_region.paddr += vsec_data.rg.off;
    232	dw->rg_region.sz = vsec_data.rg.sz;
    233
    234	for (i = 0; i < dw->wr_ch_cnt; i++) {
    235		struct dw_edma_region *ll_region = &dw->ll_region_wr[i];
    236		struct dw_edma_region *dt_region = &dw->dt_region_wr[i];
    237		struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
    238		struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
    239
    240		ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
    241		if (!ll_region->vaddr)
    242			return -ENOMEM;
    243
    244		ll_region->vaddr += ll_block->off;
    245		ll_region->paddr = pdev->resource[ll_block->bar].start;
    246		ll_region->paddr += ll_block->off;
    247		ll_region->sz = ll_block->sz;
    248
    249		dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
    250		if (!dt_region->vaddr)
    251			return -ENOMEM;
    252
    253		dt_region->vaddr += dt_block->off;
    254		dt_region->paddr = pdev->resource[dt_block->bar].start;
    255		dt_region->paddr += dt_block->off;
    256		dt_region->sz = dt_block->sz;
    257	}
    258
    259	for (i = 0; i < dw->rd_ch_cnt; i++) {
    260		struct dw_edma_region *ll_region = &dw->ll_region_rd[i];
    261		struct dw_edma_region *dt_region = &dw->dt_region_rd[i];
    262		struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
    263		struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
    264
    265		ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
    266		if (!ll_region->vaddr)
    267			return -ENOMEM;
    268
    269		ll_region->vaddr += ll_block->off;
    270		ll_region->paddr = pdev->resource[ll_block->bar].start;
    271		ll_region->paddr += ll_block->off;
    272		ll_region->sz = ll_block->sz;
    273
    274		dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
    275		if (!dt_region->vaddr)
    276			return -ENOMEM;
    277
    278		dt_region->vaddr += dt_block->off;
    279		dt_region->paddr = pdev->resource[dt_block->bar].start;
    280		dt_region->paddr += dt_block->off;
    281		dt_region->sz = dt_block->sz;
    282	}
    283
    284	/* Debug info */
    285	if (dw->mf == EDMA_MF_EDMA_LEGACY)
    286		pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", dw->mf);
    287	else if (dw->mf == EDMA_MF_EDMA_UNROLL)
    288		pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", dw->mf);
    289	else if (dw->mf == EDMA_MF_HDMA_COMPAT)
    290		pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", dw->mf);
    291	else
    292		pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", dw->mf);
    293
    294	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
    295		vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
    296		dw->rg_region.vaddr, &dw->rg_region.paddr);
    297
    298
    299	for (i = 0; i < dw->wr_ch_cnt; i++) {
    300		pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
    301			i, vsec_data.ll_wr[i].bar,
    302			vsec_data.ll_wr[i].off, dw->ll_region_wr[i].sz,
    303			dw->ll_region_wr[i].vaddr, &dw->ll_region_wr[i].paddr);
    304
    305		pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
    306			i, vsec_data.dt_wr[i].bar,
    307			vsec_data.dt_wr[i].off, dw->dt_region_wr[i].sz,
    308			dw->dt_region_wr[i].vaddr, &dw->dt_region_wr[i].paddr);
    309	}
    310
    311	for (i = 0; i < dw->rd_ch_cnt; i++) {
    312		pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
    313			i, vsec_data.ll_rd[i].bar,
    314			vsec_data.ll_rd[i].off, dw->ll_region_rd[i].sz,
    315			dw->ll_region_rd[i].vaddr, &dw->ll_region_rd[i].paddr);
    316
    317		pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
    318			i, vsec_data.dt_rd[i].bar,
    319			vsec_data.dt_rd[i].off, dw->dt_region_rd[i].sz,
    320			dw->dt_region_rd[i].vaddr, &dw->dt_region_rd[i].paddr);
    321	}
    322
    323	pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
    324
    325	/* Validating if PCI interrupts were enabled */
    326	if (!pci_dev_msi_enabled(pdev)) {
    327		pci_err(pdev, "enable interrupt failed\n");
    328		return -EPERM;
    329	}
    330
    331	dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
    332	if (!dw->irq)
    333		return -ENOMEM;
    334
    335	/* Starting eDMA driver */
    336	err = dw_edma_probe(chip);
    337	if (err) {
    338		pci_err(pdev, "eDMA probe failed\n");
    339		return err;
    340	}
    341
    342	/* Saving data structure reference */
    343	pci_set_drvdata(pdev, chip);
    344
    345	return 0;
    346}
    347
    348static void dw_edma_pcie_remove(struct pci_dev *pdev)
    349{
    350	struct dw_edma_chip *chip = pci_get_drvdata(pdev);
    351	int err;
    352
    353	/* Stopping eDMA driver */
    354	err = dw_edma_remove(chip);
    355	if (err)
    356		pci_warn(pdev, "can't remove device properly: %d\n", err);
    357
    358	/* Freeing IRQs */
    359	pci_free_irq_vectors(pdev);
    360}
    361
/* Supported devices: the Synopsys EDDA endpoint, with its default layout */
static const struct pci_device_id dw_edma_pcie_id_table[] = {
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) },
	{ }
};
MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table);
    367
/* PCI driver glue: registered/unregistered by module_pci_driver() below */
static struct pci_driver dw_edma_pcie_driver = {
	.name		= "dw-edma-pcie",
	.id_table	= dw_edma_pcie_id_table,
	.probe		= dw_edma_pcie_probe,
	.remove		= dw_edma_pcie_remove,
};

module_pci_driver(dw_edma_pcie_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA PCIe driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");