cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

dfl-afu-main.c (23801B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Driver for FPGA Accelerated Function Unit (AFU)
      4 *
      5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
      6 *
      7 * Authors:
      8 *   Wu Hao <hao.wu@intel.com>
      9 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
     10 *   Joseph Grecco <joe.grecco@intel.com>
     11 *   Enno Luebbers <enno.luebbers@intel.com>
     12 *   Tim Whisonant <tim.whisonant@intel.com>
     13 *   Ananda Ravuri <ananda.ravuri@intel.com>
     14 *   Henry Mitchel <henry.mitchel@intel.com>
     15 */
     16
     17#include <linux/kernel.h>
     18#include <linux/module.h>
     19#include <linux/uaccess.h>
     20#include <linux/fpga-dfl.h>
     21
     22#include "dfl-afu.h"
     23
     24#define RST_POLL_INVL 10 /* us */
     25#define RST_POLL_TIMEOUT 1000 /* us */
     26
     27/**
     28 * __afu_port_enable - enable a port by clear reset
     29 * @pdev: port platform device.
     30 *
     31 * Enable Port by clear the port soft reset bit, which is set by default.
     32 * The AFU is unable to respond to any MMIO access while in reset.
     33 * __afu_port_enable function should only be used after __afu_port_disable
     34 * function.
     35 *
     36 * The caller needs to hold lock for protection.
     37 */
     38int __afu_port_enable(struct platform_device *pdev)
     39{
     40	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
     41	void __iomem *base;
     42	u64 v;
     43
     44	WARN_ON(!pdata->disable_count);
     45
     46	if (--pdata->disable_count != 0)
     47		return 0;
     48
     49	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
     50
     51	/* Clear port soft reset */
     52	v = readq(base + PORT_HDR_CTRL);
     53	v &= ~PORT_CTRL_SFTRST;
     54	writeq(v, base + PORT_HDR_CTRL);
     55
     56	/*
     57	 * HW clears the ack bit to indicate that the port is fully out
     58	 * of reset.
     59	 */
     60	if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
     61			       !(v & PORT_CTRL_SFTRST_ACK),
     62			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
     63		dev_err(&pdev->dev, "timeout, failure to enable device\n");
     64		return -ETIMEDOUT;
     65	}
     66
     67	return 0;
     68}
     69
     70/**
     71 * __afu_port_disable - disable a port by hold reset
     72 * @pdev: port platform device.
     73 *
     74 * Disable Port by setting the port soft reset bit, it puts the port into reset.
     75 *
     76 * The caller needs to hold lock for protection.
     77 */
     78int __afu_port_disable(struct platform_device *pdev)
     79{
     80	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
     81	void __iomem *base;
     82	u64 v;
     83
     84	if (pdata->disable_count++ != 0)
     85		return 0;
     86
     87	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
     88
     89	/* Set port soft reset */
     90	v = readq(base + PORT_HDR_CTRL);
     91	v |= PORT_CTRL_SFTRST;
     92	writeq(v, base + PORT_HDR_CTRL);
     93
     94	/*
     95	 * HW sets ack bit to 1 when all outstanding requests have been drained
     96	 * on this port and minimum soft reset pulse width has elapsed.
     97	 * Driver polls port_soft_reset_ack to determine if reset done by HW.
     98	 */
     99	if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
    100			       v & PORT_CTRL_SFTRST_ACK,
    101			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
    102		dev_err(&pdev->dev, "timeout, failure to disable device\n");
    103		return -ETIMEDOUT;
    104	}
    105
    106	return 0;
    107}
    108
    109/*
    110 * This function resets the FPGA Port and its accelerator (AFU) by function
    111 * __port_disable and __port_enable (set port soft reset bit and then clear
    112 * it). Userspace can do Port reset at any time, e.g. during DMA or Partial
    113 * Reconfiguration. But it should never cause any system level issue, only
    114 * functional failure (e.g. DMA or PR operation failure) and be recoverable
    115 * from the failure.
    116 *
    117 * Note: the accelerator (AFU) is not accessible when its port is in reset
    118 * (disabled). Any attempts on MMIO access to AFU while in reset, will
    119 * result errors reported via port error reporting sub feature (if present).
    120 */
    121static int __port_reset(struct platform_device *pdev)
    122{
    123	int ret;
    124
    125	ret = __afu_port_disable(pdev);
    126	if (ret)
    127		return ret;
    128
    129	return __afu_port_enable(pdev);
    130}
    131
    132static int port_reset(struct platform_device *pdev)
    133{
    134	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
    135	int ret;
    136
    137	mutex_lock(&pdata->lock);
    138	ret = __port_reset(pdev);
    139	mutex_unlock(&pdata->lock);
    140
    141	return ret;
    142}
    143
    144static int port_get_id(struct platform_device *pdev)
    145{
    146	void __iomem *base;
    147
    148	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
    149
    150	return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
    151}
    152
    153static ssize_t
    154id_show(struct device *dev, struct device_attribute *attr, char *buf)
    155{
    156	int id = port_get_id(to_platform_device(dev));
    157
    158	return scnprintf(buf, PAGE_SIZE, "%d\n", id);
    159}
    160static DEVICE_ATTR_RO(id);
    161
    162static ssize_t
    163ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
    164{
    165	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
    166	void __iomem *base;
    167	u64 v;
    168
    169	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
    170
    171	mutex_lock(&pdata->lock);
    172	v = readq(base + PORT_HDR_CTRL);
    173	mutex_unlock(&pdata->lock);
    174
    175	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
    176}
    177
    178static ssize_t
    179ltr_store(struct device *dev, struct device_attribute *attr,
    180	  const char *buf, size_t count)
    181{
    182	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
    183	void __iomem *base;
    184	bool ltr;
    185	u64 v;
    186
    187	if (kstrtobool(buf, &ltr))
    188		return -EINVAL;
    189
    190	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
    191
    192	mutex_lock(&pdata->lock);
    193	v = readq(base + PORT_HDR_CTRL);
    194	v &= ~PORT_CTRL_LATENCY;
    195	v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
    196	writeq(v, base + PORT_HDR_CTRL);
    197	mutex_unlock(&pdata->lock);
    198
    199	return count;
    200}
    201static DEVICE_ATTR_RW(ltr);
    202
    203static ssize_t
    204ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
    205{
    206	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
    207	void __iomem *base;
    208	u64 v;
    209
    210	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
    211
    212	mutex_lock(&pdata->lock);
    213	v = readq(base + PORT_HDR_STS);
    214	mutex_unlock(&pdata->lock);
    215
    216	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
    217}
    218
    219static ssize_t
    220ap1_event_store(struct device *dev, struct device_attribute *attr,
    221		const char *buf, size_t count)
    222{
    223	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
    224	void __iomem *base;
    225	bool clear;
    226
    227	if (kstrtobool(buf, &clear) || !clear)
    228		return -EINVAL;
    229
    230	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
    231
    232	mutex_lock(&pdata->lock);
    233	writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
    234	mutex_unlock(&pdata->lock);
    235
    236	return count;
    237}
    238static DEVICE_ATTR_RW(ap1_event);
    239
    240static ssize_t
    241ap2_event_show(struct device *dev, struct device_attribute *attr,
    242	       char *buf)
    243{
    244	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
    245	void __iomem *base;
    246	u64 v;
    247
    248	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
    249
    250	mutex_lock(&pdata->lock);
    251	v = readq(base + PORT_HDR_STS);
    252	mutex_unlock(&pdata->lock);
    253
    254	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
    255}
    256
    257static ssize_t
    258ap2_event_store(struct device *dev, struct device_attribute *attr,
    259		const char *buf, size_t count)
    260{
    261	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
    262	void __iomem *base;
    263	bool clear;
    264
    265	if (kstrtobool(buf, &clear) || !clear)
    266		return -EINVAL;
    267
    268	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
    269
    270	mutex_lock(&pdata->lock);
    271	writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
    272	mutex_unlock(&pdata->lock);
    273
    274	return count;
    275}
    276static DEVICE_ATTR_RW(ap2_event);
    277
    278static ssize_t
    279power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
    280{
    281	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
    282	void __iomem *base;
    283	u64 v;
    284
    285	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
    286
    287	mutex_lock(&pdata->lock);
    288	v = readq(base + PORT_HDR_STS);
    289	mutex_unlock(&pdata->lock);
    290
    291	return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
    292}
    293static DEVICE_ATTR_RO(power_state);
    294
    295static ssize_t
    296userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
    297		      const char *buf, size_t count)
    298{
    299	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
    300	u64 userclk_freq_cmd;
    301	void __iomem *base;
    302
    303	if (kstrtou64(buf, 0, &userclk_freq_cmd))
    304		return -EINVAL;
    305
    306	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
    307
    308	mutex_lock(&pdata->lock);
    309	writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
    310	mutex_unlock(&pdata->lock);
    311
    312	return count;
    313}
    314static DEVICE_ATTR_WO(userclk_freqcmd);
    315
    316static ssize_t
    317userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
    318			  const char *buf, size_t count)
    319{
    320	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
    321	u64 userclk_freqcntr_cmd;
    322	void __iomem *base;
    323
    324	if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
    325		return -EINVAL;
    326
    327	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
    328
    329	mutex_lock(&pdata->lock);
    330	writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
    331	mutex_unlock(&pdata->lock);
    332
    333	return count;
    334}
    335static DEVICE_ATTR_WO(userclk_freqcntrcmd);
    336
    337static ssize_t
    338userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
    339		     char *buf)
    340{
    341	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
    342	u64 userclk_freqsts;
    343	void __iomem *base;
    344
    345	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
    346
    347	mutex_lock(&pdata->lock);
    348	userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
    349	mutex_unlock(&pdata->lock);
    350
    351	return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
    352}
    353static DEVICE_ATTR_RO(userclk_freqsts);
    354
    355static ssize_t
    356userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
    357			 char *buf)
    358{
    359	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
    360	u64 userclk_freqcntrsts;
    361	void __iomem *base;
    362
    363	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
    364
    365	mutex_lock(&pdata->lock);
    366	userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
    367	mutex_unlock(&pdata->lock);
    368
    369	return sprintf(buf, "0x%llx\n",
    370		       (unsigned long long)userclk_freqcntrsts);
    371}
    372static DEVICE_ATTR_RO(userclk_freqcntrsts);
    373
    374static struct attribute *port_hdr_attrs[] = {
    375	&dev_attr_id.attr,
    376	&dev_attr_ltr.attr,
    377	&dev_attr_ap1_event.attr,
    378	&dev_attr_ap2_event.attr,
    379	&dev_attr_power_state.attr,
    380	&dev_attr_userclk_freqcmd.attr,
    381	&dev_attr_userclk_freqcntrcmd.attr,
    382	&dev_attr_userclk_freqsts.attr,
    383	&dev_attr_userclk_freqcntrsts.attr,
    384	NULL,
    385};
    386
    387static umode_t port_hdr_attrs_visible(struct kobject *kobj,
    388				      struct attribute *attr, int n)
    389{
    390	struct device *dev = kobj_to_dev(kobj);
    391	umode_t mode = attr->mode;
    392	void __iomem *base;
    393
    394	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
    395
    396	if (dfl_feature_revision(base) > 0) {
    397		/*
    398		 * userclk sysfs interfaces are only visible in case port
    399		 * revision is 0, as hardware with revision >0 doesn't
    400		 * support this.
    401		 */
    402		if (attr == &dev_attr_userclk_freqcmd.attr ||
    403		    attr == &dev_attr_userclk_freqcntrcmd.attr ||
    404		    attr == &dev_attr_userclk_freqsts.attr ||
    405		    attr == &dev_attr_userclk_freqcntrsts.attr)
    406			mode = 0;
    407	}
    408
    409	return mode;
    410}
    411
    412static const struct attribute_group port_hdr_group = {
    413	.attrs      = port_hdr_attrs,
    414	.is_visible = port_hdr_attrs_visible,
    415};
    416
    417static int port_hdr_init(struct platform_device *pdev,
    418			 struct dfl_feature *feature)
    419{
    420	port_reset(pdev);
    421
    422	return 0;
    423}
    424
    425static long
    426port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
    427	       unsigned int cmd, unsigned long arg)
    428{
    429	long ret;
    430
    431	switch (cmd) {
    432	case DFL_FPGA_PORT_RESET:
    433		if (!arg)
    434			ret = port_reset(pdev);
    435		else
    436			ret = -EINVAL;
    437		break;
    438	default:
    439		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
    440		ret = -ENODEV;
    441	}
    442
    443	return ret;
    444}
    445
    446static const struct dfl_feature_id port_hdr_id_table[] = {
    447	{.id = PORT_FEATURE_ID_HEADER,},
    448	{0,}
    449};
    450
    451static const struct dfl_feature_ops port_hdr_ops = {
    452	.init = port_hdr_init,
    453	.ioctl = port_hdr_ioctl,
    454};
    455
    456static ssize_t
    457afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
    458{
    459	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
    460	void __iomem *base;
    461	u64 guidl, guidh;
    462
    463	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);
    464
    465	mutex_lock(&pdata->lock);
    466	if (pdata->disable_count) {
    467		mutex_unlock(&pdata->lock);
    468		return -EBUSY;
    469	}
    470
    471	guidl = readq(base + GUID_L);
    472	guidh = readq(base + GUID_H);
    473	mutex_unlock(&pdata->lock);
    474
    475	return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
    476}
    477static DEVICE_ATTR_RO(afu_id);
    478
    479static struct attribute *port_afu_attrs[] = {
    480	&dev_attr_afu_id.attr,
    481	NULL
    482};
    483
    484static umode_t port_afu_attrs_visible(struct kobject *kobj,
    485				      struct attribute *attr, int n)
    486{
    487	struct device *dev = kobj_to_dev(kobj);
    488
    489	/*
    490	 * sysfs entries are visible only if related private feature is
    491	 * enumerated.
    492	 */
    493	if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
    494		return 0;
    495
    496	return attr->mode;
    497}
    498
    499static const struct attribute_group port_afu_group = {
    500	.attrs      = port_afu_attrs,
    501	.is_visible = port_afu_attrs_visible,
    502};
    503
    504static int port_afu_init(struct platform_device *pdev,
    505			 struct dfl_feature *feature)
    506{
    507	struct resource *res = &pdev->resource[feature->resource_index];
    508
    509	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
    510				   DFL_PORT_REGION_INDEX_AFU,
    511				   resource_size(res), res->start,
    512				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
    513				   DFL_PORT_REGION_WRITE);
    514}
    515
    516static const struct dfl_feature_id port_afu_id_table[] = {
    517	{.id = PORT_FEATURE_ID_AFU,},
    518	{0,}
    519};
    520
    521static const struct dfl_feature_ops port_afu_ops = {
    522	.init = port_afu_init,
    523};
    524
    525static int port_stp_init(struct platform_device *pdev,
    526			 struct dfl_feature *feature)
    527{
    528	struct resource *res = &pdev->resource[feature->resource_index];
    529
    530	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
    531				   DFL_PORT_REGION_INDEX_STP,
    532				   resource_size(res), res->start,
    533				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
    534				   DFL_PORT_REGION_WRITE);
    535}
    536
    537static const struct dfl_feature_id port_stp_id_table[] = {
    538	{.id = PORT_FEATURE_ID_STP,},
    539	{0,}
    540};
    541
    542static const struct dfl_feature_ops port_stp_ops = {
    543	.init = port_stp_init,
    544};
    545
    546static long
    547port_uint_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
    548		unsigned int cmd, unsigned long arg)
    549{
    550	switch (cmd) {
    551	case DFL_FPGA_PORT_UINT_GET_IRQ_NUM:
    552		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
    553	case DFL_FPGA_PORT_UINT_SET_IRQ:
    554		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
    555	default:
    556		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
    557		return -ENODEV;
    558	}
    559}
    560
    561static const struct dfl_feature_id port_uint_id_table[] = {
    562	{.id = PORT_FEATURE_ID_UINT,},
    563	{0,}
    564};
    565
    566static const struct dfl_feature_ops port_uint_ops = {
    567	.ioctl = port_uint_ioctl,
    568};
    569
    570static struct dfl_feature_driver port_feature_drvs[] = {
    571	{
    572		.id_table = port_hdr_id_table,
    573		.ops = &port_hdr_ops,
    574	},
    575	{
    576		.id_table = port_afu_id_table,
    577		.ops = &port_afu_ops,
    578	},
    579	{
    580		.id_table = port_err_id_table,
    581		.ops = &port_err_ops,
    582	},
    583	{
    584		.id_table = port_stp_id_table,
    585		.ops = &port_stp_ops,
    586	},
    587	{
    588		.id_table = port_uint_id_table,
    589		.ops = &port_uint_ops,
    590	},
    591	{
    592		.ops = NULL,
    593	}
    594};
    595
    596static int afu_open(struct inode *inode, struct file *filp)
    597{
    598	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
    599	struct dfl_feature_platform_data *pdata;
    600	int ret;
    601
    602	pdata = dev_get_platdata(&fdev->dev);
    603	if (WARN_ON(!pdata))
    604		return -ENODEV;
    605
    606	mutex_lock(&pdata->lock);
    607	ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
    608	if (!ret) {
    609		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
    610			dfl_feature_dev_use_count(pdata));
    611		filp->private_data = fdev;
    612	}
    613	mutex_unlock(&pdata->lock);
    614
    615	return ret;
    616}
    617
    618static int afu_release(struct inode *inode, struct file *filp)
    619{
    620	struct platform_device *pdev = filp->private_data;
    621	struct dfl_feature_platform_data *pdata;
    622	struct dfl_feature *feature;
    623
    624	dev_dbg(&pdev->dev, "Device File Release\n");
    625
    626	pdata = dev_get_platdata(&pdev->dev);
    627
    628	mutex_lock(&pdata->lock);
    629	dfl_feature_dev_use_end(pdata);
    630
    631	if (!dfl_feature_dev_use_count(pdata)) {
    632		dfl_fpga_dev_for_each_feature(pdata, feature)
    633			dfl_fpga_set_irq_triggers(feature, 0,
    634						  feature->nr_irqs, NULL);
    635		__port_reset(pdev);
    636		afu_dma_region_destroy(pdata);
    637	}
    638	mutex_unlock(&pdata->lock);
    639
    640	return 0;
    641}
    642
    643static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
    644				      unsigned long arg)
    645{
    646	/* No extension support for now */
    647	return 0;
    648}
    649
    650static long
    651afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
    652{
    653	struct dfl_fpga_port_info info;
    654	struct dfl_afu *afu;
    655	unsigned long minsz;
    656
    657	minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);
    658
    659	if (copy_from_user(&info, arg, minsz))
    660		return -EFAULT;
    661
    662	if (info.argsz < minsz)
    663		return -EINVAL;
    664
    665	mutex_lock(&pdata->lock);
    666	afu = dfl_fpga_pdata_get_private(pdata);
    667	info.flags = 0;
    668	info.num_regions = afu->num_regions;
    669	info.num_umsgs = afu->num_umsgs;
    670	mutex_unlock(&pdata->lock);
    671
    672	if (copy_to_user(arg, &info, sizeof(info)))
    673		return -EFAULT;
    674
    675	return 0;
    676}
    677
    678static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
    679				      void __user *arg)
    680{
    681	struct dfl_fpga_port_region_info rinfo;
    682	struct dfl_afu_mmio_region region;
    683	unsigned long minsz;
    684	long ret;
    685
    686	minsz = offsetofend(struct dfl_fpga_port_region_info, offset);
    687
    688	if (copy_from_user(&rinfo, arg, minsz))
    689		return -EFAULT;
    690
    691	if (rinfo.argsz < minsz || rinfo.padding)
    692		return -EINVAL;
    693
    694	ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
    695	if (ret)
    696		return ret;
    697
    698	rinfo.flags = region.flags;
    699	rinfo.size = region.size;
    700	rinfo.offset = region.offset;
    701
    702	if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
    703		return -EFAULT;
    704
    705	return 0;
    706}
    707
    708static long
    709afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
    710{
    711	struct dfl_fpga_port_dma_map map;
    712	unsigned long minsz;
    713	long ret;
    714
    715	minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);
    716
    717	if (copy_from_user(&map, arg, minsz))
    718		return -EFAULT;
    719
    720	if (map.argsz < minsz || map.flags)
    721		return -EINVAL;
    722
    723	ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
    724	if (ret)
    725		return ret;
    726
    727	if (copy_to_user(arg, &map, sizeof(map))) {
    728		afu_dma_unmap_region(pdata, map.iova);
    729		return -EFAULT;
    730	}
    731
    732	dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
    733		(unsigned long long)map.user_addr,
    734		(unsigned long long)map.length,
    735		(unsigned long long)map.iova);
    736
    737	return 0;
    738}
    739
    740static long
    741afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
    742{
    743	struct dfl_fpga_port_dma_unmap unmap;
    744	unsigned long minsz;
    745
    746	minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);
    747
    748	if (copy_from_user(&unmap, arg, minsz))
    749		return -EFAULT;
    750
    751	if (unmap.argsz < minsz || unmap.flags)
    752		return -EINVAL;
    753
    754	return afu_dma_unmap_region(pdata, unmap.iova);
    755}
    756
    757static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
    758{
    759	struct platform_device *pdev = filp->private_data;
    760	struct dfl_feature_platform_data *pdata;
    761	struct dfl_feature *f;
    762	long ret;
    763
    764	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
    765
    766	pdata = dev_get_platdata(&pdev->dev);
    767
    768	switch (cmd) {
    769	case DFL_FPGA_GET_API_VERSION:
    770		return DFL_FPGA_API_VERSION;
    771	case DFL_FPGA_CHECK_EXTENSION:
    772		return afu_ioctl_check_extension(pdata, arg);
    773	case DFL_FPGA_PORT_GET_INFO:
    774		return afu_ioctl_get_info(pdata, (void __user *)arg);
    775	case DFL_FPGA_PORT_GET_REGION_INFO:
    776		return afu_ioctl_get_region_info(pdata, (void __user *)arg);
    777	case DFL_FPGA_PORT_DMA_MAP:
    778		return afu_ioctl_dma_map(pdata, (void __user *)arg);
    779	case DFL_FPGA_PORT_DMA_UNMAP:
    780		return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
    781	default:
    782		/*
    783		 * Let sub-feature's ioctl function to handle the cmd
    784		 * Sub-feature's ioctl returns -ENODEV when cmd is not
    785		 * handled in this sub feature, and returns 0 and other
    786		 * error code if cmd is handled.
    787		 */
    788		dfl_fpga_dev_for_each_feature(pdata, f)
    789			if (f->ops && f->ops->ioctl) {
    790				ret = f->ops->ioctl(pdev, f, cmd, arg);
    791				if (ret != -ENODEV)
    792					return ret;
    793			}
    794	}
    795
    796	return -EINVAL;
    797}
    798
    799static const struct vm_operations_struct afu_vma_ops = {
    800#ifdef CONFIG_HAVE_IOREMAP_PROT
    801	.access = generic_access_phys,
    802#endif
    803};
    804
    805static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
    806{
    807	struct platform_device *pdev = filp->private_data;
    808	struct dfl_feature_platform_data *pdata;
    809	u64 size = vma->vm_end - vma->vm_start;
    810	struct dfl_afu_mmio_region region;
    811	u64 offset;
    812	int ret;
    813
    814	if (!(vma->vm_flags & VM_SHARED))
    815		return -EINVAL;
    816
    817	pdata = dev_get_platdata(&pdev->dev);
    818
    819	offset = vma->vm_pgoff << PAGE_SHIFT;
    820	ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
    821	if (ret)
    822		return ret;
    823
    824	if (!(region.flags & DFL_PORT_REGION_MMAP))
    825		return -EINVAL;
    826
    827	if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
    828		return -EPERM;
    829
    830	if ((vma->vm_flags & VM_WRITE) &&
    831	    !(region.flags & DFL_PORT_REGION_WRITE))
    832		return -EPERM;
    833
    834	/* Support debug access to the mapping */
    835	vma->vm_ops = &afu_vma_ops;
    836
    837	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    838
    839	return remap_pfn_range(vma, vma->vm_start,
    840			(region.phys + (offset - region.offset)) >> PAGE_SHIFT,
    841			size, vma->vm_page_prot);
    842}
    843
    844static const struct file_operations afu_fops = {
    845	.owner = THIS_MODULE,
    846	.open = afu_open,
    847	.release = afu_release,
    848	.unlocked_ioctl = afu_ioctl,
    849	.mmap = afu_mmap,
    850};
    851
    852static int afu_dev_init(struct platform_device *pdev)
    853{
    854	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
    855	struct dfl_afu *afu;
    856
    857	afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
    858	if (!afu)
    859		return -ENOMEM;
    860
    861	afu->pdata = pdata;
    862
    863	mutex_lock(&pdata->lock);
    864	dfl_fpga_pdata_set_private(pdata, afu);
    865	afu_mmio_region_init(pdata);
    866	afu_dma_region_init(pdata);
    867	mutex_unlock(&pdata->lock);
    868
    869	return 0;
    870}
    871
    872static int afu_dev_destroy(struct platform_device *pdev)
    873{
    874	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
    875
    876	mutex_lock(&pdata->lock);
    877	afu_mmio_region_destroy(pdata);
    878	afu_dma_region_destroy(pdata);
    879	dfl_fpga_pdata_set_private(pdata, NULL);
    880	mutex_unlock(&pdata->lock);
    881
    882	return 0;
    883}
    884
    885static int port_enable_set(struct platform_device *pdev, bool enable)
    886{
    887	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
    888	int ret;
    889
    890	mutex_lock(&pdata->lock);
    891	if (enable)
    892		ret = __afu_port_enable(pdev);
    893	else
    894		ret = __afu_port_disable(pdev);
    895	mutex_unlock(&pdata->lock);
    896
    897	return ret;
    898}
    899
    900static struct dfl_fpga_port_ops afu_port_ops = {
    901	.name = DFL_FPGA_FEATURE_DEV_PORT,
    902	.owner = THIS_MODULE,
    903	.get_id = port_get_id,
    904	.enable_set = port_enable_set,
    905};
    906
    907static int afu_probe(struct platform_device *pdev)
    908{
    909	int ret;
    910
    911	dev_dbg(&pdev->dev, "%s\n", __func__);
    912
    913	ret = afu_dev_init(pdev);
    914	if (ret)
    915		goto exit;
    916
    917	ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
    918	if (ret)
    919		goto dev_destroy;
    920
    921	ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
    922	if (ret) {
    923		dfl_fpga_dev_feature_uinit(pdev);
    924		goto dev_destroy;
    925	}
    926
    927	return 0;
    928
    929dev_destroy:
    930	afu_dev_destroy(pdev);
    931exit:
    932	return ret;
    933}
    934
    935static int afu_remove(struct platform_device *pdev)
    936{
    937	dev_dbg(&pdev->dev, "%s\n", __func__);
    938
    939	dfl_fpga_dev_ops_unregister(pdev);
    940	dfl_fpga_dev_feature_uinit(pdev);
    941	afu_dev_destroy(pdev);
    942
    943	return 0;
    944}
    945
    946static const struct attribute_group *afu_dev_groups[] = {
    947	&port_hdr_group,
    948	&port_afu_group,
    949	&port_err_group,
    950	NULL
    951};
    952
    953static struct platform_driver afu_driver = {
    954	.driver	= {
    955		.name	    = DFL_FPGA_FEATURE_DEV_PORT,
    956		.dev_groups = afu_dev_groups,
    957	},
    958	.probe   = afu_probe,
    959	.remove  = afu_remove,
    960};
    961
    962static int __init afu_init(void)
    963{
    964	int ret;
    965
    966	dfl_fpga_port_ops_add(&afu_port_ops);
    967
    968	ret = platform_driver_register(&afu_driver);
    969	if (ret)
    970		dfl_fpga_port_ops_del(&afu_port_ops);
    971
    972	return ret;
    973}
    974
    975static void __exit afu_exit(void)
    976{
    977	platform_driver_unregister(&afu_driver);
    978
    979	dfl_fpga_port_ops_del(&afu_port_ops);
    980}
    981
    982module_init(afu_init);
    983module_exit(afu_exit);
    984
    985MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
    986MODULE_AUTHOR("Intel Corporation");
    987MODULE_LICENSE("GPL v2");
    988MODULE_ALIAS("platform:dfl-port");