cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

switchtec.c (49420B)
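
The driver below exposes each switch as a character device (named switchtec<N> in stdev_create()) and tunnels MRPC commands through plain write()/read() calls: a write of a 4-byte command ID plus input payload queues the command, and a subsequent read returns the 4-byte return code followed by the output data (see switchtec_dev_write() and switchtec_dev_read()). A minimal userspace sketch of that flow is shown here; the device path, command ID and buffer sizes are illustrative placeholders, not values taken from this driver.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		uint32_t cmd_id = 0x01;        /* hypothetical MRPC command ID */
		uint8_t wbuf[4 + 8] = { 0 };   /* 4-byte command ID + input payload */
		uint8_t rbuf[4 + 64];          /* 4-byte return code + output data */
		uint32_t ret;
		int fd;

		fd = open("/dev/switchtec0", O_RDWR);  /* name from dev_set_name("switchtec%d") */
		if (fd < 0)
			return 1;

		memcpy(wbuf, &cmd_id, sizeof(cmd_id));
		if (write(fd, wbuf, sizeof(wbuf)) != sizeof(wbuf))  /* queue the command */
			return 1;

		if (read(fd, rbuf, sizeof(rbuf)) < 0)  /* block until it completes */
			return 1;

		memcpy(&ret, rbuf, sizeof(ret));
		printf("MRPC return code: %u\n", ret);
		close(fd);
		return 0;
	}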


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Microsemi Switchtec(tm) PCIe Management Driver
      4 * Copyright (c) 2017, Microsemi Corporation
      5 */
      6
      7#include <linux/switchtec.h>
      8#include <linux/switchtec_ioctl.h>
      9
     10#include <linux/interrupt.h>
     11#include <linux/module.h>
     12#include <linux/fs.h>
     13#include <linux/uaccess.h>
     14#include <linux/poll.h>
     15#include <linux/wait.h>
     16#include <linux/io-64-nonatomic-lo-hi.h>
     17#include <linux/nospec.h>
     18
     19MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
     20MODULE_VERSION("0.1");
     21MODULE_LICENSE("GPL");
     22MODULE_AUTHOR("Microsemi Corporation");
     23
     24static int max_devices = 16;
     25module_param(max_devices, int, 0644);
     26MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
     27
     28static bool use_dma_mrpc = true;
     29module_param(use_dma_mrpc, bool, 0644);
     30MODULE_PARM_DESC(use_dma_mrpc,
     31		 "Enable the use of the DMA MRPC feature");
     32
     33static int nirqs = 32;
     34module_param(nirqs, int, 0644);
     35MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful for NTB applications)");
     36
     37static dev_t switchtec_devt;
     38static DEFINE_IDA(switchtec_minor_ida);
     39
     40struct class *switchtec_class;
     41EXPORT_SYMBOL_GPL(switchtec_class);
     42
     43enum mrpc_state {
     44	MRPC_IDLE = 0,
     45	MRPC_QUEUED,
     46	MRPC_RUNNING,
     47	MRPC_DONE,
     48	MRPC_IO_ERROR,
     49};
     50
     51struct switchtec_user {
     52	struct switchtec_dev *stdev;
     53
     54	enum mrpc_state state;
     55
     56	wait_queue_head_t cmd_comp;
     57	struct kref kref;
     58	struct list_head list;
     59
     60	bool cmd_done;
     61	u32 cmd;
     62	u32 status;
     63	u32 return_code;
     64	size_t data_len;
     65	size_t read_len;
     66	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
     67	int event_cnt;
     68};
     69
     70/*
     71 * The MMIO reads to the device_id register should always return the device ID
     72 * of the device, otherwise the firmware is probably stuck or unreachable
     73 * due to a firmware reset which clears PCI state including the BARs and Memory
     74 * Space Enable bits.
     75 */
     76static int is_firmware_running(struct switchtec_dev *stdev)
     77{
     78	u32 device = ioread32(&stdev->mmio_sys_info->device_id);
     79
     80	return stdev->pdev->device == device;
     81}
     82
     83static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
     84{
     85	struct switchtec_user *stuser;
     86
     87	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
     88	if (!stuser)
     89		return ERR_PTR(-ENOMEM);
     90
     91	get_device(&stdev->dev);
     92	stuser->stdev = stdev;
     93	kref_init(&stuser->kref);
     94	INIT_LIST_HEAD(&stuser->list);
     95	init_waitqueue_head(&stuser->cmd_comp);
     96	stuser->event_cnt = atomic_read(&stdev->event_cnt);
     97
     98	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
     99
    100	return stuser;
    101}
    102
    103static void stuser_free(struct kref *kref)
    104{
    105	struct switchtec_user *stuser;
    106
    107	stuser = container_of(kref, struct switchtec_user, kref);
    108
    109	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);
    110
    111	put_device(&stuser->stdev->dev);
    112	kfree(stuser);
    113}
    114
    115static void stuser_put(struct switchtec_user *stuser)
    116{
    117	kref_put(&stuser->kref, stuser_free);
    118}
    119
    120static void stuser_set_state(struct switchtec_user *stuser,
    121			     enum mrpc_state state)
    122{
    123	/* requires the mrpc_mutex to already be held when called */
    124
    125	static const char * const state_names[] = {
    126		[MRPC_IDLE] = "IDLE",
    127		[MRPC_QUEUED] = "QUEUED",
    128		[MRPC_RUNNING] = "RUNNING",
    129		[MRPC_DONE] = "DONE",
    130		[MRPC_IO_ERROR] = "IO_ERROR",
    131	};
    132
    133	stuser->state = state;
    134
    135	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
    136		stuser, state_names[state]);
    137}
    138
    139static void mrpc_complete_cmd(struct switchtec_dev *stdev);
    140
    141static void flush_wc_buf(struct switchtec_dev *stdev)
    142{
    143	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
    144
    145	/*
    146	 * odb (outbound doorbell) register is processed by low latency
    147	 * hardware and w/o side effect
    148	 */
    149	mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
    150		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
    151	ioread32(&mmio_dbmsg->odb);
    152}
    153
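/*
 * Submit the command at the head of the MRPC queue if the executor is idle:
 * copy the user's input payload into the GAS input buffer, write the command
 * register to start firmware execution, and schedule a 500ms delayed work
 * item that polls for completion in case no completion interrupt arrives.
 */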
    154static void mrpc_cmd_submit(struct switchtec_dev *stdev)
    155{
    156	/* requires the mrpc_mutex to already be held when called */
    157
    158	struct switchtec_user *stuser;
    159
    160	if (stdev->mrpc_busy)
    161		return;
    162
    163	if (list_empty(&stdev->mrpc_queue))
    164		return;
    165
    166	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
    167			    list);
    168
    169	if (stdev->dma_mrpc) {
    170		stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
    171		memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
    172	}
    173
    174	stuser_set_state(stuser, MRPC_RUNNING);
    175	stdev->mrpc_busy = 1;
    176	memcpy_toio(&stdev->mmio_mrpc->input_data,
    177		    stuser->data, stuser->data_len);
    178	flush_wc_buf(stdev);
    179	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);
    180
    181	schedule_delayed_work(&stdev->mrpc_timeout,
    182			      msecs_to_jiffies(500));
    183}
    184
    185static int mrpc_queue_cmd(struct switchtec_user *stuser)
    186{
    187	/* requires the mrpc_mutex to already be held when called */
    188
    189	struct switchtec_dev *stdev = stuser->stdev;
    190
    191	kref_get(&stuser->kref);
    192	stuser->read_len = sizeof(stuser->data);
    193	stuser_set_state(stuser, MRPC_QUEUED);
    194	stuser->cmd_done = false;
    195	list_add_tail(&stuser->list, &stdev->mrpc_queue);
    196
    197	mrpc_cmd_submit(stdev);
    198
    199	return 0;
    200}
    201
    202static void mrpc_cleanup_cmd(struct switchtec_dev *stdev)
    203{
    204	/* requires the mrpc_mutex to already be held when called */
    205
    206	struct switchtec_user *stuser = list_entry(stdev->mrpc_queue.next,
    207						   struct switchtec_user, list);
    208
    209	stuser->cmd_done = true;
    210	wake_up_interruptible(&stuser->cmd_comp);
    211	list_del_init(&stuser->list);
    212	stuser_put(stuser);
    213	stdev->mrpc_busy = 0;
    214
    215	mrpc_cmd_submit(stdev);
    216}
    217
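/*
 * Complete the command at the head of the queue: read the status, return
 * code and output data either from the DMA MRPC buffer (when enabled) or
 * from the memory-mapped GAS, then wake the waiting user and kick off the
 * next queued command via mrpc_cleanup_cmd().
 */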
    218static void mrpc_complete_cmd(struct switchtec_dev *stdev)
    219{
    220	/* requires the mrpc_mutex to already be held when called */
    221
    222	struct switchtec_user *stuser;
    223
    224	if (list_empty(&stdev->mrpc_queue))
    225		return;
    226
    227	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
    228			    list);
    229
    230	if (stdev->dma_mrpc)
    231		stuser->status = stdev->dma_mrpc->status;
    232	else
    233		stuser->status = ioread32(&stdev->mmio_mrpc->status);
    234
    235	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
    236		return;
    237
    238	stuser_set_state(stuser, MRPC_DONE);
    239	stuser->return_code = 0;
    240
    241	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE &&
    242	    stuser->status != SWITCHTEC_MRPC_STATUS_ERROR)
    243		goto out;
    244
    245	if (stdev->dma_mrpc)
    246		stuser->return_code = stdev->dma_mrpc->rtn_code;
    247	else
    248		stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
    249	if (stuser->return_code != 0)
    250		goto out;
    251
    252	if (stdev->dma_mrpc)
    253		memcpy(stuser->data, &stdev->dma_mrpc->data,
    254			      stuser->read_len);
    255	else
    256		memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
    257			      stuser->read_len);
    258out:
    259	mrpc_cleanup_cmd(stdev);
    260}
    261
    262static void mrpc_event_work(struct work_struct *work)
    263{
    264	struct switchtec_dev *stdev;
    265
    266	stdev = container_of(work, struct switchtec_dev, mrpc_work);
    267
    268	dev_dbg(&stdev->dev, "%s\n", __func__);
    269
    270	mutex_lock(&stdev->mrpc_mutex);
    271	cancel_delayed_work(&stdev->mrpc_timeout);
    272	mrpc_complete_cmd(stdev);
    273	mutex_unlock(&stdev->mrpc_mutex);
    274}
    275
    276static void mrpc_error_complete_cmd(struct switchtec_dev *stdev)
    277{
    278	/* requires the mrpc_mutex to already be held when called */
    279
    280	struct switchtec_user *stuser;
    281
    282	if (list_empty(&stdev->mrpc_queue))
    283		return;
    284
    285	stuser = list_entry(stdev->mrpc_queue.next,
    286			    struct switchtec_user, list);
    287
    288	stuser_set_state(stuser, MRPC_IO_ERROR);
    289
    290	mrpc_cleanup_cmd(stdev);
    291}
    292
    293static void mrpc_timeout_work(struct work_struct *work)
    294{
    295	struct switchtec_dev *stdev;
    296	u32 status;
    297
    298	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);
    299
    300	dev_dbg(&stdev->dev, "%s\n", __func__);
    301
    302	mutex_lock(&stdev->mrpc_mutex);
    303
    304	if (!is_firmware_running(stdev)) {
    305		mrpc_error_complete_cmd(stdev);
    306		goto out;
    307	}
    308
    309	if (stdev->dma_mrpc)
    310		status = stdev->dma_mrpc->status;
    311	else
    312		status = ioread32(&stdev->mmio_mrpc->status);
    313	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
    314		schedule_delayed_work(&stdev->mrpc_timeout,
    315				      msecs_to_jiffies(500));
    316		goto out;
    317	}
    318
    319	mrpc_complete_cmd(stdev);
    320out:
    321	mutex_unlock(&stdev->mrpc_mutex);
    322}
    323
    324static ssize_t device_version_show(struct device *dev,
    325	struct device_attribute *attr, char *buf)
    326{
    327	struct switchtec_dev *stdev = to_stdev(dev);
    328	u32 ver;
    329
    330	ver = ioread32(&stdev->mmio_sys_info->device_version);
    331
    332	return sysfs_emit(buf, "%x\n", ver);
    333}
    334static DEVICE_ATTR_RO(device_version);
    335
    336static ssize_t fw_version_show(struct device *dev,
    337	struct device_attribute *attr, char *buf)
    338{
    339	struct switchtec_dev *stdev = to_stdev(dev);
    340	u32 ver;
    341
    342	ver = ioread32(&stdev->mmio_sys_info->firmware_version);
    343
    344	return sysfs_emit(buf, "%08x\n", ver);
    345}
    346static DEVICE_ATTR_RO(fw_version);
    347
    348static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
    349{
    350	int i;
    351
    352	memcpy_fromio(buf, attr, len);
    353	buf[len] = '\n';
    354	buf[len + 1] = 0;
    355
    356	for (i = len - 1; i > 0; i--) {
    357		if (buf[i] != ' ')
    358			break;
    359		buf[i] = '\n';
    360		buf[i + 1] = 0;
    361	}
    362
    363	return strlen(buf);
    364}
    365
    366#define DEVICE_ATTR_SYS_INFO_STR(field) \
    367static ssize_t field ## _show(struct device *dev, \
    368	struct device_attribute *attr, char *buf) \
    369{ \
    370	struct switchtec_dev *stdev = to_stdev(dev); \
    371	struct sys_info_regs __iomem *si = stdev->mmio_sys_info; \
    372	if (stdev->gen == SWITCHTEC_GEN3) \
    373		return io_string_show(buf, &si->gen3.field, \
    374				      sizeof(si->gen3.field)); \
    375	else if (stdev->gen == SWITCHTEC_GEN4) \
    376		return io_string_show(buf, &si->gen4.field, \
    377				      sizeof(si->gen4.field)); \
    378	else \
    379		return -EOPNOTSUPP; \
    380} \
    381\
    382static DEVICE_ATTR_RO(field)
    383
    384DEVICE_ATTR_SYS_INFO_STR(vendor_id);
    385DEVICE_ATTR_SYS_INFO_STR(product_id);
    386DEVICE_ATTR_SYS_INFO_STR(product_revision);
    387
    388static ssize_t component_vendor_show(struct device *dev,
    389				     struct device_attribute *attr, char *buf)
    390{
    391	struct switchtec_dev *stdev = to_stdev(dev);
    392	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
    393
    394	/* component_vendor field not supported after gen3 */
    395	if (stdev->gen != SWITCHTEC_GEN3)
    396		return sysfs_emit(buf, "none\n");
    397
    398	return io_string_show(buf, &si->gen3.component_vendor,
    399			      sizeof(si->gen3.component_vendor));
    400}
    401static DEVICE_ATTR_RO(component_vendor);
    402
    403static ssize_t component_id_show(struct device *dev,
    404	struct device_attribute *attr, char *buf)
    405{
    406	struct switchtec_dev *stdev = to_stdev(dev);
    407	int id = ioread16(&stdev->mmio_sys_info->gen3.component_id);
    408
    409	/* component_id field not supported after gen3 */
    410	if (stdev->gen != SWITCHTEC_GEN3)
    411		return sysfs_emit(buf, "none\n");
    412
    413	return sysfs_emit(buf, "PM%04X\n", id);
    414}
    415static DEVICE_ATTR_RO(component_id);
    416
    417static ssize_t component_revision_show(struct device *dev,
    418	struct device_attribute *attr, char *buf)
    419{
    420	struct switchtec_dev *stdev = to_stdev(dev);
    421	int rev = ioread8(&stdev->mmio_sys_info->gen3.component_revision);
    422
    423	/* component_revision field not supported after gen3 */
    424	if (stdev->gen != SWITCHTEC_GEN3)
    425		return sysfs_emit(buf, "255\n");
    426
    427	return sysfs_emit(buf, "%d\n", rev);
    428}
    429static DEVICE_ATTR_RO(component_revision);
    430
    431static ssize_t partition_show(struct device *dev,
    432	struct device_attribute *attr, char *buf)
    433{
    434	struct switchtec_dev *stdev = to_stdev(dev);
    435
    436	return sysfs_emit(buf, "%d\n", stdev->partition);
    437}
    438static DEVICE_ATTR_RO(partition);
    439
    440static ssize_t partition_count_show(struct device *dev,
    441	struct device_attribute *attr, char *buf)
    442{
    443	struct switchtec_dev *stdev = to_stdev(dev);
    444
    445	return sysfs_emit(buf, "%d\n", stdev->partition_count);
    446}
    447static DEVICE_ATTR_RO(partition_count);
    448
    449static struct attribute *switchtec_device_attrs[] = {
    450	&dev_attr_device_version.attr,
    451	&dev_attr_fw_version.attr,
    452	&dev_attr_vendor_id.attr,
    453	&dev_attr_product_id.attr,
    454	&dev_attr_product_revision.attr,
    455	&dev_attr_component_vendor.attr,
    456	&dev_attr_component_id.attr,
    457	&dev_attr_component_revision.attr,
    458	&dev_attr_partition.attr,
    459	&dev_attr_partition_count.attr,
    460	NULL,
    461};
    462
    463ATTRIBUTE_GROUPS(switchtec_device);
    464
    465static int switchtec_dev_open(struct inode *inode, struct file *filp)
    466{
    467	struct switchtec_dev *stdev;
    468	struct switchtec_user *stuser;
    469
    470	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);
    471
    472	stuser = stuser_create(stdev);
    473	if (IS_ERR(stuser))
    474		return PTR_ERR(stuser);
    475
    476	filp->private_data = stuser;
    477	stream_open(inode, filp);
    478
    479	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
    480
    481	return 0;
    482}
    483
    484static int switchtec_dev_release(struct inode *inode, struct file *filp)
    485{
    486	struct switchtec_user *stuser = filp->private_data;
    487
    488	stuser_put(stuser);
    489
    490	return 0;
    491}
    492
    493static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
    494{
    495	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
    496		return -EINTR;
    497
    498	if (!stdev->alive) {
    499		mutex_unlock(&stdev->mrpc_mutex);
    500		return -ENODEV;
    501	}
    502
    503	return 0;
    504}
    505
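/*
 * write() handler: userspace submits an MRPC command as a 4-byte command ID
 * followed by up to SWITCHTEC_MRPC_PAYLOAD_SIZE bytes of input data. GAS
 * read/write commands require CAP_SYS_ADMIN. The command is only queued
 * here; the result is collected with a subsequent read().
 */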
    506static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
    507				   size_t size, loff_t *off)
    508{
    509	struct switchtec_user *stuser = filp->private_data;
    510	struct switchtec_dev *stdev = stuser->stdev;
    511	int rc;
    512
    513	if (size < sizeof(stuser->cmd) ||
    514	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
    515		return -EINVAL;
    516
    517	stuser->data_len = size - sizeof(stuser->cmd);
    518
    519	rc = lock_mutex_and_test_alive(stdev);
    520	if (rc)
    521		return rc;
    522
    523	if (stuser->state != MRPC_IDLE) {
    524		rc = -EBADE;
    525		goto out;
    526	}
    527
    528	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
    529	if (rc) {
    530		rc = -EFAULT;
    531		goto out;
    532	}
    533	if (((MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_WRITE) ||
    534	     (MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_READ)) &&
    535	    !capable(CAP_SYS_ADMIN)) {
    536		rc = -EPERM;
    537		goto out;
    538	}
    539
    540	data += sizeof(stuser->cmd);
    541	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
    542	if (rc) {
    543		rc = -EFAULT;
    544		goto out;
    545	}
    546
    547	rc = mrpc_queue_cmd(stuser);
    548
    549out:
    550	mutex_unlock(&stdev->mrpc_mutex);
    551
    552	if (rc)
    553		return rc;
    554
    555	return size;
    556}
    557
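/*
 * read() handler: returns the 4-byte MRPC return code followed by the output
 * data of the previously written command, blocking until the command
 * completes unless the file was opened O_NONBLOCK.
 */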
    558static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
    559				  size_t size, loff_t *off)
    560{
    561	struct switchtec_user *stuser = filp->private_data;
    562	struct switchtec_dev *stdev = stuser->stdev;
    563	int rc;
    564
    565	if (size < sizeof(stuser->cmd) ||
    566	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
    567		return -EINVAL;
    568
    569	rc = lock_mutex_and_test_alive(stdev);
    570	if (rc)
    571		return rc;
    572
    573	if (stuser->state == MRPC_IDLE) {
    574		mutex_unlock(&stdev->mrpc_mutex);
    575		return -EBADE;
    576	}
    577
    578	stuser->read_len = size - sizeof(stuser->return_code);
    579
    580	mutex_unlock(&stdev->mrpc_mutex);
    581
    582	if (filp->f_flags & O_NONBLOCK) {
    583		if (!stuser->cmd_done)
    584			return -EAGAIN;
    585	} else {
    586		rc = wait_event_interruptible(stuser->cmd_comp,
    587					      stuser->cmd_done);
    588		if (rc < 0)
    589			return rc;
    590	}
    591
    592	rc = lock_mutex_and_test_alive(stdev);
    593	if (rc)
    594		return rc;
    595
    596	if (stuser->state == MRPC_IO_ERROR) {
    597		mutex_unlock(&stdev->mrpc_mutex);
    598		return -EIO;
    599	}
    600
    601	if (stuser->state != MRPC_DONE) {
    602		mutex_unlock(&stdev->mrpc_mutex);
    603		return -EBADE;
    604	}
    605
    606	rc = copy_to_user(data, &stuser->return_code,
    607			  sizeof(stuser->return_code));
    608	if (rc) {
    609		rc = -EFAULT;
    610		goto out;
    611	}
    612
    613	data += sizeof(stuser->return_code);
    614	rc = copy_to_user(data, &stuser->data,
    615			  size - sizeof(stuser->return_code));
    616	if (rc) {
    617		rc = -EFAULT;
    618		goto out;
    619	}
    620
    621	stuser_set_state(stuser, MRPC_IDLE);
    622
    623out:
    624	mutex_unlock(&stdev->mrpc_mutex);
    625
    626	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE ||
    627	    stuser->status == SWITCHTEC_MRPC_STATUS_ERROR)
    628		return size;
    629	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
    630		return -ENXIO;
    631	else
    632		return -EBADMSG;
    633}
    634
    635static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
    636{
    637	struct switchtec_user *stuser = filp->private_data;
    638	struct switchtec_dev *stdev = stuser->stdev;
    639	__poll_t ret = 0;
    640
    641	poll_wait(filp, &stuser->cmd_comp, wait);
    642	poll_wait(filp, &stdev->event_wq, wait);
    643
    644	if (lock_mutex_and_test_alive(stdev))
    645		return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;
    646
    647	mutex_unlock(&stdev->mrpc_mutex);
    648
    649	if (stuser->cmd_done)
    650		ret |= EPOLLIN | EPOLLRDNORM;
    651
    652	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
    653		ret |= EPOLLPRI | EPOLLRDBAND;
    654
    655	return ret;
    656}
    657
    658static int ioctl_flash_info(struct switchtec_dev *stdev,
    659			    struct switchtec_ioctl_flash_info __user *uinfo)
    660{
    661	struct switchtec_ioctl_flash_info info = {0};
    662	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
    663
    664	if (stdev->gen == SWITCHTEC_GEN3) {
    665		info.flash_length = ioread32(&fi->gen3.flash_length);
    666		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN3;
    667	} else if (stdev->gen == SWITCHTEC_GEN4) {
    668		info.flash_length = ioread32(&fi->gen4.flash_length);
    669		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN4;
    670	} else {
    671		return -EOPNOTSUPP;
    672	}
    673
    674	if (copy_to_user(uinfo, &info, sizeof(info)))
    675		return -EFAULT;
    676
    677	return 0;
    678}
    679
    680static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
    681			     struct partition_info __iomem *pi)
    682{
    683	info->address = ioread32(&pi->address);
    684	info->length = ioread32(&pi->length);
    685}
    686
    687static int flash_part_info_gen3(struct switchtec_dev *stdev,
    688		struct switchtec_ioctl_flash_part_info *info)
    689{
    690	struct flash_info_regs_gen3 __iomem *fi =
    691		&stdev->mmio_flash_info->gen3;
    692	struct sys_info_regs_gen3 __iomem *si = &stdev->mmio_sys_info->gen3;
    693	u32 active_addr = -1;
    694
    695	switch (info->flash_partition) {
    696	case SWITCHTEC_IOCTL_PART_CFG0:
    697		active_addr = ioread32(&fi->active_cfg);
    698		set_fw_info_part(info, &fi->cfg0);
    699		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG0_RUNNING)
    700			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
    701		break;
    702	case SWITCHTEC_IOCTL_PART_CFG1:
    703		active_addr = ioread32(&fi->active_cfg);
    704		set_fw_info_part(info, &fi->cfg1);
    705		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG1_RUNNING)
    706			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
    707		break;
    708	case SWITCHTEC_IOCTL_PART_IMG0:
    709		active_addr = ioread32(&fi->active_img);
    710		set_fw_info_part(info, &fi->img0);
    711		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG0_RUNNING)
    712			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
    713		break;
    714	case SWITCHTEC_IOCTL_PART_IMG1:
    715		active_addr = ioread32(&fi->active_img);
    716		set_fw_info_part(info, &fi->img1);
    717		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG1_RUNNING)
    718			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
    719		break;
    720	case SWITCHTEC_IOCTL_PART_NVLOG:
    721		set_fw_info_part(info, &fi->nvlog);
    722		break;
    723	case SWITCHTEC_IOCTL_PART_VENDOR0:
    724		set_fw_info_part(info, &fi->vendor[0]);
    725		break;
    726	case SWITCHTEC_IOCTL_PART_VENDOR1:
    727		set_fw_info_part(info, &fi->vendor[1]);
    728		break;
    729	case SWITCHTEC_IOCTL_PART_VENDOR2:
    730		set_fw_info_part(info, &fi->vendor[2]);
    731		break;
    732	case SWITCHTEC_IOCTL_PART_VENDOR3:
    733		set_fw_info_part(info, &fi->vendor[3]);
    734		break;
    735	case SWITCHTEC_IOCTL_PART_VENDOR4:
    736		set_fw_info_part(info, &fi->vendor[4]);
    737		break;
    738	case SWITCHTEC_IOCTL_PART_VENDOR5:
    739		set_fw_info_part(info, &fi->vendor[5]);
    740		break;
    741	case SWITCHTEC_IOCTL_PART_VENDOR6:
    742		set_fw_info_part(info, &fi->vendor[6]);
    743		break;
    744	case SWITCHTEC_IOCTL_PART_VENDOR7:
    745		set_fw_info_part(info, &fi->vendor[7]);
    746		break;
    747	default:
    748		return -EINVAL;
    749	}
    750
    751	if (info->address == active_addr)
    752		info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
    753
    754	return 0;
    755}
    756
    757static int flash_part_info_gen4(struct switchtec_dev *stdev,
    758		struct switchtec_ioctl_flash_part_info *info)
    759{
    760	struct flash_info_regs_gen4 __iomem *fi = &stdev->mmio_flash_info->gen4;
    761	struct sys_info_regs_gen4 __iomem *si = &stdev->mmio_sys_info->gen4;
    762	struct active_partition_info_gen4 __iomem *af = &fi->active_flag;
    763
    764	switch (info->flash_partition) {
    765	case SWITCHTEC_IOCTL_PART_MAP_0:
    766		set_fw_info_part(info, &fi->map0);
    767		break;
    768	case SWITCHTEC_IOCTL_PART_MAP_1:
    769		set_fw_info_part(info, &fi->map1);
    770		break;
    771	case SWITCHTEC_IOCTL_PART_KEY_0:
    772		set_fw_info_part(info, &fi->key0);
    773		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY0_ACTIVE)
    774			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
    775		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY0_RUNNING)
    776			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
    777		break;
    778	case SWITCHTEC_IOCTL_PART_KEY_1:
    779		set_fw_info_part(info, &fi->key1);
    780		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY1_ACTIVE)
    781			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
    782		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY1_RUNNING)
    783			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
    784		break;
    785	case SWITCHTEC_IOCTL_PART_BL2_0:
    786		set_fw_info_part(info, &fi->bl2_0);
    787		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_0_ACTIVE)
    788			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
    789		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_0_RUNNING)
    790			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
    791		break;
    792	case SWITCHTEC_IOCTL_PART_BL2_1:
    793		set_fw_info_part(info, &fi->bl2_1);
    794		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_1_ACTIVE)
    795			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
    796		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_1_RUNNING)
    797			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
    798		break;
    799	case SWITCHTEC_IOCTL_PART_CFG0:
    800		set_fw_info_part(info, &fi->cfg0);
    801		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG0_ACTIVE)
    802			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
    803		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG0_RUNNING)
    804			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
    805		break;
    806	case SWITCHTEC_IOCTL_PART_CFG1:
    807		set_fw_info_part(info, &fi->cfg1);
    808		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG1_ACTIVE)
    809			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
    810		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG1_RUNNING)
    811			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
    812		break;
    813	case SWITCHTEC_IOCTL_PART_IMG0:
    814		set_fw_info_part(info, &fi->img0);
    815		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG0_ACTIVE)
    816			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
    817		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG0_RUNNING)
    818			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
    819		break;
    820	case SWITCHTEC_IOCTL_PART_IMG1:
    821		set_fw_info_part(info, &fi->img1);
    822		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG1_ACTIVE)
    823			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
    824		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG1_RUNNING)
    825			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
    826		break;
    827	case SWITCHTEC_IOCTL_PART_NVLOG:
    828		set_fw_info_part(info, &fi->nvlog);
    829		break;
    830	case SWITCHTEC_IOCTL_PART_VENDOR0:
    831		set_fw_info_part(info, &fi->vendor[0]);
    832		break;
    833	case SWITCHTEC_IOCTL_PART_VENDOR1:
    834		set_fw_info_part(info, &fi->vendor[1]);
    835		break;
    836	case SWITCHTEC_IOCTL_PART_VENDOR2:
    837		set_fw_info_part(info, &fi->vendor[2]);
    838		break;
    839	case SWITCHTEC_IOCTL_PART_VENDOR3:
    840		set_fw_info_part(info, &fi->vendor[3]);
    841		break;
    842	case SWITCHTEC_IOCTL_PART_VENDOR4:
    843		set_fw_info_part(info, &fi->vendor[4]);
    844		break;
    845	case SWITCHTEC_IOCTL_PART_VENDOR5:
    846		set_fw_info_part(info, &fi->vendor[5]);
    847		break;
    848	case SWITCHTEC_IOCTL_PART_VENDOR6:
    849		set_fw_info_part(info, &fi->vendor[6]);
    850		break;
    851	case SWITCHTEC_IOCTL_PART_VENDOR7:
    852		set_fw_info_part(info, &fi->vendor[7]);
    853		break;
    854	default:
    855		return -EINVAL;
    856	}
    857
    858	return 0;
    859}
    860
    861static int ioctl_flash_part_info(struct switchtec_dev *stdev,
    862		struct switchtec_ioctl_flash_part_info __user *uinfo)
    863{
    864	int ret;
    865	struct switchtec_ioctl_flash_part_info info = {0};
    866
    867	if (copy_from_user(&info, uinfo, sizeof(info)))
    868		return -EFAULT;
    869
    870	if (stdev->gen == SWITCHTEC_GEN3) {
    871		ret = flash_part_info_gen3(stdev, &info);
    872		if (ret)
    873			return ret;
    874	} else if (stdev->gen == SWITCHTEC_GEN4) {
    875		ret = flash_part_info_gen4(stdev, &info);
    876		if (ret)
    877			return ret;
    878	} else {
    879		return -EOPNOTSUPP;
    880	}
    881
    882	if (copy_to_user(uinfo, &info, sizeof(info)))
    883		return -EFAULT;
    884
    885	return 0;
    886}
    887
    888static int ioctl_event_summary(struct switchtec_dev *stdev,
    889	struct switchtec_user *stuser,
    890	struct switchtec_ioctl_event_summary __user *usum,
    891	size_t size)
    892{
    893	struct switchtec_ioctl_event_summary *s;
    894	int i;
    895	u32 reg;
    896	int ret = 0;
    897
    898	s = kzalloc(sizeof(*s), GFP_KERNEL);
    899	if (!s)
    900		return -ENOMEM;
    901
    902	s->global = ioread32(&stdev->mmio_sw_event->global_summary);
    903	s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
    904	s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
    905
    906	for (i = 0; i < stdev->partition_count; i++) {
    907		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
    908		s->part[i] = reg;
    909	}
    910
    911	for (i = 0; i < stdev->pff_csr_count; i++) {
    912		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
    913		s->pff[i] = reg;
    914	}
    915
    916	if (copy_to_user(usum, s, size)) {
    917		ret = -EFAULT;
    918		goto error_case;
    919	}
    920
    921	stuser->event_cnt = atomic_read(&stdev->event_cnt);
    922
    923error_case:
    924	kfree(s);
    925	return ret;
    926}
    927
    928static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
    929				  size_t offset, int index)
    930{
    931	return (void __iomem *)stdev->mmio_sw_event + offset;
    932}
    933
    934static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
    935				size_t offset, int index)
    936{
    937	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
    938}
    939
    940static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
    941			       size_t offset, int index)
    942{
    943	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
    944}
    945
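/*
 * Map each SWITCHTEC_IOCTL_EVENT_* ID to the offset of its event header
 * register and a helper that resolves the register block it lives in:
 * global switch events, per-partition events or per-PFF events.
 */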
    946#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
    947#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
    948#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
    949
    950static const struct event_reg {
    951	size_t offset;
    952	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
    953				size_t offset, int index);
    954} event_regs[] = {
    955	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
    956	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
    957	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
    958	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
    959	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
    960	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
    961	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
    962	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
    963	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
    964	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
    965	       twi_mrpc_comp_async_hdr),
    966	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
    967	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
    968	       cli_mrpc_comp_async_hdr),
    969	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
    970	EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
    971	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
    972	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
    973	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
    974	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
    975	EV_PAR(SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY,
    976	       intercomm_notify_hdr),
    977	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
    978	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
    979	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
    980	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
    981	EV_PFF(SWITCHTEC_IOCTL_EVENT_UEC, uec_hdr),
    982	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
    983	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
    984	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
    985	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
    986	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
    987	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
    988	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
    989	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
    990};
    991
    992static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
    993				   int event_id, int index)
    994{
    995	size_t off;
    996
    997	if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
    998		return (u32 __iomem *)ERR_PTR(-EINVAL);
    999
   1000	off = event_regs[event_id].offset;
   1001
   1002	if (event_regs[event_id].map_reg == part_ev_reg) {
   1003		if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
   1004			index = stdev->partition;
   1005		else if (index < 0 || index >= stdev->partition_count)
   1006			return (u32 __iomem *)ERR_PTR(-EINVAL);
   1007	} else if (event_regs[event_id].map_reg == pff_ev_reg) {
   1008		if (index < 0 || index >= stdev->pff_csr_count)
   1009			return (u32 __iomem *)ERR_PTR(-EINVAL);
   1010	}
   1011
   1012	return event_regs[event_id].map_reg(stdev, off, index);
   1013}
   1014
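/*
 * Read one event header register, report whether the event occurred and how
 * many times, apply the control flags requested by userspace (clear,
 * enable/disable interrupt, log, CLI and fatal reporting) and return the
 * resulting enable flags.
 */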
   1015static int event_ctl(struct switchtec_dev *stdev,
   1016		     struct switchtec_ioctl_event_ctl *ctl)
   1017{
   1018	int i;
   1019	u32 __iomem *reg;
   1020	u32 hdr;
   1021
   1022	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
   1023	if (IS_ERR(reg))
   1024		return PTR_ERR(reg);
   1025
   1026	hdr = ioread32(reg);
   1027	if (hdr & SWITCHTEC_EVENT_NOT_SUPP)
   1028		return -EOPNOTSUPP;
   1029
   1030	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
   1031		ctl->data[i] = ioread32(&reg[i + 1]);
   1032
   1033	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
   1034	ctl->count = (hdr >> 5) & 0xFF;
   1035
   1036	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
   1037		hdr &= ~SWITCHTEC_EVENT_CLEAR;
   1038	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
   1039		hdr |= SWITCHTEC_EVENT_EN_IRQ;
   1040	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
   1041		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
   1042	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
   1043		hdr |= SWITCHTEC_EVENT_EN_LOG;
   1044	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
   1045		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
   1046	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
   1047		hdr |= SWITCHTEC_EVENT_EN_CLI;
   1048	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
   1049		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
   1050	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
   1051		hdr |= SWITCHTEC_EVENT_FATAL;
   1052	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
   1053		hdr &= ~SWITCHTEC_EVENT_FATAL;
   1054
   1055	if (ctl->flags)
   1056		iowrite32(hdr, reg);
   1057
   1058	ctl->flags = 0;
   1059	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
   1060		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
   1061	if (hdr & SWITCHTEC_EVENT_EN_LOG)
   1062		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
   1063	if (hdr & SWITCHTEC_EVENT_EN_CLI)
   1064		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
   1065	if (hdr & SWITCHTEC_EVENT_FATAL)
   1066		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;
   1067
   1068	return 0;
   1069}
   1070
   1071static int ioctl_event_ctl(struct switchtec_dev *stdev,
   1072	struct switchtec_ioctl_event_ctl __user *uctl)
   1073{
   1074	int ret;
   1075	int nr_idxs;
   1076	unsigned int event_flags;
   1077	struct switchtec_ioctl_event_ctl ctl;
   1078
   1079	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
   1080		return -EFAULT;
   1081
   1082	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
   1083		return -EINVAL;
   1084
   1085	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
   1086		return -EINVAL;
   1087
   1088	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
   1089		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
   1090			nr_idxs = 1;
   1091		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
   1092			nr_idxs = stdev->partition_count;
   1093		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
   1094			nr_idxs = stdev->pff_csr_count;
   1095		else
   1096			return -EINVAL;
   1097
   1098		event_flags = ctl.flags;
   1099		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
   1100			ctl.flags = event_flags;
   1101			ret = event_ctl(stdev, &ctl);
   1102			if (ret < 0 && ret != -EOPNOTSUPP)
   1103				return ret;
   1104		}
   1105	} else {
   1106		ret = event_ctl(stdev, &ctl);
   1107		if (ret < 0)
   1108			return ret;
   1109	}
   1110
   1111	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
   1112		return -EFAULT;
   1113
   1114	return 0;
   1115}
   1116
   1117static int ioctl_pff_to_port(struct switchtec_dev *stdev,
   1118			     struct switchtec_ioctl_pff_port __user *up)
   1119{
   1120	int i, part;
   1121	u32 reg;
   1122	struct part_cfg_regs __iomem *pcfg;
   1123	struct switchtec_ioctl_pff_port p;
   1124
   1125	if (copy_from_user(&p, up, sizeof(p)))
   1126		return -EFAULT;
   1127
   1128	p.port = -1;
   1129	for (part = 0; part < stdev->partition_count; part++) {
   1130		pcfg = &stdev->mmio_part_cfg_all[part];
   1131		p.partition = part;
   1132
   1133		reg = ioread32(&pcfg->usp_pff_inst_id);
   1134		if (reg == p.pff) {
   1135			p.port = 0;
   1136			break;
   1137		}
   1138
   1139		reg = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
   1140		if (reg == p.pff) {
   1141			p.port = SWITCHTEC_IOCTL_PFF_VEP;
   1142			break;
   1143		}
   1144
   1145		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
   1146			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
   1147			if (reg != p.pff)
   1148				continue;
   1149
   1150			p.port = i + 1;
   1151			break;
   1152		}
   1153
   1154		if (p.port != -1)
   1155			break;
   1156	}
   1157
   1158	if (copy_to_user(up, &p, sizeof(p)))
   1159		return -EFAULT;
   1160
   1161	return 0;
   1162}
   1163
   1164static int ioctl_port_to_pff(struct switchtec_dev *stdev,
   1165			     struct switchtec_ioctl_pff_port __user *up)
   1166{
   1167	struct switchtec_ioctl_pff_port p;
   1168	struct part_cfg_regs __iomem *pcfg;
   1169
   1170	if (copy_from_user(&p, up, sizeof(p)))
   1171		return -EFAULT;
   1172
   1173	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
   1174		pcfg = stdev->mmio_part_cfg;
   1175	else if (p.partition < stdev->partition_count)
   1176		pcfg = &stdev->mmio_part_cfg_all[p.partition];
   1177	else
   1178		return -EINVAL;
   1179
   1180	switch (p.port) {
   1181	case 0:
   1182		p.pff = ioread32(&pcfg->usp_pff_inst_id);
   1183		break;
   1184	case SWITCHTEC_IOCTL_PFF_VEP:
   1185		p.pff = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
   1186		break;
   1187	default:
   1188		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
   1189			return -EINVAL;
   1190		p.port = array_index_nospec(p.port,
   1191					ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
   1192		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
   1193		break;
   1194	}
   1195
   1196	if (copy_to_user(up, &p, sizeof(p)))
   1197		return -EFAULT;
   1198
   1199	return 0;
   1200}
   1201
   1202static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
   1203				unsigned long arg)
   1204{
   1205	struct switchtec_user *stuser = filp->private_data;
   1206	struct switchtec_dev *stdev = stuser->stdev;
   1207	int rc;
   1208	void __user *argp = (void __user *)arg;
   1209
   1210	rc = lock_mutex_and_test_alive(stdev);
   1211	if (rc)
   1212		return rc;
   1213
   1214	switch (cmd) {
   1215	case SWITCHTEC_IOCTL_FLASH_INFO:
   1216		rc = ioctl_flash_info(stdev, argp);
   1217		break;
   1218	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
   1219		rc = ioctl_flash_part_info(stdev, argp);
   1220		break;
   1221	case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
   1222		rc = ioctl_event_summary(stdev, stuser, argp,
   1223					 sizeof(struct switchtec_ioctl_event_summary_legacy));
   1224		break;
   1225	case SWITCHTEC_IOCTL_EVENT_CTL:
   1226		rc = ioctl_event_ctl(stdev, argp);
   1227		break;
   1228	case SWITCHTEC_IOCTL_PFF_TO_PORT:
   1229		rc = ioctl_pff_to_port(stdev, argp);
   1230		break;
   1231	case SWITCHTEC_IOCTL_PORT_TO_PFF:
   1232		rc = ioctl_port_to_pff(stdev, argp);
   1233		break;
   1234	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
   1235		rc = ioctl_event_summary(stdev, stuser, argp,
   1236					 sizeof(struct switchtec_ioctl_event_summary));
   1237		break;
   1238	default:
   1239		rc = -ENOTTY;
   1240		break;
   1241	}
   1242
   1243	mutex_unlock(&stdev->mrpc_mutex);
   1244	return rc;
   1245}
   1246
   1247static const struct file_operations switchtec_fops = {
   1248	.owner = THIS_MODULE,
   1249	.open = switchtec_dev_open,
   1250	.release = switchtec_dev_release,
   1251	.write = switchtec_dev_write,
   1252	.read = switchtec_dev_read,
   1253	.poll = switchtec_dev_poll,
   1254	.unlocked_ioctl = switchtec_dev_ioctl,
   1255	.compat_ioctl = compat_ptr_ioctl,
   1256};
   1257
   1258static void link_event_work(struct work_struct *work)
   1259{
   1260	struct switchtec_dev *stdev;
   1261
   1262	stdev = container_of(work, struct switchtec_dev, link_event_work);
   1263
   1264	if (stdev->link_notifier)
   1265		stdev->link_notifier(stdev);
   1266}
   1267
   1268static void check_link_state_events(struct switchtec_dev *stdev)
   1269{
   1270	int idx;
   1271	u32 reg;
   1272	int count;
   1273	int occurred = 0;
   1274
   1275	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
   1276		reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
   1277		dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
   1278		count = (reg >> 5) & 0xFF;
   1279
   1280		if (count != stdev->link_event_count[idx]) {
   1281			occurred = 1;
   1282			stdev->link_event_count[idx] = count;
   1283		}
   1284	}
   1285
   1286	if (occurred)
   1287		schedule_work(&stdev->link_event_work);
   1288}
   1289
   1290static void enable_link_state_events(struct switchtec_dev *stdev)
   1291{
   1292	int idx;
   1293
   1294	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
   1295		iowrite32(SWITCHTEC_EVENT_CLEAR |
   1296			  SWITCHTEC_EVENT_EN_IRQ,
   1297			  &stdev->mmio_pff_csr[idx].link_state_hdr);
   1298	}
   1299}
   1300
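/*
 * Point the firmware at the coherent DMA buffer used for MRPC completions;
 * with DMA MRPC enabled the completion path reads status and output data
 * from this buffer instead of the memory-mapped GAS.
 */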
   1301static void enable_dma_mrpc(struct switchtec_dev *stdev)
   1302{
   1303	writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
   1304	flush_wc_buf(stdev);
   1305	iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
   1306}
   1307
   1308static void stdev_release(struct device *dev)
   1309{
   1310	struct switchtec_dev *stdev = to_stdev(dev);
   1311
   1312	if (stdev->dma_mrpc) {
   1313		iowrite32(0, &stdev->mmio_mrpc->dma_en);
   1314		flush_wc_buf(stdev);
   1315		writeq(0, &stdev->mmio_mrpc->dma_addr);
   1316		dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
   1317				stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
   1318	}
   1319	kfree(stdev);
   1320}
   1321
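/*
 * Tear-down path shared by remove and failed probe: mark the device dead,
 * fail every queued MRPC command, and wake anyone sleeping in read() or
 * poll().
 */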
   1322static void stdev_kill(struct switchtec_dev *stdev)
   1323{
   1324	struct switchtec_user *stuser, *tmpuser;
   1325
   1326	pci_clear_master(stdev->pdev);
   1327
   1328	cancel_delayed_work_sync(&stdev->mrpc_timeout);
   1329
   1330	/* Mark the hardware as unavailable and complete all completions */
   1331	mutex_lock(&stdev->mrpc_mutex);
   1332	stdev->alive = false;
   1333
   1334	/* Wake up and kill any users waiting on an MRPC request */
   1335	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
   1336		stuser->cmd_done = true;
   1337		wake_up_interruptible(&stuser->cmd_comp);
   1338		list_del_init(&stuser->list);
   1339		stuser_put(stuser);
   1340	}
   1341
   1342	mutex_unlock(&stdev->mrpc_mutex);
   1343
   1344	/* Wake up any users waiting on event_wq */
   1345	wake_up_interruptible(&stdev->event_wq);
   1346}
   1347
   1348static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
   1349{
   1350	struct switchtec_dev *stdev;
   1351	int minor;
   1352	struct device *dev;
   1353	struct cdev *cdev;
   1354	int rc;
   1355
   1356	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
   1357			     dev_to_node(&pdev->dev));
   1358	if (!stdev)
   1359		return ERR_PTR(-ENOMEM);
   1360
   1361	stdev->alive = true;
   1362	stdev->pdev = pdev;
   1363	INIT_LIST_HEAD(&stdev->mrpc_queue);
   1364	mutex_init(&stdev->mrpc_mutex);
   1365	stdev->mrpc_busy = 0;
   1366	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
   1367	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
   1368	INIT_WORK(&stdev->link_event_work, link_event_work);
   1369	init_waitqueue_head(&stdev->event_wq);
   1370	atomic_set(&stdev->event_cnt, 0);
   1371
   1372	dev = &stdev->dev;
   1373	device_initialize(dev);
   1374	dev->class = switchtec_class;
   1375	dev->parent = &pdev->dev;
   1376	dev->groups = switchtec_device_groups;
   1377	dev->release = stdev_release;
   1378
   1379	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
   1380			       GFP_KERNEL);
   1381	if (minor < 0) {
   1382		rc = minor;
   1383		goto err_put;
   1384	}
   1385
   1386	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
   1387	dev_set_name(dev, "switchtec%d", minor);
   1388
   1389	cdev = &stdev->cdev;
   1390	cdev_init(cdev, &switchtec_fops);
   1391	cdev->owner = THIS_MODULE;
   1392
   1393	return stdev;
   1394
   1395err_put:
   1396	put_device(&stdev->dev);
   1397	return ERR_PTR(rc);
   1398}
   1399
   1400static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
   1401{
   1402	size_t off = event_regs[eid].offset;
   1403	u32 __iomem *hdr_reg;
   1404	u32 hdr;
   1405
   1406	hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
   1407	hdr = ioread32(hdr_reg);
   1408
   1409	if (hdr & SWITCHTEC_EVENT_NOT_SUPP)
   1410		return 0;
   1411
   1412	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
   1413		return 0;
   1414
   1415	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
   1416	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
   1417	iowrite32(hdr, hdr_reg);
   1418
   1419	return 1;
   1420}
   1421
   1422static int mask_all_events(struct switchtec_dev *stdev, int eid)
   1423{
   1424	int idx;
   1425	int count = 0;
   1426
   1427	if (event_regs[eid].map_reg == part_ev_reg) {
   1428		for (idx = 0; idx < stdev->partition_count; idx++)
   1429			count += mask_event(stdev, eid, idx);
   1430	} else if (event_regs[eid].map_reg == pff_ev_reg) {
   1431		for (idx = 0; idx < stdev->pff_csr_count; idx++) {
   1432			if (!stdev->pff_local[idx])
   1433				continue;
   1434
   1435			count += mask_event(stdev, eid, idx);
   1436		}
   1437	} else {
   1438		count += mask_event(stdev, eid, 0);
   1439	}
   1440
   1441	return count;
   1442}
   1443
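/*
 * Event interrupt handler: acknowledge MRPC completions (the actual
 * completion runs from mrpc_event_work), check for link state changes, and
 * disable the interrupt for any other event that fired so that userspace,
 * woken via event_wq, can inspect and re-arm it through the event ioctls.
 */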
   1444static irqreturn_t switchtec_event_isr(int irq, void *dev)
   1445{
   1446	struct switchtec_dev *stdev = dev;
   1447	u32 reg;
   1448	irqreturn_t ret = IRQ_NONE;
   1449	int eid, event_count = 0;
   1450
   1451	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
   1452	if (reg & SWITCHTEC_EVENT_OCCURRED) {
   1453		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
   1454		ret = IRQ_HANDLED;
   1455		schedule_work(&stdev->mrpc_work);
   1456		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
   1457	}
   1458
   1459	check_link_state_events(stdev);
   1460
   1461	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) {
   1462		if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
   1463		    eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
   1464			continue;
   1465
   1466		event_count += mask_all_events(stdev, eid);
   1467	}
   1468
   1469	if (event_count) {
   1470		atomic_inc(&stdev->event_cnt);
   1471		wake_up_interruptible(&stdev->event_wq);
   1472		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
   1473			event_count);
   1474		return IRQ_HANDLED;
   1475	}
   1476
   1477	return ret;
   1478}
   1479
   1480
   1481static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
   1482{
   1483	struct switchtec_dev *stdev = dev;
   1484	irqreturn_t ret = IRQ_NONE;
   1485
   1486	iowrite32(SWITCHTEC_EVENT_CLEAR |
   1487		  SWITCHTEC_EVENT_EN_IRQ,
   1488		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
   1489	schedule_work(&stdev->mrpc_work);
   1490
   1491	ret = IRQ_HANDLED;
   1492	return ret;
   1493}
   1494
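/*
 * Allocate up to "nirqs" MSI/MSI-X vectors and request the event interrupt
 * (the vector number is advertised by the firmware in vep_vector_number)
 * plus, when DMA MRPC is in use, the MRPC completion interrupt advertised
 * in the dma_vector register.
 */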
   1495static int switchtec_init_isr(struct switchtec_dev *stdev)
   1496{
   1497	int nvecs;
   1498	int event_irq;
   1499	int dma_mrpc_irq;
   1500	int rc;
   1501
   1502	if (nirqs < 4)
   1503		nirqs = 4;
   1504
   1505	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs,
   1506				      PCI_IRQ_MSIX | PCI_IRQ_MSI |
   1507				      PCI_IRQ_VIRTUAL);
   1508	if (nvecs < 0)
   1509		return nvecs;
   1510
   1511	event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
   1512	if (event_irq < 0 || event_irq >= nvecs)
   1513		return -EFAULT;
   1514
   1515	event_irq = pci_irq_vector(stdev->pdev, event_irq);
   1516	if (event_irq < 0)
   1517		return event_irq;
   1518
   1519	rc = devm_request_irq(&stdev->pdev->dev, event_irq,
   1520				switchtec_event_isr, 0,
   1521				KBUILD_MODNAME, stdev);
   1522
   1523	if (rc)
   1524		return rc;
   1525
   1526	if (!stdev->dma_mrpc)
   1527		return rc;
   1528
   1529	dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
   1530	if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
   1531		return -EFAULT;
   1532
   1533	dma_mrpc_irq  = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
   1534	if (dma_mrpc_irq < 0)
   1535		return dma_mrpc_irq;
   1536
   1537	rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
   1538				switchtec_dma_mrpc_isr, 0,
   1539				KBUILD_MODNAME, stdev);
   1540
   1541	return rc;
   1542}
   1543
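/*
 * Count the implemented PFF CSR blocks (their vendor_id reads back as
 * Microsemi) and note which instances belong to this partition's upstream
 * port, vEP and downstream ports; only these "local" PFFs have their events
 * masked by the ISR.
 */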
   1544static void init_pff(struct switchtec_dev *stdev)
   1545{
   1546	int i;
   1547	u32 reg;
   1548	struct part_cfg_regs __iomem *pcfg = stdev->mmio_part_cfg;
   1549
   1550	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
   1551		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
   1552		if (reg != PCI_VENDOR_ID_MICROSEMI)
   1553			break;
   1554	}
   1555
   1556	stdev->pff_csr_count = i;
   1557
   1558	reg = ioread32(&pcfg->usp_pff_inst_id);
   1559	if (reg < stdev->pff_csr_count)
   1560		stdev->pff_local[reg] = 1;
   1561
   1562	reg = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
   1563	if (reg < stdev->pff_csr_count)
   1564		stdev->pff_local[reg] = 1;
   1565
   1566	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
   1567		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
   1568		if (reg < stdev->pff_csr_count)
   1569			stdev->pff_local[reg] = 1;
   1570	}
   1571}
   1572
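/*
 * Map BAR 0 (the GAS): the low region holding the MRPC input buffer is
 * mapped write-combining, the remainder is mapped as regular MMIO, and the
 * partition, event and PFF register pointers are derived from the partition
 * ID read from sys_info. Optionally allocates the coherent DMA MRPC buffer.
 */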
   1573static int switchtec_init_pci(struct switchtec_dev *stdev,
   1574			      struct pci_dev *pdev)
   1575{
   1576	int rc;
   1577	void __iomem *map;
   1578	unsigned long res_start, res_len;
   1579	u32 __iomem *part_id;
   1580
   1581	rc = pcim_enable_device(pdev);
   1582	if (rc)
   1583		return rc;
   1584
   1585	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
   1586	if (rc)
   1587		return rc;
   1588
   1589	pci_set_master(pdev);
   1590
   1591	res_start = pci_resource_start(pdev, 0);
   1592	res_len = pci_resource_len(pdev, 0);
   1593
   1594	if (!devm_request_mem_region(&pdev->dev, res_start,
   1595				     res_len, KBUILD_MODNAME))
   1596		return -EBUSY;
   1597
   1598	stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
   1599					   SWITCHTEC_GAS_TOP_CFG_OFFSET);
   1600	if (!stdev->mmio_mrpc)
   1601		return -ENOMEM;
   1602
   1603	map = devm_ioremap(&pdev->dev,
   1604			   res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
   1605			   res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
   1606	if (!map)
   1607		return -ENOMEM;
   1608
   1609	stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
   1610	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
   1611	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
   1612	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
   1613	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
   1614
   1615	if (stdev->gen == SWITCHTEC_GEN3)
   1616		part_id = &stdev->mmio_sys_info->gen3.partition_id;
   1617	else if (stdev->gen == SWITCHTEC_GEN4)
   1618		part_id = &stdev->mmio_sys_info->gen4.partition_id;
   1619	else
   1620		return -EOPNOTSUPP;
   1621
   1622	stdev->partition = ioread8(part_id);
   1623	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
   1624	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
   1625	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
   1626	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;
   1627
   1628	if (stdev->partition_count < 1)
   1629		stdev->partition_count = 1;
   1630
   1631	init_pff(stdev);
   1632
   1633	pci_set_drvdata(pdev, stdev);
   1634
   1635	if (!use_dma_mrpc)
   1636		return 0;
   1637
   1638	if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
   1639		return 0;
   1640
   1641	stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
   1642					     sizeof(*stdev->dma_mrpc),
   1643					     &stdev->dma_mrpc_dma_addr,
   1644					     GFP_KERNEL);
   1645	if (stdev->dma_mrpc == NULL)
   1646		return -ENOMEM;
   1647
   1648	return 0;
   1649}
   1650
   1651static int switchtec_pci_probe(struct pci_dev *pdev,
   1652			       const struct pci_device_id *id)
   1653{
   1654	struct switchtec_dev *stdev;
   1655	int rc;
   1656
   1657	if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
   1658		request_module_nowait("ntb_hw_switchtec");
   1659
   1660	stdev = stdev_create(pdev);
   1661	if (IS_ERR(stdev))
   1662		return PTR_ERR(stdev);
   1663
   1664	stdev->gen = id->driver_data;
   1665
   1666	rc = switchtec_init_pci(stdev, pdev);
   1667	if (rc)
   1668		goto err_put;
   1669
   1670	rc = switchtec_init_isr(stdev);
   1671	if (rc) {
   1672		dev_err(&stdev->dev, "failed to init isr.\n");
   1673		goto err_put;
   1674	}
   1675
   1676	iowrite32(SWITCHTEC_EVENT_CLEAR |
   1677		  SWITCHTEC_EVENT_EN_IRQ,
   1678		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
   1679	enable_link_state_events(stdev);
   1680
   1681	if (stdev->dma_mrpc)
   1682		enable_dma_mrpc(stdev);
   1683
   1684	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
   1685	if (rc)
   1686		goto err_devadd;
   1687
   1688	dev_info(&stdev->dev, "Management device registered.\n");
   1689
   1690	return 0;
   1691
   1692err_devadd:
   1693	stdev_kill(stdev);
   1694err_put:
   1695	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
   1696	put_device(&stdev->dev);
   1697	return rc;
   1698}
   1699
   1700static void switchtec_pci_remove(struct pci_dev *pdev)
   1701{
   1702	struct switchtec_dev *stdev = pci_get_drvdata(pdev);
   1703
   1704	pci_set_drvdata(pdev, NULL);
   1705
   1706	cdev_device_del(&stdev->cdev, &stdev->dev);
   1707	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
   1708	dev_info(&stdev->dev, "unregistered.\n");
   1709	stdev_kill(stdev);
   1710	put_device(&stdev->dev);
   1711}
   1712
   1713#define SWITCHTEC_PCI_DEVICE(device_id, gen) \
   1714	{ \
   1715		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
   1716		.device     = device_id, \
   1717		.subvendor  = PCI_ANY_ID, \
   1718		.subdevice  = PCI_ANY_ID, \
   1719		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
   1720		.class_mask = 0xFFFFFFFF, \
   1721		.driver_data = gen, \
   1722	}, \
   1723	{ \
   1724		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
   1725		.device     = device_id, \
   1726		.subvendor  = PCI_ANY_ID, \
   1727		.subdevice  = PCI_ANY_ID, \
   1728		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
   1729		.class_mask = 0xFFFFFFFF, \
   1730		.driver_data = gen, \
   1731	}
   1732
   1733static const struct pci_device_id switchtec_pci_tbl[] = {
   1734	SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3),  //PFX 24xG3
   1735	SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3),  //PFX 32xG3
   1736	SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3),  //PFX 48xG3
   1737	SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3),  //PFX 64xG3
   1738	SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3),  //PFX 80xG3
   1739	SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3),  //PFX 96xG3
   1740	SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3),  //PSX 24xG3
   1741	SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3),  //PSX 32xG3
   1742	SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3),  //PSX 48xG3
   1743	SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3),  //PSX 64xG3
   1744	SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3),  //PSX 80xG3
   1745	SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3),  //PSX 96xG3
   1746	SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3),  //PAX 24XG3
   1747	SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3),  //PAX 32XG3
   1748	SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3),  //PAX 48XG3
   1749	SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3),  //PAX 64XG3
   1750	SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3),  //PAX 80XG3
   1751	SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3),  //PAX 96XG3
   1752	SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3),  //PFXL 24XG3
   1753	SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3),  //PFXL 32XG3
   1754	SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3),  //PFXL 48XG3
   1755	SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3),  //PFXL 64XG3
   1756	SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3),  //PFXL 80XG3
   1757	SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3),  //PFXL 96XG3
   1758	SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3),  //PFXI 24XG3
   1759	SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3),  //PFXI 32XG3
   1760	SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3),  //PFXI 48XG3
   1761	SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3),  //PFXI 64XG3
   1762	SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3),  //PFXI 80XG3
   1763	SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3),  //PFXI 96XG3
   1764	SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4),  //PFX 100XG4
   1765	SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4),  //PFX 84XG4
   1766	SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4),  //PFX 68XG4
   1767	SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4),  //PFX 52XG4
   1768	SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4),  //PFX 36XG4
   1769	SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4),  //PFX 28XG4
   1770	SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4),  //PSX 100XG4
   1771	SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4),  //PSX 84XG4
   1772	SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4),  //PSX 68XG4
   1773	SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4),  //PSX 52XG4
   1774	SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4),  //PSX 36XG4
   1775	SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4),  //PSX 28XG4
   1776	SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4),  //PAX 100XG4
   1777	SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4),  //PAX 84XG4
   1778	SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4),  //PAX 68XG4
   1779	SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4),  //PAX 52XG4
   1780	SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4),  //PAX 36XG4
   1781	SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4),  //PAX 28XG4
   1782	SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4),  //PFXA 52XG4
   1783	SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4),  //PFXA 36XG4
   1784	SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4),  //PFXA 28XG4
   1785	SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4),  //PSXA 52XG4
   1786	SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4),  //PSXA 36XG4
   1787	SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4),  //PSXA 28XG4
   1788	SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4),  //PAXA 52XG4
   1789	SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4),  //PAXA 36XG4
   1790	SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4),  //PAXA 28XG4
   1791	{0}
   1792};
   1793MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
   1794
   1795static struct pci_driver switchtec_pci_driver = {
   1796	.name		= KBUILD_MODNAME,
   1797	.id_table	= switchtec_pci_tbl,
   1798	.probe		= switchtec_pci_probe,
   1799	.remove		= switchtec_pci_remove,
   1800};
   1801
   1802static int __init switchtec_init(void)
   1803{
   1804	int rc;
   1805
   1806	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
   1807				 "switchtec");
   1808	if (rc)
   1809		return rc;
   1810
   1811	switchtec_class = class_create(THIS_MODULE, "switchtec");
   1812	if (IS_ERR(switchtec_class)) {
   1813		rc = PTR_ERR(switchtec_class);
   1814		goto err_create_class;
   1815	}
   1816
   1817	rc = pci_register_driver(&switchtec_pci_driver);
   1818	if (rc)
   1819		goto err_pci_register;
   1820
   1821	pr_info(KBUILD_MODNAME ": loaded.\n");
   1822
   1823	return 0;
   1824
   1825err_pci_register:
   1826	class_destroy(switchtec_class);
   1827
   1828err_create_class:
   1829	unregister_chrdev_region(switchtec_devt, max_devices);
   1830
   1831	return rc;
   1832}
   1833module_init(switchtec_init);
   1834
   1835static void __exit switchtec_exit(void)
   1836{
   1837	pci_unregister_driver(&switchtec_pci_driver);
   1838	class_destroy(switchtec_class);
   1839	unregister_chrdev_region(switchtec_devt, max_devices);
   1840	ida_destroy(&switchtec_minor_ida);
   1841
   1842	pr_info(KBUILD_MODNAME ": unloaded.\n");
   1843}
   1844module_exit(switchtec_exit);