cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

virtio_pci_modern_dev.c (20122B)


// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/virtio_pci_modern.h>
#include <linux/module.h>
#include <linux/pci.h>

/*
 * vp_modern_map_capability - map a part of virtio pci capability
 * @mdev: the modern virtio-pci device
 * @off: offset of the capability
 * @minlen: minimal length of the capability
 * @align: align requirement
 * @start: start from the capability
 * @size: map size
 * @len: the length that is actually mapped
 * @pa: physical address of the capability
 *
 * Returns the io address for the part of the capability
 */
static void __iomem *
vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
			 size_t minlen, u32 align, u32 start, u32 size,
			 size_t *len, resource_size_t *pa)
{
	struct pci_dev *dev = mdev->pci_dev;
	u8 bar;
	u32 offset, length;
	void __iomem *p;

	pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
						 bar),
			     &bar);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
			     &offset);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
			      &length);

	/* Check if the BAR may have changed since we requested the region. */
	if (bar >= PCI_STD_NUM_BARS || !(mdev->modern_bars & (1 << bar))) {
		dev_err(&dev->dev,
			"virtio_pci: bar unexpectedly changed to %u\n", bar);
		return NULL;
	}

	if (length <= start) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>%u expected)\n",
			length, start);
		return NULL;
	}

	if (length - start < minlen) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>=%zu expected)\n",
			length, minlen);
		return NULL;
	}

	length -= start;

	if (start + offset < offset) {
		dev_err(&dev->dev,
			"virtio_pci: map wrap-around %u+%u\n",
			start, offset);
		return NULL;
	}

	offset += start;

	if (offset & (align - 1)) {
		dev_err(&dev->dev,
			"virtio_pci: offset %u not aligned to %u\n",
			offset, align);
		return NULL;
	}

	if (length > size)
		length = size;

	if (len)
		*len = length;

	if (minlen + offset < minlen ||
	    minlen + offset > pci_resource_len(dev, bar)) {
		dev_err(&dev->dev,
			"virtio_pci: map virtio %zu@%u "
			"out of range on bar %i length %lu\n",
			minlen, offset,
			bar, (unsigned long)pci_resource_len(dev, bar));
		return NULL;
	}

	p = pci_iomap_range(dev, bar, offset, length);
	if (!p)
		dev_err(&dev->dev,
			"virtio_pci: unable to map virtio %u@%u on bar %i\n",
			length, offset, bar);
	else if (pa)
		*pa = pci_resource_start(dev, bar) + offset;

	return p;
}
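
/*
 * Example (illustrative sketch, not a call made at this point in the
 * file): a caller that has located the ISR capability at config-space
 * offset "isr" could map its single status byte and retrieve the mapped
 * length and physical address like this, assuming mdev->pci_dev and
 * mdev->modern_bars are already populated:
 *
 *	size_t len;
 *	resource_size_t pa;
 *	void __iomem *isr_base;
 *
 *	isr_base = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
 *					    0, 1, &len, &pa);
 *	if (!isr_base)
 *		return -EINVAL;
 */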

/**
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the pci device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
 * @bars: the bitmask of BARs
 *
 * Returns offset of the capability, or 0.
 */
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
					     u32 ioresource_types, int *bars)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, bar;
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type),
				     &type);
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar),
				     &bar);

		/* Ignore structures with reserved BAR values */
		if (bar >= PCI_STD_NUM_BARS)
			continue;

		if (type == cfg_type) {
			if (pci_resource_len(dev, bar) &&
			    pci_resource_flags(dev, bar) & ioresource_types) {
				*bars |= (1 << bar);
				return pos;
			}
		}
	}
	return 0;
}
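
/*
 * Example (sketch): probing code typically collects the BARs used by each
 * capability into one shared bitmask, e.g. for the common config:
 *
 *	int bars = 0;
 *	int common = virtio_pci_find_capability(dev, VIRTIO_PCI_CAP_COMMON_CFG,
 *						IORESOURCE_IO | IORESOURCE_MEM,
 *						&bars);
 *	if (!common)
 *		return -ENODEV;	// no modern common config: legacy device
 */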

/* This is part of the ABI.  Don't screw with it. */
static inline void check_offsets(void)
{
	/* Note: disk space was harmed in compilation of this function. */
	BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
		     offsetof(struct virtio_pci_cap, cap_vndr));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
		     offsetof(struct virtio_pci_cap, cap_next));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
		     offsetof(struct virtio_pci_cap, cap_len));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
		     offsetof(struct virtio_pci_cap, cfg_type));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
		     offsetof(struct virtio_pci_cap, bar));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
		     offsetof(struct virtio_pci_cap, offset));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
		     offsetof(struct virtio_pci_cap, length));
	BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
		     offsetof(struct virtio_pci_notify_cap,
			      notify_off_multiplier));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      device_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}

/*
 * vp_modern_probe: probe the modern virtio pci device, note that the
 * caller is required to enable the PCI device before calling this function.
 * @mdev: the modern virtio-pci device
 *
 * Returns 0 on success, a negative error code on failure
 */
int vp_modern_probe(struct virtio_pci_modern_device *mdev)
{
	struct pci_dev *pci_dev = mdev->pci_dev;
	int err, common, isr, notify, device;
	u32 notify_length;
	u32 notify_offset;

	check_offsets();

	/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
		return -ENODEV;

	if (pci_dev->device < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as legacy driver always did.
		 */
		mdev->id.device = pci_dev->subsystem_device;
	} else {
		/* Modern devices: simply use PCI device id, but start from 0x1040. */
		mdev->id.device = pci_dev->device - 0x1040;
	}
	mdev->id.vendor = pci_dev->subsystem_vendor;
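
	/* For example, a transitional virtio-net device has PCI device id
	 * 0x1000 and subsystem device id 1 (VIRTIO_ID_NET), so id.device
	 * becomes 1; a modern virtio-net device instead has PCI device id
	 * 0x1041, and 0x1041 - 0x1040 yields the same virtio device id 1.
	 */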

	/* check for a common config: if not, use legacy mode (bar 0). */
	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);
	if (!common) {
		dev_info(&pci_dev->dev,
			 "virtio_pci: leaving for legacy driver\n");
		return -ENODEV;
	}

	/* If common is there, these should be too... */
	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
					 IORESOURCE_IO | IORESOURCE_MEM,
					 &mdev->modern_bars);
	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);
	if (!isr || !notify) {
		dev_err(&pci_dev->dev,
			"virtio_pci: missing capabilities %i/%i/%i\n",
			common, isr, notify);
		return -EINVAL;
	}

	err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pci_dev->dev,
						DMA_BIT_MASK(32));
	if (err)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");

	/* Device capability is only mandatory for devices that have
	 * device-specific configuration.
	 */
	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);

	err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
					   "virtio-pci-modern");
	if (err)
		return err;

	err = -EINVAL;
	mdev->common = vp_modern_map_capability(mdev, common,
				      sizeof(struct virtio_pci_common_cfg), 4,
				      0, sizeof(struct virtio_pci_common_cfg),
				      NULL, NULL);
	if (!mdev->common)
		goto err_map_common;
	mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
					     0, 1,
					     NULL, NULL);
	if (!mdev->isr)
		goto err_map_isr;

	/* Read notify_off_multiplier from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						notify_off_multiplier),
			      &mdev->notify_offset_multiplier);
	/* Read notify length and offset from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.length),
			      &notify_length);

	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.offset),
			      &notify_offset);

	/* We don't know how many VQs we'll map ahead of time.
	 * If the notify length is small, map it all now.
	 * Otherwise, map each VQ individually later.
	 */
	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		mdev->notify_base = vp_modern_map_capability(mdev, notify,
							     2, 2,
							     0, notify_length,
							     &mdev->notify_len,
							     &mdev->notify_pa);
		if (!mdev->notify_base)
			goto err_map_notify;
	} else {
		mdev->notify_map_cap = notify;
	}
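
	/* For example, with notify_offset == 0 and notify_length == 0x1000
	 * on a 4K-page system, 0x1000 + 0 <= PAGE_SIZE holds, so the whole
	 * notify window is mapped once and per-VQ addresses are later
	 * computed from notify_base; a device advertising a larger window
	 * (say 0x10000) instead gets each VQ's notify area mapped on demand
	 * through notify_map_cap in vp_modern_map_vq_notify() below.
	 */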

	/* Again, we don't know how much we should map, but PAGE_SIZE
	 * is more than enough for all existing devices.
	 */
	if (device) {
		mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
							0, PAGE_SIZE,
							&mdev->device_len,
							NULL);
		if (!mdev->device)
			goto err_map_device;
	}

	return 0;

err_map_device:
	if (mdev->notify_base)
		pci_iounmap(pci_dev, mdev->notify_base);
err_map_notify:
	pci_iounmap(pci_dev, mdev->isr);
err_map_isr:
	pci_iounmap(pci_dev, mdev->common);
err_map_common:
	pci_release_selected_regions(pci_dev, mdev->modern_bars);
	return err;
}
EXPORT_SYMBOL_GPL(vp_modern_probe);
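
/*
 * Example (sketch, with a hypothetical my_probe; the real virtio_pci
 * driver embeds mdev in a larger per-device structure): a PCI driver's
 * probe routine enables the device before handing it to vp_modern_probe():
 *
 *	static int my_probe(struct pci_dev *pci_dev,
 *			    const struct pci_device_id *id)
 *	{
 *		struct virtio_pci_modern_device *mdev;
 *
 *		mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
 *		if (!mdev)
 *			return -ENOMEM;
 *		mdev->pci_dev = pci_dev;
 *
 *		if (pci_enable_device(pci_dev)) {
 *			kfree(mdev);
 *			return -ENODEV;
 *		}
 *		return vp_modern_probe(mdev);
 *	}
 */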

/*
 * vp_modern_remove: remove and cleanup the modern virtio pci device
 * @mdev: the modern virtio-pci device
 */
void vp_modern_remove(struct virtio_pci_modern_device *mdev)
{
	struct pci_dev *pci_dev = mdev->pci_dev;

	if (mdev->device)
		pci_iounmap(pci_dev, mdev->device);
	if (mdev->notify_base)
		pci_iounmap(pci_dev, mdev->notify_base);
	pci_iounmap(pci_dev, mdev->isr);
	pci_iounmap(pci_dev, mdev->common);
	pci_release_selected_regions(pci_dev, mdev->modern_bars);
}
EXPORT_SYMBOL_GPL(vp_modern_remove);

/*
 * vp_modern_get_features - get features from device
 * @mdev: the modern virtio-pci device
 *
 * Returns the features read from the device
 */
u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	u64 features;

	vp_iowrite32(0, &cfg->device_feature_select);
	features = vp_ioread32(&cfg->device_feature);
	vp_iowrite32(1, &cfg->device_feature_select);
	features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);

	return features;
}
EXPORT_SYMBOL_GPL(vp_modern_get_features);
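
/*
 * Example (sketch): a caller can test an individual feature bit in the
 * returned 64-bit mask, e.g. to confirm the device really is modern:
 *
 *	u64 features = vp_modern_get_features(mdev);
 *
 *	if (!(features & BIT_ULL(VIRTIO_F_VERSION_1)))
 *		return -ENODEV;
 */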

/*
 * vp_modern_get_driver_features - get driver features from device
 * @mdev: the modern virtio-pci device
 *
 * Returns the driver features read from the device
 */
u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	u64 features;

	vp_iowrite32(0, &cfg->guest_feature_select);
	features = vp_ioread32(&cfg->guest_feature);
	vp_iowrite32(1, &cfg->guest_feature_select);
	features |= ((u64)vp_ioread32(&cfg->guest_feature) << 32);

	return features;
}
EXPORT_SYMBOL_GPL(vp_modern_get_driver_features);

/*
 * vp_modern_set_features - set features to device
 * @mdev: the modern virtio-pci device
 * @features: the features set to device
 */
void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
			    u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite32(0, &cfg->guest_feature_select);
	vp_iowrite32((u32)features, &cfg->guest_feature);
	vp_iowrite32(1, &cfg->guest_feature_select);
	vp_iowrite32(features >> 32, &cfg->guest_feature);
}
EXPORT_SYMBOL_GPL(vp_modern_set_features);
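
/*
 * Example (sketch): feature negotiation masks the device features with
 * what the driver supports before writing them back, with a hypothetical
 * DRIVER_SUPPORTED_FEATURES mask standing in for the driver's own set:
 *
 *	u64 features = vp_modern_get_features(mdev);
 *
 *	features &= DRIVER_SUPPORTED_FEATURES;
 *	vp_modern_set_features(mdev, features);
 */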

/*
 * vp_modern_generation - get the device generation
 * @mdev: the modern virtio-pci device
 *
 * Returns the generation read from device
 */
u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	return vp_ioread8(&cfg->config_generation);
}
EXPORT_SYMBOL_GPL(vp_modern_generation);

/*
 * vp_modern_get_status - get the device status
 * @mdev: the modern virtio-pci device
 *
 * Returns the status read from device
 */
u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	return vp_ioread8(&cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_get_status);

/*
 * vp_modern_set_status - set status to device
 * @mdev: the modern virtio-pci device
 * @status: the status set to device
 */
void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
				 u8 status)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	/*
	 * Per memory-barriers.txt, wmb() is not needed to guarantee
	 * that the cache coherent memory writes have completed
	 * before writing to the MMIO region.
	 */
	vp_iowrite8(status, &cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_set_status);

/*
 * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: queue index
 * @vector: the config vector
 *
 * Returns the config vector read from the device
 */
u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
			   u16 index, u16 vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite16(index, &cfg->queue_select);
	vp_iowrite16(vector, &cfg->queue_msix_vector);
	/* Flush the write out to device */
	return vp_ioread16(&cfg->queue_msix_vector);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_vector);
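
/*
 * Example (sketch): the read-back value tells the caller whether the
 * device actually accepted the vector:
 *
 *	if (vp_modern_queue_vector(mdev, index, vector) ==
 *	    VIRTIO_MSI_NO_VECTOR)
 *		return -EBUSY;	// device had no resources for this vector
 */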

/*
 * vp_modern_config_vector - set the vector for config interrupt
 * @mdev: the modern virtio-pci device
 * @vector: the config vector
 *
 * Returns the config vector read from the device
 */
u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
			    u16 vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	/* Setup the vector used for configuration events */
	vp_iowrite16(vector, &cfg->msix_config);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return vp_ioread16(&cfg->msix_config);
}
EXPORT_SYMBOL_GPL(vp_modern_config_vector);

/*
 * vp_modern_queue_address - set the virtqueue address
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @desc_addr: address of the descriptor area
 * @driver_addr: address of the driver area
 * @device_addr: address of the device area
 */
void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
			     u16 index, u64 desc_addr, u64 driver_addr,
			     u64 device_addr)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite16(index, &cfg->queue_select);

	vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
			     &cfg->queue_desc_hi);
	vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
			     &cfg->queue_avail_hi);
	vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
			     &cfg->queue_used_hi);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_address);
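
/*
 * Example (sketch): for a split virtqueue whose rings were allocated with
 * the DMA API, the three areas are programmed from their DMA addresses,
 * here with hypothetical cookies desc_dma, avail_dma and used_dma:
 *
 *	vp_modern_queue_address(mdev, index, desc_dma, avail_dma, used_dma);
 */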

/*
 * vp_modern_set_queue_enable - enable a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @enable: whether to enable the virtqueue
 */
void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
				u16 index, bool enable)
{
	vp_iowrite16(index, &mdev->common->queue_select);
	vp_iowrite16(enable, &mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable);
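
/*
 * Example (sketch): per the virtio spec, a queue is enabled only after
 * its size and ring addresses have been programmed, e.g. with the
 * hypothetical DMA cookies from the previous example:
 *
 *	vp_modern_set_queue_size(mdev, index, 256);
 *	vp_modern_queue_address(mdev, index, desc_dma, avail_dma, used_dma);
 *	vp_modern_set_queue_enable(mdev, index, true);
 */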

/*
 * vp_modern_get_queue_enable - check whether a virtqueue is enabled
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns whether a virtqueue is enabled or not
 */
bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
				u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable);

/*
 * vp_modern_set_queue_size - set size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @size: the size of the virtqueue
 */
void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
			      u16 index, u16 size)
{
	vp_iowrite16(index, &mdev->common->queue_select);
	vp_iowrite16(size, &mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_size);

/*
 * vp_modern_get_queue_size - get size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the size of the virtqueue
 */
u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
			     u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_size);

/*
 * vp_modern_get_num_queues - get the number of virtqueues
 * @mdev: the modern virtio-pci device
 *
 * Returns the number of virtqueues
 */
u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
{
	return vp_ioread16(&mdev->common->num_queues);
}
EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);

/*
 * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the notification offset for a virtqueue
 */
static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
					  u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_notify_off);
}

/*
 * vp_modern_map_vq_notify - map notification area for a
 * specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @pa: the pointer to the physical address of the notify area
 *
 * Returns the address of the notification area
 */
void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
				      u16 index, resource_size_t *pa)
{
	u16 off = vp_modern_get_queue_notify_off(mdev, index);

	if (mdev->notify_base) {
		/* offset should not wrap */
		if ((u64)off * mdev->notify_offset_multiplier + 2
			> mdev->notify_len) {
			dev_warn(&mdev->pci_dev->dev,
				 "bad notification offset %u (x %u) "
				 "for queue %u > %zd",
				 off, mdev->notify_offset_multiplier,
				 index, mdev->notify_len);
			return NULL;
		}
		if (pa)
			*pa = mdev->notify_pa +
			      off * mdev->notify_offset_multiplier;
		return mdev->notify_base + off * mdev->notify_offset_multiplier;
	} else {
		return vp_modern_map_capability(mdev,
				       mdev->notify_map_cap, 2, 2,
				       off * mdev->notify_offset_multiplier, 2,
				       NULL, pa);
	}
}
EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);
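
/*
 * Example (sketch): a driver kicks a virtqueue by writing the queue index
 * to the mapped notify address:
 *
 *	void __iomem *notify = vp_modern_map_vq_notify(mdev, index, NULL);
 *
 *	if (notify)
 *		vp_iowrite16(index, notify);
 */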

MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Modern Virtio PCI Device");
MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL");