cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vfio_pci_config.c (54223B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * VFIO PCI config space virtualization
      4 *
      5 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
      6 *     Author: Alex Williamson <alex.williamson@redhat.com>
      7 *
      8 * Derived from original vfio:
      9 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
     10 * Author: Tom Lyon, pugs@cisco.com
     11 */
     12
     13/*
     14 * This code handles reading and writing of PCI configuration registers.
     15 * This is hairy because we want to allow a lot of flexibility to the
     16 * user driver, but cannot trust it with all of the config fields.
     17 * Tables determine which fields can be read and written, as well as
     18 * which fields are 'virtualized' - special actions and translations to
     19 * make it appear to the user that he has control, when in fact things
     20 * must be negotiated with the underlying OS.
     21 */
     22
     23#include <linux/fs.h>
     24#include <linux/pci.h>
     25#include <linux/uaccess.h>
     26#include <linux/vfio.h>
     27#include <linux/slab.h>
     28
     29#include <linux/vfio_pci_core.h>
     30
     31/* Fake capability ID for standard config space */
     32#define PCI_CAP_ID_BASIC	0
     33
     34#define is_bar(offset)	\
     35	((offset >= PCI_BASE_ADDRESS_0 && offset < PCI_BASE_ADDRESS_5 + 4) || \
     36	 (offset >= PCI_ROM_ADDRESS && offset < PCI_ROM_ADDRESS + 4))
     37
     38/*
     39 * Lengths of PCI Config Capabilities
     40 *   0: Removed from the user visible capability list
     41 *   FF: Variable length
     42 */
     43static const u8 pci_cap_length[PCI_CAP_ID_MAX + 1] = {
     44	[PCI_CAP_ID_BASIC]	= PCI_STD_HEADER_SIZEOF, /* pci config header */
     45	[PCI_CAP_ID_PM]		= PCI_PM_SIZEOF,
     46	[PCI_CAP_ID_AGP]	= PCI_AGP_SIZEOF,
     47	[PCI_CAP_ID_VPD]	= PCI_CAP_VPD_SIZEOF,
     48	[PCI_CAP_ID_SLOTID]	= 0,		/* bridge - don't care */
     49	[PCI_CAP_ID_MSI]	= 0xFF,		/* 10, 14, 20, or 24 */
     50	[PCI_CAP_ID_CHSWP]	= 0,		/* cpci - not yet */
     51	[PCI_CAP_ID_PCIX]	= 0xFF,		/* 8 or 24 */
     52	[PCI_CAP_ID_HT]		= 0xFF,		/* hypertransport */
     53	[PCI_CAP_ID_VNDR]	= 0xFF,		/* variable */
     54	[PCI_CAP_ID_DBG]	= 0,		/* debug - don't care */
     55	[PCI_CAP_ID_CCRC]	= 0,		/* cpci - not yet */
     56	[PCI_CAP_ID_SHPC]	= 0,		/* hotswap - not yet */
     57	[PCI_CAP_ID_SSVID]	= 0,		/* bridge - don't care */
     58	[PCI_CAP_ID_AGP3]	= 0,		/* AGP8x - not yet */
     59	[PCI_CAP_ID_SECDEV]	= 0,		/* secure device not yet */
     60	[PCI_CAP_ID_EXP]	= 0xFF,		/* 20 or 44 */
     61	[PCI_CAP_ID_MSIX]	= PCI_CAP_MSIX_SIZEOF,
     62	[PCI_CAP_ID_SATA]	= 0xFF,
     63	[PCI_CAP_ID_AF]		= PCI_CAP_AF_SIZEOF,
     64};
     65
     66/*
     67 * Lengths of PCIe/PCI-X Extended Config Capabilities
     68 *   0: Removed or masked from the user visible capability list
     69 *   FF: Variable length
     70 */
     71static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = {
     72	[PCI_EXT_CAP_ID_ERR]	=	PCI_ERR_ROOT_COMMAND,
     73	[PCI_EXT_CAP_ID_VC]	=	0xFF,
     74	[PCI_EXT_CAP_ID_DSN]	=	PCI_EXT_CAP_DSN_SIZEOF,
     75	[PCI_EXT_CAP_ID_PWR]	=	PCI_EXT_CAP_PWR_SIZEOF,
     76	[PCI_EXT_CAP_ID_RCLD]	=	0,	/* root only - don't care */
     77	[PCI_EXT_CAP_ID_RCILC]	=	0,	/* root only - don't care */
     78	[PCI_EXT_CAP_ID_RCEC]	=	0,	/* root only - don't care */
     79	[PCI_EXT_CAP_ID_MFVC]	=	0xFF,
     80	[PCI_EXT_CAP_ID_VC9]	=	0xFF,	/* same as CAP_ID_VC */
     81	[PCI_EXT_CAP_ID_RCRB]	=	0,	/* root only - don't care */
     82	[PCI_EXT_CAP_ID_VNDR]	=	0xFF,
     83	[PCI_EXT_CAP_ID_CAC]	=	0,	/* obsolete */
     84	[PCI_EXT_CAP_ID_ACS]	=	0xFF,
     85	[PCI_EXT_CAP_ID_ARI]	=	PCI_EXT_CAP_ARI_SIZEOF,
     86	[PCI_EXT_CAP_ID_ATS]	=	PCI_EXT_CAP_ATS_SIZEOF,
     87	[PCI_EXT_CAP_ID_SRIOV]	=	PCI_EXT_CAP_SRIOV_SIZEOF,
     88	[PCI_EXT_CAP_ID_MRIOV]	=	0,	/* not yet */
     89	[PCI_EXT_CAP_ID_MCAST]	=	PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF,
     90	[PCI_EXT_CAP_ID_PRI]	=	PCI_EXT_CAP_PRI_SIZEOF,
     91	[PCI_EXT_CAP_ID_AMD_XXX] =	0,	/* not yet */
     92	[PCI_EXT_CAP_ID_REBAR]	=	0xFF,
     93	[PCI_EXT_CAP_ID_DPA]	=	0xFF,
     94	[PCI_EXT_CAP_ID_TPH]	=	0xFF,
     95	[PCI_EXT_CAP_ID_LTR]	=	PCI_EXT_CAP_LTR_SIZEOF,
     96	[PCI_EXT_CAP_ID_SECPCI]	=	0,	/* not yet */
     97	[PCI_EXT_CAP_ID_PMUX]	=	0,	/* not yet */
     98	[PCI_EXT_CAP_ID_PASID]	=	0,	/* not yet */
     99};
    100
    101/*
    102 * Read/Write Permission Bits - one bit for each bit in capability
    103 * Any field can be read if it exists, but what is read depends on
    104 * whether the field is 'virtualized', or just pass through to the
    105 * hardware.  Any virtualized field is also virtualized for writes.
    106 * Writes are only permitted if they have a 1 bit here.
    107 */
    108struct perm_bits {
    109	u8	*virt;		/* read/write virtual data, not hw */
    110	u8	*write;		/* writeable bits */
    111	int	(*readfn)(struct vfio_pci_core_device *vdev, int pos, int count,
    112			  struct perm_bits *perm, int offset, __le32 *val);
    113	int	(*writefn)(struct vfio_pci_core_device *vdev, int pos, int count,
    114			   struct perm_bits *perm, int offset, __le32 val);
    115};
    116
    117#define	NO_VIRT		0
    118#define	ALL_VIRT	0xFFFFFFFFU
    119#define	NO_WRITE	0
    120#define	ALL_WRITE	0xFFFFFFFFU
    121
    122static int vfio_user_config_read(struct pci_dev *pdev, int offset,
    123				 __le32 *val, int count)
    124{
    125	int ret = -EINVAL;
    126	u32 tmp_val = 0;
    127
    128	switch (count) {
    129	case 1:
    130	{
    131		u8 tmp;
    132		ret = pci_user_read_config_byte(pdev, offset, &tmp);
    133		tmp_val = tmp;
    134		break;
    135	}
    136	case 2:
    137	{
    138		u16 tmp;
    139		ret = pci_user_read_config_word(pdev, offset, &tmp);
    140		tmp_val = tmp;
    141		break;
    142	}
    143	case 4:
    144		ret = pci_user_read_config_dword(pdev, offset, &tmp_val);
    145		break;
    146	}
    147
    148	*val = cpu_to_le32(tmp_val);
    149
    150	return ret;
    151}
    152
    153static int vfio_user_config_write(struct pci_dev *pdev, int offset,
    154				  __le32 val, int count)
    155{
    156	int ret = -EINVAL;
    157	u32 tmp_val = le32_to_cpu(val);
    158
    159	switch (count) {
    160	case 1:
    161		ret = pci_user_write_config_byte(pdev, offset, tmp_val);
    162		break;
    163	case 2:
    164		ret = pci_user_write_config_word(pdev, offset, tmp_val);
    165		break;
    166	case 4:
    167		ret = pci_user_write_config_dword(pdev, offset, tmp_val);
    168		break;
    169	}
    170
    171	return ret;
    172}
    173
    174static int vfio_default_config_read(struct vfio_pci_core_device *vdev, int pos,
    175				    int count, struct perm_bits *perm,
    176				    int offset, __le32 *val)
    177{
    178	__le32 virt = 0;
    179
    180	memcpy(val, vdev->vconfig + pos, count);
    181
    182	memcpy(&virt, perm->virt + offset, count);
    183
    184	/* Any non-virtualized bits? */
    185	if (cpu_to_le32(~0U >> (32 - (count * 8))) != virt) {
    186		struct pci_dev *pdev = vdev->pdev;
    187		__le32 phys_val = 0;
    188		int ret;
    189
    190		ret = vfio_user_config_read(pdev, pos, &phys_val, count);
    191		if (ret)
    192			return ret;
    193
    194		*val = (phys_val & ~virt) | (*val & virt);
    195	}
    196
    197	return count;
    198}
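
/*
 * Editorial sketch, not part of the upstream driver (the helper name is
 * invented for illustration): the merge rule used above, factored out.
 * For each bit position, a 1 in @virt selects the emulated copy from
 * vconfig and a 0 selects the value read from hardware.
 */
static inline __le32 vfio_merge_virt_bits(__le32 phys, __le32 emu, __le32 virt)
{
	return (phys & ~virt) | (emu & virt);
}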
    199
    200static int vfio_default_config_write(struct vfio_pci_core_device *vdev, int pos,
    201				     int count, struct perm_bits *perm,
    202				     int offset, __le32 val)
    203{
    204	__le32 virt = 0, write = 0;
    205
    206	memcpy(&write, perm->write + offset, count);
    207
    208	if (!write)
    209		return count; /* drop, no writable bits */
    210
    211	memcpy(&virt, perm->virt + offset, count);
    212
    213	/* Virtualized and writable bits go to vconfig */
    214	if (write & virt) {
    215		__le32 virt_val = 0;
    216
    217		memcpy(&virt_val, vdev->vconfig + pos, count);
    218
    219		virt_val &= ~(write & virt);
    220		virt_val |= (val & (write & virt));
    221
    222		memcpy(vdev->vconfig + pos, &virt_val, count);
    223	}
    224
     225	/* Non-virtualized and writable bits go to hardware */
    226	if (write & ~virt) {
    227		struct pci_dev *pdev = vdev->pdev;
    228		__le32 phys_val = 0;
    229		int ret;
    230
    231		ret = vfio_user_config_read(pdev, pos, &phys_val, count);
    232		if (ret)
    233			return ret;
    234
    235		phys_val &= ~(write & ~virt);
    236		phys_val |= (val & (write & ~virt));
    237
    238		ret = vfio_user_config_write(pdev, pos, phys_val, count);
    239		if (ret)
    240			return ret;
    241	}
    242
    243	return count;
    244}
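
/*
 * Editorial sketch, not part of the upstream driver (the helper name is
 * invented for illustration): how the write path above splits an
 * incoming value.  Writable and virtualized bits land in the emulated
 * vconfig copy, writable non-virtualized bits are read-modify-written
 * to hardware, and everything else is silently dropped.
 */
static inline void vfio_split_write_bits(__le32 val, __le32 virt, __le32 write,
					 __le32 *to_vconfig, __le32 *to_hw)
{
	*to_vconfig = val & (write & virt);
	*to_hw = val & (write & ~virt);
}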
    245
    246/* Allow direct read from hardware, except for capability next pointer */
    247static int vfio_direct_config_read(struct vfio_pci_core_device *vdev, int pos,
    248				   int count, struct perm_bits *perm,
    249				   int offset, __le32 *val)
    250{
    251	int ret;
    252
    253	ret = vfio_user_config_read(vdev->pdev, pos, val, count);
    254	if (ret)
    255		return ret;
    256
    257	if (pos >= PCI_CFG_SPACE_SIZE) { /* Extended cap header mangling */
    258		if (offset < 4)
    259			memcpy(val, vdev->vconfig + pos, count);
    260	} else if (pos >= PCI_STD_HEADER_SIZEOF) { /* Std cap mangling */
    261		if (offset == PCI_CAP_LIST_ID && count > 1)
    262			memcpy(val, vdev->vconfig + pos,
    263			       min(PCI_CAP_FLAGS, count));
    264		else if (offset == PCI_CAP_LIST_NEXT)
    265			memcpy(val, vdev->vconfig + pos, 1);
    266	}
    267
    268	return count;
    269}
    270
    271/* Raw access skips any kind of virtualization */
    272static int vfio_raw_config_write(struct vfio_pci_core_device *vdev, int pos,
    273				 int count, struct perm_bits *perm,
    274				 int offset, __le32 val)
    275{
    276	int ret;
    277
    278	ret = vfio_user_config_write(vdev->pdev, pos, val, count);
    279	if (ret)
    280		return ret;
    281
    282	return count;
    283}
    284
    285static int vfio_raw_config_read(struct vfio_pci_core_device *vdev, int pos,
    286				int count, struct perm_bits *perm,
    287				int offset, __le32 *val)
    288{
    289	int ret;
    290
    291	ret = vfio_user_config_read(vdev->pdev, pos, val, count);
    292	if (ret)
    293		return ret;
    294
    295	return count;
    296}
    297
    298/* Virt access uses only virtualization */
    299static int vfio_virt_config_write(struct vfio_pci_core_device *vdev, int pos,
    300				  int count, struct perm_bits *perm,
    301				  int offset, __le32 val)
    302{
    303	memcpy(vdev->vconfig + pos, &val, count);
    304	return count;
    305}
    306
    307static int vfio_virt_config_read(struct vfio_pci_core_device *vdev, int pos,
    308				 int count, struct perm_bits *perm,
    309				 int offset, __le32 *val)
    310{
    311	memcpy(val, vdev->vconfig + pos, count);
    312	return count;
    313}
    314
    315/* Default capability regions to read-only, no-virtualization */
    316static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = {
    317	[0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
    318};
    319static struct perm_bits ecap_perms[PCI_EXT_CAP_ID_MAX + 1] = {
    320	[0 ... PCI_EXT_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
    321};
    322/*
    323 * Default unassigned regions to raw read-write access.  Some devices
    324 * require this to function as they hide registers between the gaps in
    325 * config space (be2net).  Like MMIO and I/O port registers, we have
    326 * to trust the hardware isolation.
    327 */
    328static struct perm_bits unassigned_perms = {
    329	.readfn = vfio_raw_config_read,
    330	.writefn = vfio_raw_config_write
    331};
    332
    333static struct perm_bits virt_perms = {
    334	.readfn = vfio_virt_config_read,
    335	.writefn = vfio_virt_config_write
    336};
    337
    338static void free_perm_bits(struct perm_bits *perm)
    339{
    340	kfree(perm->virt);
    341	kfree(perm->write);
    342	perm->virt = NULL;
    343	perm->write = NULL;
    344}
    345
    346static int alloc_perm_bits(struct perm_bits *perm, int size)
    347{
    348	/*
    349	 * Round up all permission bits to the next dword, this lets us
    350	 * ignore whether a read/write exceeds the defined capability
    351	 * structure.  We can do this because:
    352	 *  - Standard config space is already dword aligned
    353	 *  - Capabilities are all dword aligned (bits 0:1 of next reserved)
    354	 *  - Express capabilities defined as dword aligned
    355	 */
    356	size = round_up(size, 4);
    357
    358	/*
    359	 * Zero state is
    360	 * - All Readable, None Writeable, None Virtualized
    361	 */
    362	perm->virt = kzalloc(size, GFP_KERNEL);
    363	perm->write = kzalloc(size, GFP_KERNEL);
    364	if (!perm->virt || !perm->write) {
    365		free_perm_bits(perm);
    366		return -ENOMEM;
    367	}
    368
    369	perm->readfn = vfio_default_config_read;
    370	perm->writefn = vfio_default_config_write;
    371
    372	return 0;
    373}
    374
    375/*
    376 * Helper functions for filling in permission tables
    377 */
    378static inline void p_setb(struct perm_bits *p, int off, u8 virt, u8 write)
    379{
    380	p->virt[off] = virt;
    381	p->write[off] = write;
    382}
    383
    384/* Handle endian-ness - pci and tables are little-endian */
    385static inline void p_setw(struct perm_bits *p, int off, u16 virt, u16 write)
    386{
    387	*(__le16 *)(&p->virt[off]) = cpu_to_le16(virt);
    388	*(__le16 *)(&p->write[off]) = cpu_to_le16(write);
    389}
    390
    391/* Handle endian-ness - pci and tables are little-endian */
    392static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
    393{
    394	*(__le32 *)(&p->virt[off]) = cpu_to_le32(virt);
    395	*(__le32 *)(&p->write[off]) = cpu_to_le32(write);
    396}
    397
    398/* Caller should hold memory_lock semaphore */
    399bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev)
    400{
    401	struct pci_dev *pdev = vdev->pdev;
    402	u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
    403
    404	/*
    405	 * Memory region cannot be accessed if device power state is D3.
    406	 *
    407	 * SR-IOV VF memory enable is handled by the MSE bit in the
     408	 * PF SR-IOV capability; there's therefore no need to trigger
    409	 * faults based on the virtual value.
    410	 */
    411	return pdev->current_state < PCI_D3hot &&
    412	       (pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY));
    413}
    414
    415/*
    416 * Restore the *real* BARs after we detect a FLR or backdoor reset.
    417 * (backdoor = some device specific technique that we didn't catch)
    418 */
    419static void vfio_bar_restore(struct vfio_pci_core_device *vdev)
    420{
    421	struct pci_dev *pdev = vdev->pdev;
    422	u32 *rbar = vdev->rbar;
    423	u16 cmd;
    424	int i;
    425
    426	if (pdev->is_virtfn)
    427		return;
    428
    429	pci_info(pdev, "%s: reset recovery - restoring BARs\n", __func__);
    430
    431	for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++)
    432		pci_user_write_config_dword(pdev, i, *rbar);
    433
    434	pci_user_write_config_dword(pdev, PCI_ROM_ADDRESS, *rbar);
    435
    436	if (vdev->nointx) {
    437		pci_user_read_config_word(pdev, PCI_COMMAND, &cmd);
    438		cmd |= PCI_COMMAND_INTX_DISABLE;
    439		pci_user_write_config_word(pdev, PCI_COMMAND, cmd);
    440	}
    441}
    442
    443static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar)
    444{
    445	unsigned long flags = pci_resource_flags(pdev, bar);
    446	u32 val;
    447
    448	if (flags & IORESOURCE_IO)
    449		return cpu_to_le32(PCI_BASE_ADDRESS_SPACE_IO);
    450
    451	val = PCI_BASE_ADDRESS_SPACE_MEMORY;
    452
    453	if (flags & IORESOURCE_PREFETCH)
    454		val |= PCI_BASE_ADDRESS_MEM_PREFETCH;
    455
    456	if (flags & IORESOURCE_MEM_64)
    457		val |= PCI_BASE_ADDRESS_MEM_TYPE_64;
    458
    459	return cpu_to_le32(val);
    460}
    461
    462/*
    463 * Pretend we're hardware and tweak the values of the *virtual* PCI BARs
    464 * to reflect the hardware capabilities.  This implements BAR sizing.
    465 */
    466static void vfio_bar_fixup(struct vfio_pci_core_device *vdev)
    467{
    468	struct pci_dev *pdev = vdev->pdev;
    469	int i;
    470	__le32 *vbar;
    471	u64 mask;
    472
    473	if (!vdev->bardirty)
    474		return;
    475
    476	vbar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
    477
    478	for (i = 0; i < PCI_STD_NUM_BARS; i++, vbar++) {
    479		int bar = i + PCI_STD_RESOURCES;
    480
    481		if (!pci_resource_start(pdev, bar)) {
    482			*vbar = 0; /* Unmapped by host = unimplemented to user */
    483			continue;
    484		}
    485
    486		mask = ~(pci_resource_len(pdev, bar) - 1);
    487
    488		*vbar &= cpu_to_le32((u32)mask);
    489		*vbar |= vfio_generate_bar_flags(pdev, bar);
    490
    491		if (*vbar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
    492			vbar++;
    493			*vbar &= cpu_to_le32((u32)(mask >> 32));
    494			i++;
    495		}
    496	}
    497
    498	vbar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
    499
    500	/*
    501	 * NB. REGION_INFO will have reported zero size if we weren't able
    502	 * to read the ROM, but we still return the actual BAR size here if
    503	 * it exists (or the shadow ROM space).
    504	 */
    505	if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
    506		mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
    507		mask |= PCI_ROM_ADDRESS_ENABLE;
    508		*vbar &= cpu_to_le32((u32)mask);
    509	} else if (pdev->resource[PCI_ROM_RESOURCE].flags &
    510					IORESOURCE_ROM_SHADOW) {
    511		mask = ~(0x20000 - 1);
    512		mask |= PCI_ROM_ADDRESS_ENABLE;
    513		*vbar &= cpu_to_le32((u32)mask);
    514	} else
    515		*vbar = 0;
    516
    517	vdev->bardirty = false;
    518}
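
/*
 * Editorial sketch, not part of the driver: the classic BAR sizing
 * sequence a userspace driver runs against the virtual BARs above,
 * assuming 'fd' is a vfio-pci device fd and 'cfg' is the offset of its
 * config space region.  Because vfio_bar_fixup() clears the address
 * bits below the resource size, writing all 1s and reading back yields
 * the size mask without ever touching the physical BAR:
 *
 *	uint32_t orig, probe = ~0U, back;
 *
 *	pread(fd, &orig, 4, cfg + PCI_BASE_ADDRESS_0);
 *	pwrite(fd, &probe, 4, cfg + PCI_BASE_ADDRESS_0);
 *	pread(fd, &back, 4, cfg + PCI_BASE_ADDRESS_0);
 *	pwrite(fd, &orig, 4, cfg + PCI_BASE_ADDRESS_0);
 *	size = ~(back & PCI_BASE_ADDRESS_MEM_MASK) + 1;
 */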
    519
    520static int vfio_basic_config_read(struct vfio_pci_core_device *vdev, int pos,
    521				  int count, struct perm_bits *perm,
    522				  int offset, __le32 *val)
    523{
    524	if (is_bar(offset)) /* pos == offset for basic config */
    525		vfio_bar_fixup(vdev);
    526
    527	count = vfio_default_config_read(vdev, pos, count, perm, offset, val);
    528
    529	/* Mask in virtual memory enable */
    530	if (offset == PCI_COMMAND && vdev->pdev->no_command_memory) {
    531		u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
    532		u32 tmp_val = le32_to_cpu(*val);
    533
    534		tmp_val |= cmd & PCI_COMMAND_MEMORY;
    535		*val = cpu_to_le32(tmp_val);
    536	}
    537
    538	return count;
    539}
    540
    541/* Test whether BARs match the value we think they should contain */
    542static bool vfio_need_bar_restore(struct vfio_pci_core_device *vdev)
    543{
    544	int i = 0, pos = PCI_BASE_ADDRESS_0, ret;
    545	u32 bar;
    546
    547	for (; pos <= PCI_BASE_ADDRESS_5; i++, pos += 4) {
    548		if (vdev->rbar[i]) {
    549			ret = pci_user_read_config_dword(vdev->pdev, pos, &bar);
    550			if (ret || vdev->rbar[i] != bar)
    551				return true;
    552		}
    553	}
    554
    555	return false;
    556}
    557
    558static int vfio_basic_config_write(struct vfio_pci_core_device *vdev, int pos,
    559				   int count, struct perm_bits *perm,
    560				   int offset, __le32 val)
    561{
    562	struct pci_dev *pdev = vdev->pdev;
    563	__le16 *virt_cmd;
    564	u16 new_cmd = 0;
    565	int ret;
    566
    567	virt_cmd = (__le16 *)&vdev->vconfig[PCI_COMMAND];
    568
    569	if (offset == PCI_COMMAND) {
    570		bool phys_mem, virt_mem, new_mem, phys_io, virt_io, new_io;
    571		u16 phys_cmd;
    572
    573		ret = pci_user_read_config_word(pdev, PCI_COMMAND, &phys_cmd);
    574		if (ret)
    575			return ret;
    576
    577		new_cmd = le32_to_cpu(val);
    578
    579		phys_io = !!(phys_cmd & PCI_COMMAND_IO);
    580		virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
    581		new_io = !!(new_cmd & PCI_COMMAND_IO);
    582
    583		phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
    584		virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
    585		new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
    586
    587		if (!new_mem)
    588			vfio_pci_zap_and_down_write_memory_lock(vdev);
    589		else
    590			down_write(&vdev->memory_lock);
    591
    592		/*
    593		 * If the user is writing mem/io enable (new_mem/io) and we
    594		 * think it's already enabled (virt_mem/io), but the hardware
     595		 * shows it disabled (phys_mem/io), then the device has
    596		 * undergone some kind of backdoor reset and needs to be
    597		 * restored before we allow it to enable the bars.
    598		 * SR-IOV devices will trigger this - for mem enable let's
    599		 * catch this now and for io enable it will be caught later
    600		 */
    601		if ((new_mem && virt_mem && !phys_mem &&
    602		     !pdev->no_command_memory) ||
    603		    (new_io && virt_io && !phys_io) ||
    604		    vfio_need_bar_restore(vdev))
    605			vfio_bar_restore(vdev);
    606	}
    607
    608	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
    609	if (count < 0) {
    610		if (offset == PCI_COMMAND)
    611			up_write(&vdev->memory_lock);
    612		return count;
    613	}
    614
    615	/*
    616	 * Save current memory/io enable bits in vconfig to allow for
    617	 * the test above next time.
    618	 */
    619	if (offset == PCI_COMMAND) {
    620		u16 mask = PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
    621
    622		*virt_cmd &= cpu_to_le16(~mask);
    623		*virt_cmd |= cpu_to_le16(new_cmd & mask);
    624
    625		up_write(&vdev->memory_lock);
    626	}
    627
    628	/* Emulate INTx disable */
    629	if (offset >= PCI_COMMAND && offset <= PCI_COMMAND + 1) {
    630		bool virt_intx_disable;
    631
    632		virt_intx_disable = !!(le16_to_cpu(*virt_cmd) &
    633				       PCI_COMMAND_INTX_DISABLE);
    634
    635		if (virt_intx_disable && !vdev->virq_disabled) {
    636			vdev->virq_disabled = true;
    637			vfio_pci_intx_mask(vdev);
    638		} else if (!virt_intx_disable && vdev->virq_disabled) {
    639			vdev->virq_disabled = false;
    640			vfio_pci_intx_unmask(vdev);
    641		}
    642	}
    643
    644	if (is_bar(offset))
    645		vdev->bardirty = true;
    646
    647	return count;
    648}
    649
    650/* Permissions for the Basic PCI Header */
    651static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
    652{
    653	if (alloc_perm_bits(perm, PCI_STD_HEADER_SIZEOF))
    654		return -ENOMEM;
    655
    656	perm->readfn = vfio_basic_config_read;
    657	perm->writefn = vfio_basic_config_write;
    658
    659	/* Virtualized for SR-IOV functions, which just have FFFF */
    660	p_setw(perm, PCI_VENDOR_ID, (u16)ALL_VIRT, NO_WRITE);
    661	p_setw(perm, PCI_DEVICE_ID, (u16)ALL_VIRT, NO_WRITE);
    662
    663	/*
    664	 * Virtualize INTx disable, we use it internally for interrupt
    665	 * control and can emulate it for non-PCI 2.3 devices.
    666	 */
    667	p_setw(perm, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE, (u16)ALL_WRITE);
    668
    669	/* Virtualize capability list, we might want to skip/disable */
    670	p_setw(perm, PCI_STATUS, PCI_STATUS_CAP_LIST, NO_WRITE);
    671
    672	/* No harm to write */
    673	p_setb(perm, PCI_CACHE_LINE_SIZE, NO_VIRT, (u8)ALL_WRITE);
    674	p_setb(perm, PCI_LATENCY_TIMER, NO_VIRT, (u8)ALL_WRITE);
    675	p_setb(perm, PCI_BIST, NO_VIRT, (u8)ALL_WRITE);
    676
    677	/* Virtualize all bars, can't touch the real ones */
    678	p_setd(perm, PCI_BASE_ADDRESS_0, ALL_VIRT, ALL_WRITE);
    679	p_setd(perm, PCI_BASE_ADDRESS_1, ALL_VIRT, ALL_WRITE);
    680	p_setd(perm, PCI_BASE_ADDRESS_2, ALL_VIRT, ALL_WRITE);
    681	p_setd(perm, PCI_BASE_ADDRESS_3, ALL_VIRT, ALL_WRITE);
    682	p_setd(perm, PCI_BASE_ADDRESS_4, ALL_VIRT, ALL_WRITE);
    683	p_setd(perm, PCI_BASE_ADDRESS_5, ALL_VIRT, ALL_WRITE);
    684	p_setd(perm, PCI_ROM_ADDRESS, ALL_VIRT, ALL_WRITE);
    685
    686	/* Allow us to adjust capability chain */
    687	p_setb(perm, PCI_CAPABILITY_LIST, (u8)ALL_VIRT, NO_WRITE);
    688
    689	/* Sometimes used by sw, just virtualize */
    690	p_setb(perm, PCI_INTERRUPT_LINE, (u8)ALL_VIRT, (u8)ALL_WRITE);
    691
    692	/* Virtualize interrupt pin to allow hiding INTx */
    693	p_setb(perm, PCI_INTERRUPT_PIN, (u8)ALL_VIRT, (u8)NO_WRITE);
    694
    695	return 0;
    696}
    697
    698/*
    699 * It takes all the required locks to protect the access of power related
    700 * variables and then invokes vfio_pci_set_power_state().
    701 */
    702static void vfio_lock_and_set_power_state(struct vfio_pci_core_device *vdev,
    703					  pci_power_t state)
    704{
    705	if (state >= PCI_D3hot)
    706		vfio_pci_zap_and_down_write_memory_lock(vdev);
    707	else
    708		down_write(&vdev->memory_lock);
    709
    710	vfio_pci_set_power_state(vdev, state);
    711	up_write(&vdev->memory_lock);
    712}
    713
    714static int vfio_pm_config_write(struct vfio_pci_core_device *vdev, int pos,
    715				int count, struct perm_bits *perm,
    716				int offset, __le32 val)
    717{
    718	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
    719	if (count < 0)
    720		return count;
    721
    722	if (offset == PCI_PM_CTRL) {
    723		pci_power_t state;
    724
    725		switch (le32_to_cpu(val) & PCI_PM_CTRL_STATE_MASK) {
    726		case 0:
    727			state = PCI_D0;
    728			break;
    729		case 1:
    730			state = PCI_D1;
    731			break;
    732		case 2:
    733			state = PCI_D2;
    734			break;
    735		case 3:
    736			state = PCI_D3hot;
    737			break;
    738		}
    739
    740		vfio_lock_and_set_power_state(vdev, state);
    741	}
    742
    743	return count;
    744}
    745
    746/* Permissions for the Power Management capability */
    747static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
    748{
    749	if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM]))
    750		return -ENOMEM;
    751
    752	perm->writefn = vfio_pm_config_write;
    753
    754	/*
    755	 * We always virtualize the next field so we can remove
    756	 * capabilities from the chain if we want to.
    757	 */
    758	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
    759
    760	/*
     761	 * The guest can't process PME events. If a PME event is
     762	 * generated, it is mostly handled by the host, which also
     763	 * clears PME_STATUS. So virtualize the PME_Support bits.
    764	 * The vconfig bits will be cleared during device capability
    765	 * initialization.
    766	 */
    767	p_setw(perm, PCI_PM_PMC, PCI_PM_CAP_PME_MASK, NO_WRITE);
    768
    769	/*
    770	 * Power management is defined *per function*, so we can let
    771	 * the user change power state, but we trap and initiate the
    772	 * change ourselves, so the state bits are read-only.
    773	 *
    774	 * The guest can't process PME from D3cold so virtualize PME_Status
    775	 * and PME_En bits. The vconfig bits will be cleared during device
    776	 * capability initialization.
    777	 */
    778	p_setd(perm, PCI_PM_CTRL,
    779	       PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS,
    780	       ~(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS |
    781		 PCI_PM_CTRL_STATE_MASK));
    782
    783	return 0;
    784}
    785
    786static int vfio_vpd_config_write(struct vfio_pci_core_device *vdev, int pos,
    787				 int count, struct perm_bits *perm,
    788				 int offset, __le32 val)
    789{
    790	struct pci_dev *pdev = vdev->pdev;
    791	__le16 *paddr = (__le16 *)(vdev->vconfig + pos - offset + PCI_VPD_ADDR);
    792	__le32 *pdata = (__le32 *)(vdev->vconfig + pos - offset + PCI_VPD_DATA);
    793	u16 addr;
    794	u32 data;
    795
    796	/*
    797	 * Write through to emulation.  If the write includes the upper byte
    798	 * of PCI_VPD_ADDR, then the PCI_VPD_ADDR_F bit is written and we
    799	 * have work to do.
    800	 */
    801	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
    802	if (count < 0 || offset > PCI_VPD_ADDR + 1 ||
    803	    offset + count <= PCI_VPD_ADDR + 1)
    804		return count;
    805
    806	addr = le16_to_cpu(*paddr);
    807
    808	if (addr & PCI_VPD_ADDR_F) {
    809		data = le32_to_cpu(*pdata);
    810		if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
    811			return count;
    812	} else {
    813		data = 0;
    814		if (pci_read_vpd(pdev, addr, 4, &data) < 0)
    815			return count;
    816		*pdata = cpu_to_le32(data);
    817	}
    818
    819	/*
    820	 * Toggle PCI_VPD_ADDR_F in the emulated PCI_VPD_ADDR register to
    821	 * signal completion.  If an error occurs above, we assume that not
    822	 * toggling this bit will induce a driver timeout.
    823	 */
    824	addr ^= PCI_VPD_ADDR_F;
    825	*paddr = cpu_to_le16(addr);
    826
    827	return count;
    828}
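
/*
 * Editorial sketch, not part of the driver: the VPD handshake as the
 * user sees it through the emulation above, using the same 'fd'/'cfg'
 * convention as the BAR sizing sketch and with 'pos' as the capability
 * offset.  A dword read looks like:
 *
 *	u16 addr = off & ~PCI_VPD_ADDR_F;
 *
 *	pwrite(fd, &addr, 2, cfg + pos + PCI_VPD_ADDR);
 *	do
 *		pread(fd, &addr, 2, cfg + pos + PCI_VPD_ADDR);
 *	while (!(addr & PCI_VPD_ADDR_F));
 *	pread(fd, &data, 4, cfg + pos + PCI_VPD_DATA);
 *
 * A write stores PCI_VPD_DATA first, then the address with F set, and
 * polls until F reads back as 0.  Since vfio_vpd_config_write() calls
 * pci_read_vpd()/pci_write_vpd() synchronously and toggles F before
 * returning, a single poll iteration normally suffices.
 */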
    829
    830/* Permissions for Vital Product Data capability */
    831static int __init init_pci_cap_vpd_perm(struct perm_bits *perm)
    832{
    833	if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_VPD]))
    834		return -ENOMEM;
    835
    836	perm->writefn = vfio_vpd_config_write;
    837
    838	/*
    839	 * We always virtualize the next field so we can remove
    840	 * capabilities from the chain if we want to.
    841	 */
    842	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
    843
    844	/*
    845	 * Both the address and data registers are virtualized to
    846	 * enable access through the pci_vpd_read/write functions
    847	 */
    848	p_setw(perm, PCI_VPD_ADDR, (u16)ALL_VIRT, (u16)ALL_WRITE);
    849	p_setd(perm, PCI_VPD_DATA, ALL_VIRT, ALL_WRITE);
    850
    851	return 0;
    852}
    853
    854/* Permissions for PCI-X capability */
    855static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
    856{
    857	/* Alloc 24, but only 8 are used in v0 */
    858	if (alloc_perm_bits(perm, PCI_CAP_PCIX_SIZEOF_V2))
    859		return -ENOMEM;
    860
    861	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
    862
    863	p_setw(perm, PCI_X_CMD, NO_VIRT, (u16)ALL_WRITE);
    864	p_setd(perm, PCI_X_ECC_CSR, NO_VIRT, ALL_WRITE);
    865	return 0;
    866}
    867
    868static int vfio_exp_config_write(struct vfio_pci_core_device *vdev, int pos,
    869				 int count, struct perm_bits *perm,
    870				 int offset, __le32 val)
    871{
    872	__le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
    873				  offset + PCI_EXP_DEVCTL);
    874	int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
    875
    876	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
    877	if (count < 0)
    878		return count;
    879
    880	/*
    881	 * The FLR bit is virtualized, if set and the device supports PCIe
    882	 * FLR, issue a reset_function.  Regardless, clear the bit, the spec
    883	 * requires it to be always read as zero.  NB, reset_function might
    884	 * not use a PCIe FLR, we don't have that level of granularity.
    885	 */
    886	if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
    887		u32 cap;
    888		int ret;
    889
    890		*ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
    891
    892		ret = pci_user_read_config_dword(vdev->pdev,
    893						 pos - offset + PCI_EXP_DEVCAP,
    894						 &cap);
    895
    896		if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
    897			vfio_pci_zap_and_down_write_memory_lock(vdev);
    898			pci_try_reset_function(vdev->pdev);
    899			up_write(&vdev->memory_lock);
    900		}
    901	}
    902
    903	/*
    904	 * MPS is virtualized to the user, writes do not change the physical
    905	 * register since determining a proper MPS value requires a system wide
    906	 * device view.  The MRRS is largely independent of MPS, but since the
    907	 * user does not have that system-wide view, they might set a safe, but
    908	 * inefficiently low value.  Here we allow writes through to hardware,
    909	 * but we set the floor to the physical device MPS setting, so that
    910	 * we can at least use full TLPs, as defined by the MPS value.
    911	 *
    912	 * NB, if any devices actually depend on an artificially low MRRS
    913	 * setting, this will need to be revisited, perhaps with a quirk
    914	 * though pcie_set_readrq().
    915	 */
    916	if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
    917		readrq = 128 <<
    918			((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
    919		readrq = max(readrq, pcie_get_mps(vdev->pdev));
    920
    921		pcie_set_readrq(vdev->pdev, readrq);
    922	}
    923
    924	return count;
    925}
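
/*
 * Editorial note: the MRRS field occupies bits 14:12 of Device Control
 * and encodes 128 << n bytes, so the decode above maps field values
 * 0..5 to 128, 256, 512, 1024, 2048 and 4096 bytes.  With a physical
 * MPS of, say, 256 bytes, the max() clamps a user-requested 128 back
 * up to 256 so the device can still emit full-sized TLPs.
 */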
    926
    927/* Permissions for PCI Express capability */
    928static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
    929{
    930	/* Alloc largest of possible sizes */
    931	if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
    932		return -ENOMEM;
    933
    934	perm->writefn = vfio_exp_config_write;
    935
    936	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
    937
    938	/*
    939	 * Allow writes to device control fields, except devctl_phantom,
    940	 * which could confuse IOMMU, MPS, which can break communication
    941	 * with other physical devices, and the ARI bit in devctl2, which
    942	 * is set at probe time.  FLR and MRRS get virtualized via our
    943	 * writefn.
    944	 */
    945	p_setw(perm, PCI_EXP_DEVCTL,
    946	       PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
    947	       PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
    948	p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
    949	return 0;
    950}
    951
    952static int vfio_af_config_write(struct vfio_pci_core_device *vdev, int pos,
    953				int count, struct perm_bits *perm,
    954				int offset, __le32 val)
    955{
    956	u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
    957
    958	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
    959	if (count < 0)
    960		return count;
    961
    962	/*
    963	 * The FLR bit is virtualized, if set and the device supports AF
    964	 * FLR, issue a reset_function.  Regardless, clear the bit, the spec
    965	 * requires it to be always read as zero.  NB, reset_function might
    966	 * not use an AF FLR, we don't have that level of granularity.
    967	 */
    968	if (*ctrl & PCI_AF_CTRL_FLR) {
    969		u8 cap;
    970		int ret;
    971
    972		*ctrl &= ~PCI_AF_CTRL_FLR;
    973
    974		ret = pci_user_read_config_byte(vdev->pdev,
    975						pos - offset + PCI_AF_CAP,
    976						&cap);
    977
    978		if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
    979			vfio_pci_zap_and_down_write_memory_lock(vdev);
    980			pci_try_reset_function(vdev->pdev);
    981			up_write(&vdev->memory_lock);
    982		}
    983	}
    984
    985	return count;
    986}
    987
    988/* Permissions for Advanced Function capability */
    989static int __init init_pci_cap_af_perm(struct perm_bits *perm)
    990{
    991	if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
    992		return -ENOMEM;
    993
    994	perm->writefn = vfio_af_config_write;
    995
    996	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
    997	p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
    998	return 0;
    999}
   1000
   1001/* Permissions for Advanced Error Reporting extended capability */
   1002static int __init init_pci_ext_cap_err_perm(struct perm_bits *perm)
   1003{
   1004	u32 mask;
   1005
   1006	if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_ERR]))
   1007		return -ENOMEM;
   1008
   1009	/*
   1010	 * Virtualize the first dword of all express capabilities
   1011	 * because it includes the next pointer.  This lets us later
   1012	 * remove capabilities from the chain if we need to.
   1013	 */
   1014	p_setd(perm, 0, ALL_VIRT, NO_WRITE);
   1015
   1016	/* Writable bits mask */
   1017	mask =	PCI_ERR_UNC_UND |		/* Undefined */
   1018		PCI_ERR_UNC_DLP |		/* Data Link Protocol */
   1019		PCI_ERR_UNC_SURPDN |		/* Surprise Down */
   1020		PCI_ERR_UNC_POISON_TLP |	/* Poisoned TLP */
   1021		PCI_ERR_UNC_FCP |		/* Flow Control Protocol */
   1022		PCI_ERR_UNC_COMP_TIME |		/* Completion Timeout */
   1023		PCI_ERR_UNC_COMP_ABORT |	/* Completer Abort */
   1024		PCI_ERR_UNC_UNX_COMP |		/* Unexpected Completion */
   1025		PCI_ERR_UNC_RX_OVER |		/* Receiver Overflow */
   1026		PCI_ERR_UNC_MALF_TLP |		/* Malformed TLP */
   1027		PCI_ERR_UNC_ECRC |		/* ECRC Error Status */
   1028		PCI_ERR_UNC_UNSUP |		/* Unsupported Request */
   1029		PCI_ERR_UNC_ACSV |		/* ACS Violation */
   1030		PCI_ERR_UNC_INTN |		/* internal error */
   1031		PCI_ERR_UNC_MCBTLP |		/* MC blocked TLP */
   1032		PCI_ERR_UNC_ATOMEG |		/* Atomic egress blocked */
   1033		PCI_ERR_UNC_TLPPRE;		/* TLP prefix blocked */
   1034	p_setd(perm, PCI_ERR_UNCOR_STATUS, NO_VIRT, mask);
   1035	p_setd(perm, PCI_ERR_UNCOR_MASK, NO_VIRT, mask);
   1036	p_setd(perm, PCI_ERR_UNCOR_SEVER, NO_VIRT, mask);
   1037
   1038	mask =	PCI_ERR_COR_RCVR |		/* Receiver Error Status */
   1039		PCI_ERR_COR_BAD_TLP |		/* Bad TLP Status */
   1040		PCI_ERR_COR_BAD_DLLP |		/* Bad DLLP Status */
   1041		PCI_ERR_COR_REP_ROLL |		/* REPLAY_NUM Rollover */
   1042		PCI_ERR_COR_REP_TIMER |		/* Replay Timer Timeout */
   1043		PCI_ERR_COR_ADV_NFAT |		/* Advisory Non-Fatal */
   1044		PCI_ERR_COR_INTERNAL |		/* Corrected Internal */
   1045		PCI_ERR_COR_LOG_OVER;		/* Header Log Overflow */
   1046	p_setd(perm, PCI_ERR_COR_STATUS, NO_VIRT, mask);
   1047	p_setd(perm, PCI_ERR_COR_MASK, NO_VIRT, mask);
   1048
   1049	mask =	PCI_ERR_CAP_ECRC_GENE |		/* ECRC Generation Enable */
   1050		PCI_ERR_CAP_ECRC_CHKE;		/* ECRC Check Enable */
   1051	p_setd(perm, PCI_ERR_CAP, NO_VIRT, mask);
   1052	return 0;
   1053}
   1054
   1055/* Permissions for Power Budgeting extended capability */
   1056static int __init init_pci_ext_cap_pwr_perm(struct perm_bits *perm)
   1057{
   1058	if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_PWR]))
   1059		return -ENOMEM;
   1060
   1061	p_setd(perm, 0, ALL_VIRT, NO_WRITE);
   1062
   1063	/* Writing the data selector is OK, the info is still read-only */
   1064	p_setb(perm, PCI_PWR_DATA, NO_VIRT, (u8)ALL_WRITE);
   1065	return 0;
   1066}
   1067
   1068/*
   1069 * Initialize the shared permission tables
   1070 */
   1071void vfio_pci_uninit_perm_bits(void)
   1072{
   1073	free_perm_bits(&cap_perms[PCI_CAP_ID_BASIC]);
   1074
   1075	free_perm_bits(&cap_perms[PCI_CAP_ID_PM]);
   1076	free_perm_bits(&cap_perms[PCI_CAP_ID_VPD]);
   1077	free_perm_bits(&cap_perms[PCI_CAP_ID_PCIX]);
   1078	free_perm_bits(&cap_perms[PCI_CAP_ID_EXP]);
   1079	free_perm_bits(&cap_perms[PCI_CAP_ID_AF]);
   1080
   1081	free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
   1082	free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
   1083}
   1084
   1085int __init vfio_pci_init_perm_bits(void)
   1086{
   1087	int ret;
   1088
   1089	/* Basic config space */
   1090	ret = init_pci_cap_basic_perm(&cap_perms[PCI_CAP_ID_BASIC]);
   1091
   1092	/* Capabilities */
   1093	ret |= init_pci_cap_pm_perm(&cap_perms[PCI_CAP_ID_PM]);
   1094	ret |= init_pci_cap_vpd_perm(&cap_perms[PCI_CAP_ID_VPD]);
   1095	ret |= init_pci_cap_pcix_perm(&cap_perms[PCI_CAP_ID_PCIX]);
   1096	cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_raw_config_write;
   1097	ret |= init_pci_cap_exp_perm(&cap_perms[PCI_CAP_ID_EXP]);
   1098	ret |= init_pci_cap_af_perm(&cap_perms[PCI_CAP_ID_AF]);
   1099
   1100	/* Extended capabilities */
   1101	ret |= init_pci_ext_cap_err_perm(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
   1102	ret |= init_pci_ext_cap_pwr_perm(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
   1103	ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_raw_config_write;
   1104
   1105	if (ret)
   1106		vfio_pci_uninit_perm_bits();
   1107
   1108	return ret;
   1109}
   1110
   1111static int vfio_find_cap_start(struct vfio_pci_core_device *vdev, int pos)
   1112{
   1113	u8 cap;
   1114	int base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE :
   1115						 PCI_STD_HEADER_SIZEOF;
   1116	cap = vdev->pci_config_map[pos];
   1117
   1118	if (cap == PCI_CAP_ID_BASIC)
   1119		return 0;
   1120
    1121	/* XXX Can we have two abutting capabilities of the same type? */
   1122	while (pos - 1 >= base && vdev->pci_config_map[pos - 1] == cap)
   1123		pos--;
   1124
   1125	return pos;
   1126}
   1127
   1128static int vfio_msi_config_read(struct vfio_pci_core_device *vdev, int pos,
   1129				int count, struct perm_bits *perm,
   1130				int offset, __le32 *val)
   1131{
   1132	/* Update max available queue size from msi_qmax */
   1133	if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
   1134		__le16 *flags;
   1135		int start;
   1136
   1137		start = vfio_find_cap_start(vdev, pos);
   1138
   1139		flags = (__le16 *)&vdev->vconfig[start];
   1140
   1141		*flags &= cpu_to_le16(~PCI_MSI_FLAGS_QMASK);
   1142		*flags |= cpu_to_le16(vdev->msi_qmax << 1);
   1143	}
   1144
   1145	return vfio_default_config_read(vdev, pos, count, perm, offset, val);
   1146}
   1147
   1148static int vfio_msi_config_write(struct vfio_pci_core_device *vdev, int pos,
   1149				 int count, struct perm_bits *perm,
   1150				 int offset, __le32 val)
   1151{
   1152	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
   1153	if (count < 0)
   1154		return count;
   1155
   1156	/* Fixup and write configured queue size and enable to hardware */
   1157	if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
   1158		__le16 *pflags;
   1159		u16 flags;
   1160		int start, ret;
   1161
   1162		start = vfio_find_cap_start(vdev, pos);
   1163
   1164		pflags = (__le16 *)&vdev->vconfig[start + PCI_MSI_FLAGS];
   1165
   1166		flags = le16_to_cpu(*pflags);
   1167
   1168		/* MSI is enabled via ioctl */
   1169		if  (!is_msi(vdev))
   1170			flags &= ~PCI_MSI_FLAGS_ENABLE;
   1171
   1172		/* Check queue size */
   1173		if ((flags & PCI_MSI_FLAGS_QSIZE) >> 4 > vdev->msi_qmax) {
   1174			flags &= ~PCI_MSI_FLAGS_QSIZE;
   1175			flags |= vdev->msi_qmax << 4;
   1176		}
   1177
   1178		/* Write back to virt and to hardware */
   1179		*pflags = cpu_to_le16(flags);
   1180		ret = pci_user_write_config_word(vdev->pdev,
   1181						 start + PCI_MSI_FLAGS,
   1182						 flags);
   1183		if (ret)
   1184			return ret;
   1185	}
   1186
   1187	return count;
   1188}
   1189
   1190/*
   1191 * MSI determination is per-device, so this routine gets used beyond
   1192 * initialization time. Don't add __init
   1193 */
   1194static int init_pci_cap_msi_perm(struct perm_bits *perm, int len, u16 flags)
   1195{
   1196	if (alloc_perm_bits(perm, len))
   1197		return -ENOMEM;
   1198
   1199	perm->readfn = vfio_msi_config_read;
   1200	perm->writefn = vfio_msi_config_write;
   1201
   1202	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
   1203
   1204	/*
   1205	 * The upper byte of the control register is reserved,
   1206	 * just setup the lower byte.
   1207	 */
   1208	p_setb(perm, PCI_MSI_FLAGS, (u8)ALL_VIRT, (u8)ALL_WRITE);
   1209	p_setd(perm, PCI_MSI_ADDRESS_LO, ALL_VIRT, ALL_WRITE);
   1210	if (flags & PCI_MSI_FLAGS_64BIT) {
   1211		p_setd(perm, PCI_MSI_ADDRESS_HI, ALL_VIRT, ALL_WRITE);
   1212		p_setw(perm, PCI_MSI_DATA_64, (u16)ALL_VIRT, (u16)ALL_WRITE);
   1213		if (flags & PCI_MSI_FLAGS_MASKBIT) {
   1214			p_setd(perm, PCI_MSI_MASK_64, NO_VIRT, ALL_WRITE);
   1215			p_setd(perm, PCI_MSI_PENDING_64, NO_VIRT, ALL_WRITE);
   1216		}
   1217	} else {
   1218		p_setw(perm, PCI_MSI_DATA_32, (u16)ALL_VIRT, (u16)ALL_WRITE);
   1219		if (flags & PCI_MSI_FLAGS_MASKBIT) {
   1220			p_setd(perm, PCI_MSI_MASK_32, NO_VIRT, ALL_WRITE);
   1221			p_setd(perm, PCI_MSI_PENDING_32, NO_VIRT, ALL_WRITE);
   1222		}
   1223	}
   1224	return 0;
   1225}
   1226
   1227/* Determine MSI CAP field length; initialize msi_perms on 1st call per vdev */
   1228static int vfio_msi_cap_len(struct vfio_pci_core_device *vdev, u8 pos)
   1229{
   1230	struct pci_dev *pdev = vdev->pdev;
   1231	int len, ret;
   1232	u16 flags;
   1233
   1234	ret = pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &flags);
   1235	if (ret)
   1236		return pcibios_err_to_errno(ret);
   1237
   1238	len = 10; /* Minimum size */
   1239	if (flags & PCI_MSI_FLAGS_64BIT)
   1240		len += 4;
   1241	if (flags & PCI_MSI_FLAGS_MASKBIT)
   1242		len += 10;
   1243
   1244	if (vdev->msi_perm)
   1245		return len;
   1246
   1247	vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL);
   1248	if (!vdev->msi_perm)
   1249		return -ENOMEM;
   1250
   1251	ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags);
   1252	if (ret) {
   1253		kfree(vdev->msi_perm);
   1254		return ret;
   1255	}
   1256
   1257	return len;
   1258}
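
/*
 * Editorial note: the arithmetic above produces exactly the four sizes
 * listed for the 0xFF entry in pci_cap_length[]: 10 bytes for plain
 * 32-bit MSI, 14 with a 64-bit address, 20 with per-vector masking,
 * and 24 with both (10 + 4 + 10).
 */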
   1259
   1260/* Determine extended capability length for VC (2 & 9) and MFVC */
   1261static int vfio_vc_cap_len(struct vfio_pci_core_device *vdev, u16 pos)
   1262{
   1263	struct pci_dev *pdev = vdev->pdev;
   1264	u32 tmp;
   1265	int ret, evcc, phases, vc_arb;
   1266	int len = PCI_CAP_VC_BASE_SIZEOF;
   1267
   1268	ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP1, &tmp);
   1269	if (ret)
   1270		return pcibios_err_to_errno(ret);
   1271
   1272	evcc = tmp & PCI_VC_CAP1_EVCC; /* extended vc count */
   1273	ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_CAP2, &tmp);
   1274	if (ret)
   1275		return pcibios_err_to_errno(ret);
   1276
   1277	if (tmp & PCI_VC_CAP2_128_PHASE)
   1278		phases = 128;
   1279	else if (tmp & PCI_VC_CAP2_64_PHASE)
   1280		phases = 64;
   1281	else if (tmp & PCI_VC_CAP2_32_PHASE)
   1282		phases = 32;
   1283	else
   1284		phases = 0;
   1285
   1286	vc_arb = phases * 4;
   1287
   1288	/*
   1289	 * Port arbitration tables are root & switch only;
   1290	 * function arbitration tables are function 0 only.
   1291	 * In either case, we'll never let user write them so
   1292	 * we don't care how big they are
   1293	 */
   1294	len += (1 + evcc) * PCI_CAP_VC_PER_VC_SIZEOF;
   1295	if (vc_arb) {
   1296		len = round_up(len, 16);
   1297		len += vc_arb / 8;
   1298	}
   1299	return len;
   1300}
   1301
   1302static int vfio_cap_len(struct vfio_pci_core_device *vdev, u8 cap, u8 pos)
   1303{
   1304	struct pci_dev *pdev = vdev->pdev;
   1305	u32 dword;
   1306	u16 word;
   1307	u8 byte;
   1308	int ret;
   1309
   1310	switch (cap) {
   1311	case PCI_CAP_ID_MSI:
   1312		return vfio_msi_cap_len(vdev, pos);
   1313	case PCI_CAP_ID_PCIX:
   1314		ret = pci_read_config_word(pdev, pos + PCI_X_CMD, &word);
   1315		if (ret)
   1316			return pcibios_err_to_errno(ret);
   1317
   1318		if (PCI_X_CMD_VERSION(word)) {
   1319			if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
   1320				/* Test for extended capabilities */
   1321				pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE,
   1322						      &dword);
   1323				vdev->extended_caps = (dword != 0);
   1324			}
   1325			return PCI_CAP_PCIX_SIZEOF_V2;
   1326		} else
   1327			return PCI_CAP_PCIX_SIZEOF_V0;
   1328	case PCI_CAP_ID_VNDR:
   1329		/* length follows next field */
   1330		ret = pci_read_config_byte(pdev, pos + PCI_CAP_FLAGS, &byte);
   1331		if (ret)
   1332			return pcibios_err_to_errno(ret);
   1333
   1334		return byte;
   1335	case PCI_CAP_ID_EXP:
   1336		if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
   1337			/* Test for extended capabilities */
   1338			pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
   1339			vdev->extended_caps = (dword != 0);
   1340		}
   1341
   1342		/* length based on version and type */
   1343		if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) {
   1344			if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
   1345				return 0xc; /* "All Devices" only, no link */
   1346			return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
   1347		} else {
   1348			if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
   1349				return 0x2c; /* No link */
   1350			return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
   1351		}
   1352	case PCI_CAP_ID_HT:
   1353		ret = pci_read_config_byte(pdev, pos + 3, &byte);
   1354		if (ret)
   1355			return pcibios_err_to_errno(ret);
   1356
   1357		return (byte & HT_3BIT_CAP_MASK) ?
   1358			HT_CAP_SIZEOF_SHORT : HT_CAP_SIZEOF_LONG;
   1359	case PCI_CAP_ID_SATA:
   1360		ret = pci_read_config_byte(pdev, pos + PCI_SATA_REGS, &byte);
   1361		if (ret)
   1362			return pcibios_err_to_errno(ret);
   1363
   1364		byte &= PCI_SATA_REGS_MASK;
   1365		if (byte == PCI_SATA_REGS_INLINE)
   1366			return PCI_SATA_SIZEOF_LONG;
   1367		else
   1368			return PCI_SATA_SIZEOF_SHORT;
   1369	default:
   1370		pci_warn(pdev, "%s: unknown length for PCI cap %#x@%#x\n",
   1371			 __func__, cap, pos);
   1372	}
   1373
   1374	return 0;
   1375}
   1376
   1377static int vfio_ext_cap_len(struct vfio_pci_core_device *vdev, u16 ecap, u16 epos)
   1378{
   1379	struct pci_dev *pdev = vdev->pdev;
   1380	u8 byte;
   1381	u32 dword;
   1382	int ret;
   1383
   1384	switch (ecap) {
   1385	case PCI_EXT_CAP_ID_VNDR:
   1386		ret = pci_read_config_dword(pdev, epos + PCI_VSEC_HDR, &dword);
   1387		if (ret)
   1388			return pcibios_err_to_errno(ret);
   1389
   1390		return dword >> PCI_VSEC_HDR_LEN_SHIFT;
   1391	case PCI_EXT_CAP_ID_VC:
   1392	case PCI_EXT_CAP_ID_VC9:
   1393	case PCI_EXT_CAP_ID_MFVC:
   1394		return vfio_vc_cap_len(vdev, epos);
   1395	case PCI_EXT_CAP_ID_ACS:
   1396		ret = pci_read_config_byte(pdev, epos + PCI_ACS_CAP, &byte);
   1397		if (ret)
   1398			return pcibios_err_to_errno(ret);
   1399
   1400		if (byte & PCI_ACS_EC) {
   1401			int bits;
   1402
   1403			ret = pci_read_config_byte(pdev,
   1404						   epos + PCI_ACS_EGRESS_BITS,
   1405						   &byte);
   1406			if (ret)
   1407				return pcibios_err_to_errno(ret);
   1408
   1409			bits = byte ? round_up(byte, 32) : 256;
   1410			return 8 + (bits / 8);
   1411		}
   1412		return 8;
   1413
   1414	case PCI_EXT_CAP_ID_REBAR:
   1415		ret = pci_read_config_byte(pdev, epos + PCI_REBAR_CTRL, &byte);
   1416		if (ret)
   1417			return pcibios_err_to_errno(ret);
   1418
   1419		byte &= PCI_REBAR_CTRL_NBAR_MASK;
   1420		byte >>= PCI_REBAR_CTRL_NBAR_SHIFT;
   1421
   1422		return 4 + (byte * 8);
   1423	case PCI_EXT_CAP_ID_DPA:
   1424		ret = pci_read_config_byte(pdev, epos + PCI_DPA_CAP, &byte);
   1425		if (ret)
   1426			return pcibios_err_to_errno(ret);
   1427
   1428		byte &= PCI_DPA_CAP_SUBSTATE_MASK;
   1429		return PCI_DPA_BASE_SIZEOF + byte + 1;
   1430	case PCI_EXT_CAP_ID_TPH:
   1431		ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword);
   1432		if (ret)
   1433			return pcibios_err_to_errno(ret);
   1434
   1435		if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) {
   1436			int sts;
   1437
   1438			sts = dword & PCI_TPH_CAP_ST_MASK;
   1439			sts >>= PCI_TPH_CAP_ST_SHIFT;
   1440			return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2;
   1441		}
   1442		return PCI_TPH_BASE_SIZEOF;
   1443	default:
   1444		pci_warn(pdev, "%s: unknown length for PCI ecap %#x@%#x\n",
   1445			 __func__, ecap, epos);
   1446	}
   1447
   1448	return 0;
   1449}
   1450
   1451static void vfio_update_pm_vconfig_bytes(struct vfio_pci_core_device *vdev,
   1452					 int offset)
   1453{
   1454	__le16 *pmc = (__le16 *)&vdev->vconfig[offset + PCI_PM_PMC];
   1455	__le16 *ctrl = (__le16 *)&vdev->vconfig[offset + PCI_PM_CTRL];
   1456
   1457	/* Clear vconfig PME_Support, PME_Status, and PME_En bits */
   1458	*pmc &= ~cpu_to_le16(PCI_PM_CAP_PME_MASK);
   1459	*ctrl &= ~cpu_to_le16(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS);
   1460}
   1461
   1462static int vfio_fill_vconfig_bytes(struct vfio_pci_core_device *vdev,
   1463				   int offset, int size)
   1464{
   1465	struct pci_dev *pdev = vdev->pdev;
   1466	int ret = 0;
   1467
   1468	/*
   1469	 * We try to read physical config space in the largest chunks
   1470	 * we can, assuming that all of the fields support dword access.
   1471	 * pci_save_state() makes this same assumption and seems to do ok.
   1472	 */
   1473	while (size) {
   1474		int filled;
   1475
   1476		if (size >= 4 && !(offset % 4)) {
   1477			__le32 *dwordp = (__le32 *)&vdev->vconfig[offset];
   1478			u32 dword;
   1479
   1480			ret = pci_read_config_dword(pdev, offset, &dword);
   1481			if (ret)
   1482				return ret;
   1483			*dwordp = cpu_to_le32(dword);
   1484			filled = 4;
   1485		} else if (size >= 2 && !(offset % 2)) {
   1486			__le16 *wordp = (__le16 *)&vdev->vconfig[offset];
   1487			u16 word;
   1488
   1489			ret = pci_read_config_word(pdev, offset, &word);
   1490			if (ret)
   1491				return ret;
   1492			*wordp = cpu_to_le16(word);
   1493			filled = 2;
   1494		} else {
   1495			u8 *byte = &vdev->vconfig[offset];
   1496			ret = pci_read_config_byte(pdev, offset, byte);
   1497			if (ret)
   1498				return ret;
   1499			filled = 1;
   1500		}
   1501
   1502		offset += filled;
   1503		size -= filled;
   1504	}
   1505
   1506	return ret;
   1507}
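
/*
 * Editorial example: for offset 0x3D and size 7 the loop above issues
 * a byte read at 0x3D, a word read at 0x3E, then a dword read at 0x40,
 * always the widest naturally aligned access that still fits.
 */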
   1508
   1509static int vfio_cap_init(struct vfio_pci_core_device *vdev)
   1510{
   1511	struct pci_dev *pdev = vdev->pdev;
   1512	u8 *map = vdev->pci_config_map;
   1513	u16 status;
   1514	u8 pos, *prev, cap;
   1515	int loops, ret, caps = 0;
   1516
   1517	/* Any capabilities? */
   1518	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
   1519	if (ret)
   1520		return ret;
   1521
   1522	if (!(status & PCI_STATUS_CAP_LIST))
   1523		return 0; /* Done */
   1524
   1525	ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
   1526	if (ret)
   1527		return ret;
   1528
   1529	/* Mark the previous position in case we want to skip a capability */
   1530	prev = &vdev->vconfig[PCI_CAPABILITY_LIST];
   1531
   1532	/* We can bound our loop, capabilities are dword aligned */
   1533	loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF;
   1534	while (pos && loops--) {
   1535		u8 next;
   1536		int i, len = 0;
   1537
   1538		ret = pci_read_config_byte(pdev, pos, &cap);
   1539		if (ret)
   1540			return ret;
   1541
   1542		ret = pci_read_config_byte(pdev,
   1543					   pos + PCI_CAP_LIST_NEXT, &next);
   1544		if (ret)
   1545			return ret;
   1546
   1547		/*
   1548		 * ID 0 is a NULL capability, conflicting with our fake
   1549		 * PCI_CAP_ID_BASIC.  As it has no content, consider it
   1550		 * hidden for now.
   1551		 */
   1552		if (cap && cap <= PCI_CAP_ID_MAX) {
   1553			len = pci_cap_length[cap];
   1554			if (len == 0xFF) { /* Variable length */
   1555				len = vfio_cap_len(vdev, cap, pos);
   1556				if (len < 0)
   1557					return len;
   1558			}
   1559		}
   1560
   1561		if (!len) {
   1562			pci_info(pdev, "%s: hiding cap %#x@%#x\n", __func__,
   1563				 cap, pos);
   1564			*prev = next;
   1565			pos = next;
   1566			continue;
   1567		}
   1568
   1569		/* Sanity check, do we overlap other capabilities? */
   1570		for (i = 0; i < len; i++) {
   1571			if (likely(map[pos + i] == PCI_CAP_ID_INVALID))
   1572				continue;
   1573
   1574			pci_warn(pdev, "%s: PCI config conflict @%#x, was cap %#x now cap %#x\n",
   1575				 __func__, pos + i, map[pos + i], cap);
   1576		}
   1577
   1578		BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
   1579
   1580		memset(map + pos, cap, len);
   1581		ret = vfio_fill_vconfig_bytes(vdev, pos, len);
   1582		if (ret)
   1583			return ret;
   1584
   1585		if (cap == PCI_CAP_ID_PM)
   1586			vfio_update_pm_vconfig_bytes(vdev, pos);
   1587
   1588		prev = &vdev->vconfig[pos + PCI_CAP_LIST_NEXT];
   1589		pos = next;
   1590		caps++;
   1591	}
   1592
   1593	/* If we didn't fill any capabilities, clear the status flag */
   1594	if (!caps) {
   1595		__le16 *vstatus = (__le16 *)&vdev->vconfig[PCI_STATUS];
   1596		*vstatus &= ~cpu_to_le16(PCI_STATUS_CAP_LIST);
   1597	}
   1598
   1599	return 0;
   1600}
   1601
   1602static int vfio_ecap_init(struct vfio_pci_core_device *vdev)
   1603{
   1604	struct pci_dev *pdev = vdev->pdev;
   1605	u8 *map = vdev->pci_config_map;
   1606	u16 epos;
   1607	__le32 *prev = NULL;
   1608	int loops, ret, ecaps = 0;
   1609
   1610	if (!vdev->extended_caps)
   1611		return 0;
   1612
   1613	epos = PCI_CFG_SPACE_SIZE;
   1614
   1615	loops = (pdev->cfg_size - PCI_CFG_SPACE_SIZE) / PCI_CAP_SIZEOF;
   1616
   1617	while (loops-- && epos >= PCI_CFG_SPACE_SIZE) {
   1618		u32 header;
   1619		u16 ecap;
   1620		int i, len = 0;
   1621		bool hidden = false;
   1622
   1623		ret = pci_read_config_dword(pdev, epos, &header);
   1624		if (ret)
   1625			return ret;
   1626
   1627		ecap = PCI_EXT_CAP_ID(header);
   1628
   1629		if (ecap <= PCI_EXT_CAP_ID_MAX) {
   1630			len = pci_ext_cap_length[ecap];
   1631			if (len == 0xFF) {
   1632				len = vfio_ext_cap_len(vdev, ecap, epos);
   1633				if (len < 0)
   1634					return len;
   1635			}
   1636		}
   1637
   1638		if (!len) {
   1639			pci_info(pdev, "%s: hiding ecap %#x@%#x\n",
   1640				 __func__, ecap, epos);
   1641
   1642			/* If not the first in the chain, we can skip over it */
   1643			if (prev) {
   1644				u32 val = epos = PCI_EXT_CAP_NEXT(header);
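				/*
				 * Bits 31:20 of the header hold the next
				 * capability offset; offsets are dword
				 * aligned, so only bits 31:22 (0xffc << 20)
				 * can be non-zero.  Clear those bits in the
				 * previous header and patch in the new
				 * offset, splicing this ecap out of the
				 * virtual chain.
				 */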
   1645				*prev &= cpu_to_le32(~(0xffcU << 20));
   1646				*prev |= cpu_to_le32(val << 20);
   1647				continue;
   1648			}
   1649
   1650			/*
    1651			 * Otherwise, fill in a placeholder; the direct
   1652			 * readfn will virtualize this automatically
   1653			 */
   1654			len = PCI_CAP_SIZEOF;
   1655			hidden = true;
   1656		}
   1657
   1658		for (i = 0; i < len; i++) {
   1659			if (likely(map[epos + i] == PCI_CAP_ID_INVALID))
   1660				continue;
   1661
   1662			pci_warn(pdev, "%s: PCI config conflict @%#x, was ecap %#x now ecap %#x\n",
   1663				 __func__, epos + i, map[epos + i], ecap);
   1664		}
   1665
    1666		/*
    1667		 * Even though ecap is 2 bytes, we're currently a long way
    1668		 * from exceeding one-byte capability IDs.  If we ever make it
    1669		 * up to 0xFE we'll need to widen this to a two-byte map.
    1670		 */
   1671		BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
   1672
   1673		memset(map + epos, ecap, len);
   1674		ret = vfio_fill_vconfig_bytes(vdev, epos, len);
   1675		if (ret)
   1676			return ret;
   1677
   1678		/*
   1679		 * If we're just using this capability to anchor the list,
    1680		 * hide the real ID.  Only count real ecaps.  XXX The PCI spec
    1681		 * says to use cap id = 0, version = 0, next = 0 if ecaps are
    1682		 * absent; we hope users walk the next pointers all the way.
   1683		 */
   1684		if (hidden)
   1685			*(__le32 *)&vdev->vconfig[epos] &=
   1686				cpu_to_le32((0xffcU << 20));
   1687		else
   1688			ecaps++;
   1689
   1690		prev = (__le32 *)&vdev->vconfig[epos];
   1691		epos = PCI_EXT_CAP_NEXT(header);
   1692	}
   1693
   1694	if (!ecaps)
   1695		*(u32 *)&vdev->vconfig[PCI_CFG_SPACE_SIZE] = 0;
   1696
   1697	return 0;
   1698}
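
/*
 * A small sketch of the PCIe extended capability header layout that
 * vfio_ecap_init() walks: bits 15:0 carry the capability ID, bits 19:16 the
 * version, and bits 31:20 the next offset.  The ext_cap_* helpers below are
 * illustrative re-implementations of the PCI_EXT_CAP_ID()/PCI_EXT_CAP_VER()/
 * PCI_EXT_CAP_NEXT() accessors used above, not part of this file.
 */
#if 0
#include <stdint.h>

static inline uint16_t ext_cap_id(uint32_t header)
{
	return header & 0xffff;		/* PCI_EXT_CAP_ID() */
}

static inline uint8_t ext_cap_version(uint32_t header)
{
	return (header >> 16) & 0xf;	/* PCI_EXT_CAP_VER() */
}

static inline uint16_t ext_cap_next(uint32_t header)
{
	/* PCI_EXT_CAP_NEXT(): low two bits masked, offsets are dword aligned */
	return (header >> 20) & 0xffc;
}
#endif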
   1699
   1700/*
   1701 * Nag about hardware bugs, hopefully to have vendors fix them, but at least
   1702 * to collect a list of dependencies for the VF INTx pin quirk below.
   1703 */
   1704static const struct pci_device_id known_bogus_vf_intx_pin[] = {
   1705	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x270c) },
   1706	{}
   1707};
   1708
   1709/*
   1710 * For each device we allocate a pci_config_map that indicates the
    1711 * capability occupying each byte and thus the struct perm_bits we
   1712 * use for read and write.  We also allocate a virtualized config
   1713 * space which tracks reads and writes to bits that we emulate for
   1714 * the user.  Initial values filled from device.
   1715 *
   1716 * Using shared struct perm_bits between all vfio-pci devices saves
   1717 * us from allocating cfg_size buffers for virt and write for every
   1718 * device.  We could remove vconfig and allocate individual buffers
   1719 * for each area requiring emulated bits, but the array of pointers
   1720 * would be comparable in size (at least for standard config space).
   1721 */
   1722int vfio_config_init(struct vfio_pci_core_device *vdev)
   1723{
   1724	struct pci_dev *pdev = vdev->pdev;
   1725	u8 *map, *vconfig;
   1726	int ret;
   1727
   1728	/*
   1729	 * Config space, caps and ecaps are all dword aligned, so we could
   1730	 * use one byte per dword to record the type.  However, there are
    1731	 * no requirements on the length of a capability, so the gap between
   1732	 * capabilities needs byte granularity.
   1733	 */
   1734	map = kmalloc(pdev->cfg_size, GFP_KERNEL);
   1735	if (!map)
   1736		return -ENOMEM;
   1737
   1738	vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL);
   1739	if (!vconfig) {
   1740		kfree(map);
   1741		return -ENOMEM;
   1742	}
   1743
   1744	vdev->pci_config_map = map;
   1745	vdev->vconfig = vconfig;
   1746
   1747	memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF);
   1748	memset(map + PCI_STD_HEADER_SIZEOF, PCI_CAP_ID_INVALID,
   1749	       pdev->cfg_size - PCI_STD_HEADER_SIZEOF);
   1750
   1751	ret = vfio_fill_vconfig_bytes(vdev, 0, PCI_STD_HEADER_SIZEOF);
   1752	if (ret)
   1753		goto out;
   1754
   1755	vdev->bardirty = true;
   1756
   1757	/*
   1758	 * XXX can we just pci_load_saved_state/pci_restore_state?
   1759	 * may need to rebuild vconfig after that
   1760	 */
   1761
   1762	/* For restore after reset */
   1763	vdev->rbar[0] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_0]);
   1764	vdev->rbar[1] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_1]);
   1765	vdev->rbar[2] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_2]);
   1766	vdev->rbar[3] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_3]);
   1767	vdev->rbar[4] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_4]);
   1768	vdev->rbar[5] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_5]);
   1769	vdev->rbar[6] = le32_to_cpu(*(__le32 *)&vconfig[PCI_ROM_ADDRESS]);
   1770
   1771	if (pdev->is_virtfn) {
   1772		*(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor);
   1773		*(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device);
   1774
   1775		/*
   1776		 * Per SR-IOV spec rev 1.1, 3.4.1.18 the interrupt pin register
   1777		 * does not apply to VFs and VFs must implement this register
    1778		 * as read-only with value zero.  Userspace cannot readily
    1779		 * identify whether a device is a VF, and thus cannot tell that
    1780		 * the pin definition on the device is bogus should it violate
    1781		 * this requirement.  We already virtualize the pin register for
   1782		 * other purposes, so we simply need to replace the bogus value
   1783		 * and consider VFs when we determine INTx IRQ count.
   1784		 */
   1785		if (vconfig[PCI_INTERRUPT_PIN] &&
   1786		    !pci_match_id(known_bogus_vf_intx_pin, pdev))
   1787			pci_warn(pdev,
   1788				 "Hardware bug: VF reports bogus INTx pin %d\n",
   1789				 vconfig[PCI_INTERRUPT_PIN]);
   1790
   1791		vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
   1792	}
   1793	if (pdev->no_command_memory) {
   1794		/*
   1795		 * VFs and devices that set pdev->no_command_memory do not
    1796		 * implement the memory enable bit of the COMMAND register;
    1797		 * therefore it will not be set in our initial copy of
   1798		 * config space after pci_enable_device().  For consistency
   1799		 * with PFs, set the virtual enable bit here.
   1800		 */
   1801		*(__le16 *)&vconfig[PCI_COMMAND] |=
   1802					cpu_to_le16(PCI_COMMAND_MEMORY);
   1803	}
   1804
   1805	if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
   1806		vconfig[PCI_INTERRUPT_PIN] = 0;
   1807
   1808	ret = vfio_cap_init(vdev);
   1809	if (ret)
   1810		goto out;
   1811
   1812	ret = vfio_ecap_init(vdev);
   1813	if (ret)
   1814		goto out;
   1815
   1816	return 0;
   1817
   1818out:
   1819	kfree(map);
   1820	vdev->pci_config_map = NULL;
   1821	kfree(vconfig);
   1822	vdev->vconfig = NULL;
   1823	return pcibios_err_to_errno(ret);
   1824}
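
/*
 * A minimal sketch (hypothetical names, not from this driver) of the two
 * parallel allocations vfio_config_init() sets up: for every byte of config
 * space, one array records the owning capability ID and the other holds the
 * virtualized value presented to the user.  Assumes cfg_size >= 64.
 */
#if 0
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct cfg_virt {
	uint8_t *map;		/* capability ID owning each config byte */
	uint8_t *vconfig;	/* virtualized config space contents */
	size_t cfg_size;
};

static int cfg_virt_init(struct cfg_virt *v, size_t cfg_size)
{
	v->map = malloc(cfg_size);
	v->vconfig = malloc(cfg_size);
	if (!v->map || !v->vconfig) {
		free(v->map);
		free(v->vconfig);
		v->map = v->vconfig = NULL;
		return -1;
	}
	v->cfg_size = cfg_size;
	/* header bytes owned by the "basic" cap, the rest unassigned */
	memset(v->map, 0x00, 64);			/* PCI_CAP_ID_BASIC */
	memset(v->map + 64, 0xFF, cfg_size - 64);	/* PCI_CAP_ID_INVALID */
	return 0;
}
#endif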
   1825
   1826void vfio_config_free(struct vfio_pci_core_device *vdev)
   1827{
   1828	kfree(vdev->vconfig);
   1829	vdev->vconfig = NULL;
   1830	kfree(vdev->pci_config_map);
   1831	vdev->pci_config_map = NULL;
   1832	if (vdev->msi_perm) {
   1833		free_perm_bits(vdev->msi_perm);
   1834		kfree(vdev->msi_perm);
   1835		vdev->msi_perm = NULL;
   1836	}
   1837}
   1838
   1839/*
    1840 * Count how many bytes from the given position map to the same
    1841 * capability, stopping at the end of the capability or the dword boundary.
   1842 */
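/*
 * E.g. with one capability mapped across bytes 0x41-0x43, a pos of 0x41
 * returns 3 (bytes up to the dword boundary at 0x44) and a pos of 0x43
 * returns 1.
 */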
   1843static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_core_device *vdev,
   1844					   loff_t pos)
   1845{
   1846	u8 cap = vdev->pci_config_map[pos];
   1847	size_t i;
   1848
   1849	for (i = 1; (pos + i) % 4 && vdev->pci_config_map[pos + i] == cap; i++)
   1850		/* nop */;
   1851
   1852	return i;
   1853}
   1854
   1855static ssize_t vfio_config_do_rw(struct vfio_pci_core_device *vdev, char __user *buf,
   1856				 size_t count, loff_t *ppos, bool iswrite)
   1857{
   1858	struct pci_dev *pdev = vdev->pdev;
   1859	struct perm_bits *perm;
   1860	__le32 val = 0;
   1861	int cap_start = 0, offset;
   1862	u8 cap_id;
   1863	ssize_t ret;
   1864
   1865	if (*ppos < 0 || *ppos >= pdev->cfg_size ||
   1866	    *ppos + count > pdev->cfg_size)
   1867		return -EFAULT;
   1868
   1869	/*
   1870	 * Chop accesses into aligned chunks containing no more than a
   1871	 * single capability.  Caller increments to the next chunk.
   1872	 */
   1873	count = min(count, vfio_pci_cap_remaining_dword(vdev, *ppos));
   1874	if (count >= 4 && !(*ppos % 4))
   1875		count = 4;
   1876	else if (count >= 2 && !(*ppos % 2))
   1877		count = 2;
   1878	else
   1879		count = 1;
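	/*
	 * E.g. a 7-byte read at offset 0x41 of a single capability becomes a
	 * 1-byte access here; the caller's loop then issues a 2-byte access
	 * at 0x42 and a 4-byte access at 0x44 to cover the remainder.
	 */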
   1880
   1881	ret = count;
   1882
   1883	cap_id = vdev->pci_config_map[*ppos];
   1884
   1885	if (cap_id == PCI_CAP_ID_INVALID) {
   1886		perm = &unassigned_perms;
   1887		cap_start = *ppos;
   1888	} else if (cap_id == PCI_CAP_ID_INVALID_VIRT) {
   1889		perm = &virt_perms;
   1890		cap_start = *ppos;
   1891	} else {
   1892		if (*ppos >= PCI_CFG_SPACE_SIZE) {
   1893			WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX);
   1894
   1895			perm = &ecap_perms[cap_id];
   1896			cap_start = vfio_find_cap_start(vdev, *ppos);
   1897		} else {
   1898			WARN_ON(cap_id > PCI_CAP_ID_MAX);
   1899
   1900			perm = &cap_perms[cap_id];
   1901
   1902			if (cap_id == PCI_CAP_ID_MSI)
   1903				perm = vdev->msi_perm;
   1904
   1905			if (cap_id > PCI_CAP_ID_BASIC)
   1906				cap_start = vfio_find_cap_start(vdev, *ppos);
   1907		}
   1908	}
   1909
   1910	WARN_ON(!cap_start && cap_id != PCI_CAP_ID_BASIC);
   1911	WARN_ON(cap_start > *ppos);
   1912
   1913	offset = *ppos - cap_start;
   1914
   1915	if (iswrite) {
   1916		if (!perm->writefn)
   1917			return ret;
   1918
   1919		if (copy_from_user(&val, buf, count))
   1920			return -EFAULT;
   1921
   1922		ret = perm->writefn(vdev, *ppos, count, perm, offset, val);
   1923	} else {
   1924		if (perm->readfn) {
   1925			ret = perm->readfn(vdev, *ppos, count,
   1926					   perm, offset, &val);
   1927			if (ret < 0)
   1928				return ret;
   1929		}
   1930
   1931		if (copy_to_user(buf, &val, count))
   1932			return -EFAULT;
   1933	}
   1934
   1935	return ret;
   1936}
   1937
   1938ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, char __user *buf,
   1939			   size_t count, loff_t *ppos, bool iswrite)
   1940{
   1941	size_t done = 0;
   1942	int ret = 0;
   1943	loff_t pos = *ppos;
   1944
   1945	pos &= VFIO_PCI_OFFSET_MASK;
   1946
   1947	while (count) {
   1948		ret = vfio_config_do_rw(vdev, buf, count, &pos, iswrite);
   1949		if (ret < 0)
   1950			return ret;
   1951
   1952		count -= ret;
   1953		done += ret;
   1954		buf += ret;
   1955		pos += ret;
   1956	}
   1957
   1958	*ppos += done;
   1959
   1960	return done;
   1961}
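
/*
 * A minimal userspace sketch (assuming a vfio device fd already obtained via
 * the usual VFIO group/container setup) of the path that ends in
 * vfio_pci_config_rw(): config space is exposed as a device region, read
 * with pread() at the offset reported for VFIO_PCI_CONFIG_REGION_INDEX.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>

static int read_vendor_id(int device_fd, uint16_t *vendor)
{
	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = VFIO_PCI_CONFIG_REGION_INDEX,
	};

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info))
		return -1;

	/* 2-byte read at config offset 0 (PCI_VENDOR_ID) */
	if (pread(device_fd, vendor, sizeof(*vendor),
		  info.offset) != sizeof(*vendor))
		return -1;

	return 0;
}
#endif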