cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vfio.h (51673B)


      1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
      2/*
      3 * VFIO API definition
      4 *
      5 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
      6 *     Author: Alex Williamson <alex.williamson@redhat.com>
      7 *
      8 * This program is free software; you can redistribute it and/or modify
      9 * it under the terms of the GNU General Public License version 2 as
     10 * published by the Free Software Foundation.
     11 */
     12#ifndef _UAPIVFIO_H
     13#define _UAPIVFIO_H
     14
     15#include <linux/types.h>
     16#include <linux/ioctl.h>
     17
     18#define VFIO_API_VERSION	0
     19
     20
     21/* Kernel & User level defines for VFIO IOCTLs. */
     22
     23/* Extensions */
     24
     25#define VFIO_TYPE1_IOMMU		1
     26#define VFIO_SPAPR_TCE_IOMMU		2
     27#define VFIO_TYPE1v2_IOMMU		3
     28/*
     29 * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping).  This
     30 * capability is subject to change as groups are added or removed.
     31 */
     32#define VFIO_DMA_CC_IOMMU		4
     33
     34/* Check if EEH is supported */
     35#define VFIO_EEH			5
     36
     37/* Two-stage IOMMU */
     38#define VFIO_TYPE1_NESTING_IOMMU	6	/* Implies v2 */
     39
     40#define VFIO_SPAPR_TCE_v2_IOMMU		7
     41
     42/*
     43 * The No-IOMMU IOMMU offers no translation or isolation for devices and
      44 * supports no ioctls outside of VFIO_CHECK_EXTENSION.  Using VFIO's No-IOMMU
      45 * code will taint the host kernel and should be done with extreme caution.
     46 */
     47#define VFIO_NOIOMMU_IOMMU		8
     48
     49/* Supports VFIO_DMA_UNMAP_FLAG_ALL */
     50#define VFIO_UNMAP_ALL			9
     51
     52/* Supports the vaddr flag for DMA map and unmap */
     53#define VFIO_UPDATE_VADDR		10
     54
     55/*
     56 * The IOCTL interface is designed for extensibility by embedding the
     57 * structure length (argsz) and flags into structures passed between
     58 * kernel and userspace.  We therefore use the _IO() macro for these
     59 * defines to avoid implicitly embedding a size into the ioctl request.
     60 * As structure fields are added, argsz will increase to match and flag
     61 * bits will be defined to indicate additional fields with valid data.
     62 * It's *always* the caller's responsibility to indicate the size of
     63 * the structure passed by setting argsz appropriately.
     64 */
     65
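/*
 * Illustrative sketch (not part of this header): the argsz convention in
 * practice.  Userspace announces how much it allocated; if the kernel
 * knows of more fields than argsz covers, it fills in what fits and
 * reports the size it would have needed back in argsz:
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info);
 *	if (info.argsz > sizeof(info))
 *		... reallocate with the reported size and retry ...
 */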
     66#define VFIO_TYPE	(';')
     67#define VFIO_BASE	100
     68
     69/*
     70 * For extension of INFO ioctls, VFIO makes use of a capability chain
     71 * designed after PCI/e capabilities.  A flag bit indicates whether
     72 * this capability chain is supported and a field defined in the fixed
     73 * structure defines the offset of the first capability in the chain.
     74 * This field is only valid when the corresponding bit in the flags
     75 * bitmap is set.  This offset field is relative to the start of the
     76 * INFO buffer, as is the next field within each capability header.
     77 * The id within the header is a shared address space per INFO ioctl,
     78 * while the version field is specific to the capability id.  The
     79 * contents following the header are specific to the capability id.
     80 */
     81struct vfio_info_cap_header {
     82	__u16	id;		/* Identifies capability */
     83	__u16	version;	/* Version specific to the capability ID */
     84	__u32	next;		/* Offset of next capability */
     85};
     86
     87/*
     88 * Callers of INFO ioctls passing insufficiently sized buffers will see
     89 * the capability chain flag bit set, a zero value for the first capability
     90 * offset (if available within the provided argsz), and argsz will be
     91 * updated to report the necessary buffer size.  For compatibility, the
     92 * INFO ioctl will not report error in this case, but the capability chain
     93 * will not be available.
     94 */
     95
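/*
 * Illustrative helper (not part of this header): one way for userspace
 * to walk the capability chain described above.  'buf' is the start of
 * the INFO buffer and 'cap_offset' the first-capability offset reported
 * in the fixed structure; the function name and parameters are examples.
 */
static inline struct vfio_info_cap_header *
vfio_info_cap_find(void *buf, __u32 cap_offset, __u16 id)
{
	struct vfio_info_cap_header *hdr;

	while (cap_offset) {
		hdr = (struct vfio_info_cap_header *)((char *)buf + cap_offset);
		if (hdr->id == id)
			return hdr;
		cap_offset = hdr->next;	/* zero terminates the chain */
	}
	return (struct vfio_info_cap_header *)0;	/* not found */
}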
     96/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */
     97
     98/**
     99 * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
    100 *
    101 * Report the version of the VFIO API.  This allows us to bump the entire
    102 * API version should we later need to add or change features in incompatible
    103 * ways.
    104 * Return: VFIO_API_VERSION
    105 * Availability: Always
    106 */
    107#define VFIO_GET_API_VERSION		_IO(VFIO_TYPE, VFIO_BASE + 0)
    108
    109/**
    110 * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
    111 *
    112 * Check whether an extension is supported.
    113 * Return: 0 if not supported, 1 (or some other positive integer) if supported.
    114 * Availability: Always
    115 */
    116#define VFIO_CHECK_EXTENSION		_IO(VFIO_TYPE, VFIO_BASE + 1)
    117
    118/**
    119 * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
    120 *
    121 * Set the iommu to the given type.  The type must be supported by an
    122 * iommu driver as verified by calling CHECK_EXTENSION using the same
    123 * type.  A group must be set to this file descriptor before this
    124 * ioctl is available.  The IOMMU interfaces enabled by this call are
    125 * specific to the value set.
    126 * Return: 0 on success, -errno on failure
    127 * Availability: When VFIO group attached
    128 */
    129#define VFIO_SET_IOMMU			_IO(VFIO_TYPE, VFIO_BASE + 2)
    130
    131/* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */
    132
    133/**
    134 * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
    135 *						struct vfio_group_status)
    136 *
     137 * Retrieve information about the group.  Fills in provided
     138 * struct vfio_group_status.  Caller sets argsz.
     139 * Return: 0 on success, -errno on failure.
    140 * Availability: Always
    141 */
    142struct vfio_group_status {
    143	__u32	argsz;
    144	__u32	flags;
    145#define VFIO_GROUP_FLAGS_VIABLE		(1 << 0)
    146#define VFIO_GROUP_FLAGS_CONTAINER_SET	(1 << 1)
    147};
    148#define VFIO_GROUP_GET_STATUS		_IO(VFIO_TYPE, VFIO_BASE + 3)
    149
    150/**
    151 * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
    152 *
    153 * Set the container for the VFIO group to the open VFIO file
    154 * descriptor provided.  Groups may only belong to a single
    155 * container.  Containers may, at their discretion, support multiple
    156 * groups.  Only when a container is set are all of the interfaces
    157 * of the VFIO file descriptor and the VFIO group file descriptor
    158 * available to the user.
    159 * Return: 0 on success, -errno on failure.
    160 * Availability: Always
    161 */
    162#define VFIO_GROUP_SET_CONTAINER	_IO(VFIO_TYPE, VFIO_BASE + 4)
    163
    164/**
    165 * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
    166 *
    167 * Remove the group from the attached container.  This is the
    168 * opposite of the SET_CONTAINER call and returns the group to
    169 * an initial state.  All device file descriptors must be released
    170 * prior to calling this interface.  When removing the last group
    171 * from a container, the IOMMU will be disabled and all state lost,
    172 * effectively also returning the VFIO file descriptor to an initial
    173 * state.
    174 * Return: 0 on success, -errno on failure.
    175 * Availability: When attached to container
    176 */
    177#define VFIO_GROUP_UNSET_CONTAINER	_IO(VFIO_TYPE, VFIO_BASE + 5)
    178
    179/**
    180 * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
    181 *
    182 * Return a new file descriptor for the device object described by
    183 * the provided string.  The string should match a device listed in
    184 * the devices subdirectory of the IOMMU group sysfs entry.  The
    185 * group containing the device must already be added to this context.
    186 * Return: new file descriptor on success, -errno on failure.
    187 * Availability: When attached to container
    188 */
    189#define VFIO_GROUP_GET_DEVICE_FD	_IO(VFIO_TYPE, VFIO_BASE + 6)
    190
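/*
 * Illustrative sketch (not part of this header): the canonical
 * container/group/device setup sequence using the ioctls above.  The
 * group number "26" and device name "0000:06:0d.0" are examples only.
 *
 *	int container, group, device;
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	container = open("/dev/vfio/vfio", O_RDWR);
 *	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *		... unknown API version ...
 *	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
 *		... Type1 IOMMU not supported ...
 *
 *	group = open("/dev/vfio/26", O_RDWR);
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		... not all devices in the group are bound to vfio ...
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */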
    191/* --------------- IOCTLs for DEVICE file descriptors --------------- */
    192
    193/**
    194 * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
    195 *						struct vfio_device_info)
    196 *
    197 * Retrieve information about the device.  Fills in provided
    198 * struct vfio_device_info.  Caller sets argsz.
    199 * Return: 0 on success, -errno on failure.
    200 */
    201struct vfio_device_info {
    202	__u32	argsz;
    203	__u32	flags;
    204#define VFIO_DEVICE_FLAGS_RESET	(1 << 0)	/* Device supports reset */
    205#define VFIO_DEVICE_FLAGS_PCI	(1 << 1)	/* vfio-pci device */
    206#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2)	/* vfio-platform device */
    207#define VFIO_DEVICE_FLAGS_AMBA  (1 << 3)	/* vfio-amba device */
    208#define VFIO_DEVICE_FLAGS_CCW	(1 << 4)	/* vfio-ccw device */
    209#define VFIO_DEVICE_FLAGS_AP	(1 << 5)	/* vfio-ap device */
    210#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6)	/* vfio-fsl-mc device */
    211#define VFIO_DEVICE_FLAGS_CAPS	(1 << 7)	/* Info supports caps */
    212	__u32	num_regions;	/* Max region index + 1 */
    213	__u32	num_irqs;	/* Max IRQ index + 1 */
    214	__u32   cap_offset;	/* Offset within info struct of first cap */
    215};
    216#define VFIO_DEVICE_GET_INFO		_IO(VFIO_TYPE, VFIO_BASE + 7)
    217
    218/*
     219 * A vendor driver using the mediated device framework should provide a
     220 * device_api attribute in its supported type attribute groups.  The string
     221 * should be one of the following, matching the flags in vfio_device_info.
    222 */
    223
    224#define VFIO_DEVICE_API_PCI_STRING		"vfio-pci"
    225#define VFIO_DEVICE_API_PLATFORM_STRING		"vfio-platform"
    226#define VFIO_DEVICE_API_AMBA_STRING		"vfio-amba"
    227#define VFIO_DEVICE_API_CCW_STRING		"vfio-ccw"
    228#define VFIO_DEVICE_API_AP_STRING		"vfio-ap"
    229
    230/*
    231 * The following capabilities are unique to s390 zPCI devices.  Their contents
    232 * are further-defined in vfio_zdev.h
    233 */
    234#define VFIO_DEVICE_INFO_CAP_ZPCI_BASE		1
    235#define VFIO_DEVICE_INFO_CAP_ZPCI_GROUP		2
    236#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL		3
    237#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP		4
    238
    239/**
    240 * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
    241 *				       struct vfio_region_info)
    242 *
    243 * Retrieve information about a device region.  Caller provides
    244 * struct vfio_region_info with index value set.  Caller sets argsz.
    245 * Implementation of region mapping is bus driver specific.  This is
    246 * intended to describe MMIO, I/O port, as well as bus specific
    247 * regions (ex. PCI config space).  Zero sized regions may be used
    248 * to describe unimplemented regions (ex. unimplemented PCI BARs).
    249 * Return: 0 on success, -errno on failure.
    250 */
    251struct vfio_region_info {
    252	__u32	argsz;
    253	__u32	flags;
    254#define VFIO_REGION_INFO_FLAG_READ	(1 << 0) /* Region supports read */
    255#define VFIO_REGION_INFO_FLAG_WRITE	(1 << 1) /* Region supports write */
    256#define VFIO_REGION_INFO_FLAG_MMAP	(1 << 2) /* Region supports mmap */
    257#define VFIO_REGION_INFO_FLAG_CAPS	(1 << 3) /* Info supports caps */
    258	__u32	index;		/* Region index */
    259	__u32	cap_offset;	/* Offset within info struct of first cap */
    260	__u64	size;		/* Region size (bytes) */
    261	__u64	offset;		/* Region offset from start of device fd */
    262};
    263#define VFIO_DEVICE_GET_REGION_INFO	_IO(VFIO_TYPE, VFIO_BASE + 8)
    264
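/*
 * Illustrative sketch (not part of this header): querying BAR0 of a
 * vfio-pci device and mmap()ing it when the region supports that.
 *
 *	struct vfio_region_info reg = {
 *		.argsz = sizeof(reg),
 *		.index = VFIO_PCI_BAR0_REGION_INDEX,
 *	};
 *	void *bar0;
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
 *	if (reg.flags & VFIO_REGION_INFO_FLAG_MMAP)
 *		bar0 = mmap(NULL, reg.size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, device_fd, reg.offset);
 */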
    265/*
    266 * The sparse mmap capability allows finer granularity of specifying areas
    267 * within a region with mmap support.  When specified, the user should only
    268 * mmap the offset ranges specified by the areas array.  mmaps outside of the
    269 * areas specified may fail (such as the range covering a PCI MSI-X table) or
    270 * may result in improper device behavior.
    271 *
    272 * The structures below define version 1 of this capability.
    273 */
    274#define VFIO_REGION_INFO_CAP_SPARSE_MMAP	1
    275
    276struct vfio_region_sparse_mmap_area {
    277	__u64	offset;	/* Offset of mmap'able area within region */
    278	__u64	size;	/* Size of mmap'able area */
    279};
    280
    281struct vfio_region_info_cap_sparse_mmap {
    282	struct vfio_info_cap_header header;
    283	__u32	nr_areas;
    284	__u32	reserved;
    285	struct vfio_region_sparse_mmap_area areas[];
    286};
    287
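/*
 * Illustrative sketch (not part of this header): honoring sparse mmap
 * areas.  'sparse' is assumed to have been located in the capability
 * chain (e.g. with a helper like vfio_info_cap_find() above), and 'reg'
 * is the vfio_region_info it belongs to.
 *
 *	__u32 i;
 *
 *	for (i = 0; i < sparse->nr_areas; i++)
 *		mmap(NULL, sparse->areas[i].size, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED, device_fd,
 *		     reg.offset + sparse->areas[i].offset);
 */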
    288/*
    289 * The device specific type capability allows regions unique to a specific
    290 * device or class of devices to be exposed.  This helps solve the problem for
    291 * vfio bus drivers of defining which region indexes correspond to which region
    292 * on the device, without needing to resort to static indexes, as done by
    293 * vfio-pci.  For instance, if we were to go back in time, we might remove
    294 * VFIO_PCI_VGA_REGION_INDEX and let vfio-pci simply define that all indexes
    295 * greater than or equal to VFIO_PCI_NUM_REGIONS are device specific and we'd
    296 * make a "VGA" device specific type to describe the VGA access space.  This
    297 * means that non-VGA devices wouldn't need to waste this index, and thus the
    298 * address space associated with it due to implementation of device file
    299 * descriptor offsets in vfio-pci.
    300 *
    301 * The current implementation is now part of the user ABI, so we can't use this
    302 * for VGA, but there are other upcoming use cases, such as opregions for Intel
    303 * IGD devices and framebuffers for vGPU devices.  We missed VGA, but we'll
    304 * use this for future additions.
    305 *
    306 * The structure below defines version 1 of this capability.
    307 */
    308#define VFIO_REGION_INFO_CAP_TYPE	2
    309
    310struct vfio_region_info_cap_type {
    311	struct vfio_info_cap_header header;
    312	__u32 type;	/* global per bus driver */
    313	__u32 subtype;	/* type specific */
    314};
    315
    316/*
    317 * List of region types, global per bus driver.
    318 * If you introduce a new type, please add it here.
    319 */
    320
    321/* PCI region type containing a PCI vendor part */
    322#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE	(1 << 31)
    323#define VFIO_REGION_TYPE_PCI_VENDOR_MASK	(0xffff)
    324#define VFIO_REGION_TYPE_GFX                    (1)
    325#define VFIO_REGION_TYPE_CCW			(2)
    326#define VFIO_REGION_TYPE_MIGRATION_DEPRECATED   (3)
    327
    328/* sub-types for VFIO_REGION_TYPE_PCI_* */
    329
    330/* 8086 vendor PCI sub-types */
    331#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION	(1)
    332#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG	(2)
    333#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG	(3)
    334
    335/* 10de vendor PCI sub-types */
    336/*
    337 * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
    338 *
    339 * Deprecated, region no longer provided
    340 */
    341#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM	(1)
    342
    343/* 1014 vendor PCI sub-types */
    344/*
    345 * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
    346 * to do TLB invalidation on a GPU.
    347 *
    348 * Deprecated, region no longer provided
    349 */
    350#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD	(1)
    351
    352/* sub-types for VFIO_REGION_TYPE_GFX */
    353#define VFIO_REGION_SUBTYPE_GFX_EDID            (1)
    354
    355/**
    356 * struct vfio_region_gfx_edid - EDID region layout.
    357 *
    358 * Set display link state and EDID blob.
    359 *
    360 * The EDID blob has monitor information such as brand, name, serial
    361 * number, physical size, supported video modes and more.
    362 *
     363 * This special region allows userspace (typically QEMU) to set a virtual
     364 * EDID for the virtual monitor, which allows a flexible display
    365 * configuration.
    366 *
    367 * For the edid blob spec look here:
    368 *    https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
    369 *
     370 * On Linux systems you can find the EDID blob in sysfs:
    371 *    /sys/class/drm/${card}/${connector}/edid
    372 *
     373 * You can use the edid-decode utility (it comes with xorg-x11-utils) to
    374 * decode the EDID blob.
    375 *
    376 * @edid_offset: location of the edid blob, relative to the
    377 *               start of the region (readonly).
    378 * @edid_max_size: max size of the edid blob (readonly).
    379 * @edid_size: actual edid size (read/write).
    380 * @link_state: display link state (read/write).
    381 * VFIO_DEVICE_GFX_LINK_STATE_UP: Monitor is turned on.
    382 * VFIO_DEVICE_GFX_LINK_STATE_DOWN: Monitor is turned off.
    383 * @max_xres: max display width (0 == no limitation, readonly).
    384 * @max_yres: max display height (0 == no limitation, readonly).
    385 *
    386 * EDID update protocol:
    387 *   (1) set link-state to down.
    388 *   (2) update edid blob and size.
    389 *   (3) set link-state to up.
    390 */
    391struct vfio_region_gfx_edid {
    392	__u32 edid_offset;
    393	__u32 edid_max_size;
    394	__u32 edid_size;
    395	__u32 max_xres;
    396	__u32 max_yres;
    397	__u32 link_state;
    398#define VFIO_DEVICE_GFX_LINK_STATE_UP    1
    399#define VFIO_DEVICE_GFX_LINK_STATE_DOWN  2
    400};
    401
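/*
 * Illustrative sketch (not part of this header): the EDID update
 * protocol above, performed with pread()/pwrite() on the device fd.
 * 'region_off' (the offset of the EDID region) and 'blob'/'blob_size'
 * (the new EDID) are assumptions.
 *
 *	struct vfio_region_gfx_edid hdr;
 *	__u32 state = VFIO_DEVICE_GFX_LINK_STATE_DOWN;
 *
 *	pread(device_fd, &hdr, sizeof(hdr), region_off);
 *	pwrite(device_fd, &state, sizeof(state), region_off +
 *	       offsetof(struct vfio_region_gfx_edid, link_state));
 *	pwrite(device_fd, blob, blob_size, region_off + hdr.edid_offset);
 *	pwrite(device_fd, &blob_size, sizeof(__u32), region_off +
 *	       offsetof(struct vfio_region_gfx_edid, edid_size));
 *	state = VFIO_DEVICE_GFX_LINK_STATE_UP;
 *	pwrite(device_fd, &state, sizeof(state), region_off +
 *	       offsetof(struct vfio_region_gfx_edid, link_state));
 */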
    402/* sub-types for VFIO_REGION_TYPE_CCW */
    403#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD	(1)
    404#define VFIO_REGION_SUBTYPE_CCW_SCHIB		(2)
    405#define VFIO_REGION_SUBTYPE_CCW_CRW		(3)
    406
    407/* sub-types for VFIO_REGION_TYPE_MIGRATION */
    408#define VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED (1)
    409
    410struct vfio_device_migration_info {
    411	__u32 device_state;         /* VFIO device state */
    412#define VFIO_DEVICE_STATE_V1_STOP      (0)
    413#define VFIO_DEVICE_STATE_V1_RUNNING   (1 << 0)
    414#define VFIO_DEVICE_STATE_V1_SAVING    (1 << 1)
    415#define VFIO_DEVICE_STATE_V1_RESUMING  (1 << 2)
    416#define VFIO_DEVICE_STATE_MASK      (VFIO_DEVICE_STATE_V1_RUNNING | \
    417				     VFIO_DEVICE_STATE_V1_SAVING |  \
    418				     VFIO_DEVICE_STATE_V1_RESUMING)
    419
    420#define VFIO_DEVICE_STATE_VALID(state) \
    421	(state & VFIO_DEVICE_STATE_V1_RESUMING ? \
    422	(state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_V1_RESUMING : 1)
    423
    424#define VFIO_DEVICE_STATE_IS_ERROR(state) \
    425	((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_V1_SAVING | \
    426					      VFIO_DEVICE_STATE_V1_RESUMING))
    427
    428#define VFIO_DEVICE_STATE_SET_ERROR(state) \
    429	((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_V1_SAVING | \
    430					     VFIO_DEVICE_STATE_V1_RESUMING)
    431
    432	__u32 reserved;
    433	__u64 pending_bytes;
    434	__u64 data_offset;
    435	__u64 data_size;
    436};
    437
    438/*
     439 * The MSI-X mappable capability informs userspace that the MSI-X data of a
     440 * BAR can be mmapped, which allows direct access to non-MSI-X registers that
     441 * happen to be within the same system page.
     442 *
     443 * Even though userspace gets direct access to the MSI-X data, the existing
     444 * VFIO_DEVICE_SET_IRQS interface must still be used for MSI-X configuration.
    445 */
    446#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE	3
    447
    448/*
    449 * Capability with compressed real address (aka SSA - small system address)
    450 * where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
     451 * and by userspace to associate an NVLink bridge with a GPU.
    452 *
    453 * Deprecated, capability no longer provided
    454 */
    455#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT	4
    456
    457struct vfio_region_info_cap_nvlink2_ssatgt {
    458	struct vfio_info_cap_header header;
    459	__u64 tgt;
    460};
    461
    462/*
    463 * Capability with an NVLink link speed. The value is read by
    464 * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
    465 * property in the device tree. The value is fixed in the hardware
    466 * and failing to provide the correct value results in the link
    467 * not working with no indication from the driver why.
    468 *
    469 * Deprecated, capability no longer provided
    470 */
    471#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD	5
    472
    473struct vfio_region_info_cap_nvlink2_lnkspd {
    474	struct vfio_info_cap_header header;
    475	__u32 link_speed;
    476	__u32 __pad;
    477};
    478
    479/**
    480 * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
    481 *				    struct vfio_irq_info)
    482 *
    483 * Retrieve information about a device IRQ.  Caller provides
    484 * struct vfio_irq_info with index value set.  Caller sets argsz.
    485 * Implementation of IRQ mapping is bus driver specific.  Indexes
    486 * using multiple IRQs are primarily intended to support MSI-like
    487 * interrupt blocks.  Zero count irq blocks may be used to describe
    488 * unimplemented interrupt types.
    489 *
    490 * The EVENTFD flag indicates the interrupt index supports eventfd based
    491 * signaling.
    492 *
     493 * The MASKABLE flag indicates that the index supports MASK and UNMASK
    494 * actions described below.
    495 *
    496 * AUTOMASKED indicates that after signaling, the interrupt line is
    497 * automatically masked by VFIO and the user needs to unmask the line
    498 * to receive new interrupts.  This is primarily intended to distinguish
    499 * level triggered interrupts.
    500 *
    501 * The NORESIZE flag indicates that the interrupt lines within the index
     502 * are set up as a set and new subindexes cannot be enabled without first
    503 * disabling the entire index.  This is used for interrupts like PCI MSI
    504 * and MSI-X where the driver may only use a subset of the available
    505 * indexes, but VFIO needs to enable a specific number of vectors
    506 * upfront.  In the case of MSI-X, where the user can enable MSI-X and
     507 * then add and unmask vectors, it's up to userspace to decide whether
     508 * to allocate the maximum supported number of vectors or to tear down
     509 * the setup and incrementally increase the vectors as each is enabled.
    510 */
    511struct vfio_irq_info {
    512	__u32	argsz;
    513	__u32	flags;
    514#define VFIO_IRQ_INFO_EVENTFD		(1 << 0)
    515#define VFIO_IRQ_INFO_MASKABLE		(1 << 1)
    516#define VFIO_IRQ_INFO_AUTOMASKED	(1 << 2)
    517#define VFIO_IRQ_INFO_NORESIZE		(1 << 3)
    518	__u32	index;		/* IRQ index */
    519	__u32	count;		/* Number of IRQs within this index */
    520};
    521#define VFIO_DEVICE_GET_IRQ_INFO	_IO(VFIO_TYPE, VFIO_BASE + 9)
    522
    523/**
    524 * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
    525 *
    526 * Set signaling, masking, and unmasking of interrupts.  Caller provides
    527 * struct vfio_irq_set with all fields set.  'start' and 'count' indicate
    528 * the range of subindexes being specified.
    529 *
    530 * The DATA flags specify the type of data provided.  If DATA_NONE, the
    531 * operation performs the specified action immediately on the specified
    532 * interrupt(s).  For example, to unmask AUTOMASKED interrupt [0,0]:
    533 * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
    534 *
     535 * DATA_BOOL allows sparse support for the same actions on arrays of interrupts.
    536 * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
    537 * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
    538 * data = {1,0,1}
    539 *
    540 * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
    541 * A value of -1 can be used to either de-assign interrupts if already
    542 * assigned or skip un-assigned interrupts.  For example, to set an eventfd
     543 * to be triggered for interrupts [0,0] and [0,2]:
    544 * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
    545 * data = {fd1, -1, fd2}
     546 * If index [0,1] is previously set, two count = 1 ioctl calls would be
    547 * required to set [0,0] and [0,2] without changing [0,1].
    548 *
    549 * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
    550 * with ACTION_TRIGGER to perform kernel level interrupt loopback testing
     551 * from userspace (i.e. simulate hardware triggering).
    552 *
    553 * Setting of an event triggering mechanism to userspace for ACTION_TRIGGER
    554 * enables the interrupt index for the device.  Individual subindex interrupts
    555 * can be disabled using the -1 value for DATA_EVENTFD or the index can be
    556 * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
    557 *
    558 * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
    559 * ACTION_TRIGGER specifies kernel->user signaling.
    560 */
    561struct vfio_irq_set {
    562	__u32	argsz;
    563	__u32	flags;
    564#define VFIO_IRQ_SET_DATA_NONE		(1 << 0) /* Data not present */
    565#define VFIO_IRQ_SET_DATA_BOOL		(1 << 1) /* Data is bool (u8) */
    566#define VFIO_IRQ_SET_DATA_EVENTFD	(1 << 2) /* Data is eventfd (s32) */
    567#define VFIO_IRQ_SET_ACTION_MASK	(1 << 3) /* Mask interrupt */
    568#define VFIO_IRQ_SET_ACTION_UNMASK	(1 << 4) /* Unmask interrupt */
    569#define VFIO_IRQ_SET_ACTION_TRIGGER	(1 << 5) /* Trigger interrupt */
    570	__u32	index;
    571	__u32	start;
    572	__u32	count;
    573	__u8	data[];
    574};
    575#define VFIO_DEVICE_SET_IRQS		_IO(VFIO_TYPE, VFIO_BASE + 10)
    576
    577#define VFIO_IRQ_SET_DATA_TYPE_MASK	(VFIO_IRQ_SET_DATA_NONE | \
    578					 VFIO_IRQ_SET_DATA_BOOL | \
    579					 VFIO_IRQ_SET_DATA_EVENTFD)
    580#define VFIO_IRQ_SET_ACTION_TYPE_MASK	(VFIO_IRQ_SET_ACTION_MASK | \
    581					 VFIO_IRQ_SET_ACTION_UNMASK | \
    582					 VFIO_IRQ_SET_ACTION_TRIGGER)
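/*
 * Illustrative sketch (not part of this header): enabling two MSI-X
 * vectors signaled through eventfds.  data[] carries one __s32 per
 * subindex; 'efd0' and 'efd1' are hypothetical eventfds.
 *
 *	struct vfio_irq_set *set;
 *	size_t sz = sizeof(*set) + 2 * sizeof(__s32);
 *
 *	set = calloc(1, sz);
 *	set->argsz = sz;
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSIX_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 2;
 *	((__s32 *)set->data)[0] = efd0;
 *	((__s32 *)set->data)[1] = efd1;
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
 */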
    583/**
    584 * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
    585 *
    586 * Reset a device.
    587 */
    588#define VFIO_DEVICE_RESET		_IO(VFIO_TYPE, VFIO_BASE + 11)
    589
    590/*
    591 * The VFIO-PCI bus driver makes use of the following fixed region and
    592 * IRQ index mapping.  Unimplemented regions return a size of zero.
    593 * Unimplemented IRQ types return a count of zero.
    594 */
    595
    596enum {
    597	VFIO_PCI_BAR0_REGION_INDEX,
    598	VFIO_PCI_BAR1_REGION_INDEX,
    599	VFIO_PCI_BAR2_REGION_INDEX,
    600	VFIO_PCI_BAR3_REGION_INDEX,
    601	VFIO_PCI_BAR4_REGION_INDEX,
    602	VFIO_PCI_BAR5_REGION_INDEX,
    603	VFIO_PCI_ROM_REGION_INDEX,
    604	VFIO_PCI_CONFIG_REGION_INDEX,
    605	/*
    606	 * Expose VGA regions defined for PCI base class 03, subclass 00.
    607	 * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
    608	 * as well as the MMIO range 0xa0000 to 0xbffff.  Each implemented
     609	 * range is found at its identity mapped offset from the region
    610	 * offset, for example 0x3b0 is region_info.offset + 0x3b0.  Areas
    611	 * between described ranges are unimplemented.
    612	 */
    613	VFIO_PCI_VGA_REGION_INDEX,
    614	VFIO_PCI_NUM_REGIONS = 9 /* Fixed user ABI, region indexes >=9 use */
    615				 /* device specific cap to define content. */
    616};
    617
    618enum {
    619	VFIO_PCI_INTX_IRQ_INDEX,
    620	VFIO_PCI_MSI_IRQ_INDEX,
    621	VFIO_PCI_MSIX_IRQ_INDEX,
    622	VFIO_PCI_ERR_IRQ_INDEX,
    623	VFIO_PCI_REQ_IRQ_INDEX,
    624	VFIO_PCI_NUM_IRQS
    625};
    626
    627/*
    628 * The vfio-ccw bus driver makes use of the following fixed region and
    629 * IRQ index mapping. Unimplemented regions return a size of zero.
    630 * Unimplemented IRQ types return a count of zero.
    631 */
    632
    633enum {
    634	VFIO_CCW_CONFIG_REGION_INDEX,
    635	VFIO_CCW_NUM_REGIONS
    636};
    637
    638enum {
    639	VFIO_CCW_IO_IRQ_INDEX,
    640	VFIO_CCW_CRW_IRQ_INDEX,
    641	VFIO_CCW_REQ_IRQ_INDEX,
    642	VFIO_CCW_NUM_IRQS
    643};
    644
    645/**
    646 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
    647 *					      struct vfio_pci_hot_reset_info)
    648 *
    649 * Return: 0 on success, -errno on failure:
     650 *	-ENOSPC = insufficient buffer, -ENODEV = unsupported for device.
    651 */
    652struct vfio_pci_dependent_device {
    653	__u32	group_id;
    654	__u16	segment;
    655	__u8	bus;
    656	__u8	devfn; /* Use PCI_SLOT/PCI_FUNC */
    657};
    658
    659struct vfio_pci_hot_reset_info {
    660	__u32	argsz;
    661	__u32	flags;
    662	__u32	count;
    663	struct vfio_pci_dependent_device	devices[];
    664};
    665
    666#define VFIO_DEVICE_GET_PCI_HOT_RESET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 12)
    667
    668/**
    669 * VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
    670 *				    struct vfio_pci_hot_reset)
    671 *
    672 * Return: 0 on success, -errno on failure.
    673 */
    674struct vfio_pci_hot_reset {
    675	__u32	argsz;
    676	__u32	flags;
    677	__u32	count;
    678	__s32	group_fds[];
    679};
    680
    681#define VFIO_DEVICE_PCI_HOT_RESET	_IO(VFIO_TYPE, VFIO_BASE + 13)
    682
    683/**
    684 * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,
    685 *                                    struct vfio_device_query_gfx_plane)
    686 *
    687 * Set the drm_plane_type and flags, then retrieve the gfx plane info.
    688 *
    689 * flags supported:
     690 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set
     691 *   to ask if the mdev supports dma-buf: returns 0 if dma-buf is
     692 *   supported, -EINVAL if not.
     693 * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set
     694 *   to ask if the mdev supports region: returns 0 if region is
     695 *   supported, -EINVAL if not.
    696 * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set
    697 *   with each call to query the plane info.
    698 * - Others are invalid and return -EINVAL.
    699 *
    700 * Note:
     701 * 1. The plane could be disabled by the guest. In that case, success will
     702 *    be returned with zero-initialized drm_format, size, width and height
     703 *    fields.
     704 * 2. x_hot/y_hot is set to 0xFFFFFFFF if no hotspot information is available
    705 *
    706 * Return: 0 on success, -errno on other failure.
    707 */
    708struct vfio_device_gfx_plane_info {
    709	__u32 argsz;
    710	__u32 flags;
    711#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0)
    712#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1)
    713#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2)
    714	/* in */
    715	__u32 drm_plane_type;	/* type of plane: DRM_PLANE_TYPE_* */
    716	/* out */
    717	__u32 drm_format;	/* drm format of plane */
    718	__u64 drm_format_mod;   /* tiled mode */
    719	__u32 width;	/* width of plane */
    720	__u32 height;	/* height of plane */
    721	__u32 stride;	/* stride of plane */
     722	__u32 size;	/* size of plane in bytes, page aligned */
     723	__u32 x_pos;	/* horizontal position of cursor plane */
     724	__u32 y_pos;	/* vertical position of cursor plane */
    725	__u32 x_hot;    /* horizontal position of cursor hotspot */
    726	__u32 y_hot;    /* vertical position of cursor hotspot */
    727	union {
    728		__u32 region_index;	/* region index */
    729		__u32 dmabuf_id;	/* dma-buf id */
    730	};
    731};
    732
    733#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
    734
    735/**
    736 * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32)
    737 *
     738 * Return a new dma-buf file descriptor for an exposed guest framebuffer
     739 * described by the provided dmabuf_id. The dmabuf_id is returned from
     740 * VFIO_DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer.
    741 */
    742
    743#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15)
    744
    745/**
    746 * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,
    747 *                              struct vfio_device_ioeventfd)
    748 *
    749 * Perform a write to the device at the specified device fd offset, with
    750 * the specified data and width when the provided eventfd is triggered.
    751 * vfio bus drivers may not support this for all regions, for all widths,
    752 * or at all.  vfio-pci currently only enables support for BAR regions,
    753 * excluding the MSI-X vector table.
    754 *
    755 * Return: 0 on success, -errno on failure.
    756 */
    757struct vfio_device_ioeventfd {
    758	__u32	argsz;
    759	__u32	flags;
    760#define VFIO_DEVICE_IOEVENTFD_8		(1 << 0) /* 1-byte write */
    761#define VFIO_DEVICE_IOEVENTFD_16	(1 << 1) /* 2-byte write */
    762#define VFIO_DEVICE_IOEVENTFD_32	(1 << 2) /* 4-byte write */
    763#define VFIO_DEVICE_IOEVENTFD_64	(1 << 3) /* 8-byte write */
    764#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK	(0xf)
    765	__u64	offset;			/* device fd offset of write */
    766	__u64	data;			/* data to be written */
    767	__s32	fd;			/* -1 for de-assignment */
    768};
    769
    770#define VFIO_DEVICE_IOEVENTFD		_IO(VFIO_TYPE, VFIO_BASE + 16)
    771
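/*
 * Illustrative sketch (not part of this header): binding an eventfd so
 * that each signal performs a 4-byte write of 0x1 at the hypothetical
 * BAR offset 'doorbell_off'.
 *
 *	struct vfio_device_ioeventfd ioefd = {
 *		.argsz  = sizeof(ioefd),
 *		.flags  = VFIO_DEVICE_IOEVENTFD_32,
 *		.offset = doorbell_off,
 *		.data   = 0x1,
 *		.fd     = eventfd(0, 0),
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_IOEVENTFD, &ioefd);
 */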
    772/**
    773 * VFIO_DEVICE_FEATURE - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
    774 *			       struct vfio_device_feature)
    775 *
    776 * Get, set, or probe feature data of the device.  The feature is selected
    777 * using the FEATURE_MASK portion of the flags field.  Support for a feature
    778 * can be probed by setting both the FEATURE_MASK and PROBE bits.  A probe
    779 * may optionally include the GET and/or SET bits to determine read vs write
    780 * access of the feature respectively.  Probing a feature will return success
    781 * if the feature is supported and all of the optionally indicated GET/SET
    782 * methods are supported.  The format of the data portion of the structure is
    783 * specific to the given feature.  The data portion is not required for
    784 * probing.  GET and SET are mutually exclusive, except for use with PROBE.
    785 *
    786 * Return 0 on success, -errno on failure.
    787 */
    788struct vfio_device_feature {
    789	__u32	argsz;
    790	__u32	flags;
    791#define VFIO_DEVICE_FEATURE_MASK	(0xffff) /* 16-bit feature index */
    792#define VFIO_DEVICE_FEATURE_GET		(1 << 16) /* Get feature into data[] */
    793#define VFIO_DEVICE_FEATURE_SET		(1 << 17) /* Set feature from data[] */
    794#define VFIO_DEVICE_FEATURE_PROBE	(1 << 18) /* Probe feature support */
    795	__u8	data[];
    796};
    797
    798#define VFIO_DEVICE_FEATURE		_IO(VFIO_TYPE, VFIO_BASE + 17)
    799
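/*
 * Illustrative sketch (not part of this header): probing whether a
 * feature supports SET before attempting to use it.  No data[] is
 * required for probing.
 *
 *	struct vfio_device_feature probe = {
 *		.argsz = sizeof(probe),
 *		.flags = VFIO_DEVICE_FEATURE_PCI_VF_TOKEN |
 *			 VFIO_DEVICE_FEATURE_PROBE | VFIO_DEVICE_FEATURE_SET,
 *	};
 *
 *	if (!ioctl(device_fd, VFIO_DEVICE_FEATURE, &probe))
 *		... the feature can be SET on this device ...
 */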
    800/*
    801 * Provide support for setting a PCI VF Token, which is used as a shared
    802 * secret between PF and VF drivers.  This feature may only be set on a
    803 * PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
    804 * open VFs.  Data provided when setting this feature is a 16-byte array
    805 * (__u8 b[16]), representing a UUID.
    806 */
    807#define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN	(0)
    808
    809/*
    810 * Indicates the device can support the migration API through
    811 * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. If this GET succeeds, the RUNNING and
    812 * ERROR states are always supported. Support for additional states is
    813 * indicated via the flags field; at least VFIO_MIGRATION_STOP_COPY must be
    814 * set.
    815 *
    816 * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and
    817 * RESUMING are supported.
    818 *
    819 * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P
    820 * is supported in addition to the STOP_COPY states.
    821 *
    822 * Other combinations of flags have behavior to be defined in the future.
    823 */
    824struct vfio_device_feature_migration {
    825	__aligned_u64 flags;
    826#define VFIO_MIGRATION_STOP_COPY	(1 << 0)
    827#define VFIO_MIGRATION_P2P		(1 << 1)
    828};
    829#define VFIO_DEVICE_FEATURE_MIGRATION 1
    830
    831/*
    832 * Upon VFIO_DEVICE_FEATURE_SET, execute a migration state change on the VFIO
    833 * device. The new state is supplied in device_state, see enum
     834 * vfio_device_mig_state for details.
    835 *
    836 * The kernel migration driver must fully transition the device to the new state
    837 * value before the operation returns to the user.
    838 *
    839 * The kernel migration driver must not generate asynchronous device state
    840 * transitions outside of manipulation by the user or the VFIO_DEVICE_RESET
    841 * ioctl as described above.
    842 *
    843 * If this function fails then current device_state may be the original
    844 * operating state or some other state along the combination transition path.
    845 * The user can then decide if it should execute a VFIO_DEVICE_RESET, attempt
    846 * to return to the original state, or attempt to return to some other state
    847 * such as RUNNING or STOP.
    848 *
    849 * If the new_state starts a new data transfer session then the FD associated
     850 * with that session is returned in data_fd. The user is responsible for
     851 * closing this FD when finished. The user must consider the migration data
     852 * stream carried over the FD to be opaque and must preserve the byte order of
     853 * the stream. The user is not required to preserve buffer segmentation when
     854 * writing the data stream during the RESUMING operation.
    855 *
    856 * Upon VFIO_DEVICE_FEATURE_GET, get the current migration state of the VFIO
     857 * device; data_fd will be -1.
    858 */
    859struct vfio_device_feature_mig_state {
    860	__u32 device_state; /* From enum vfio_device_mig_state */
    861	__s32 data_fd;
    862};
    863#define VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE 2
    864
    865/*
    866 * The device migration Finite State Machine is described by the enum
    867 * vfio_device_mig_state. Some of the FSM arcs will create a migration data
    868 * transfer session by returning a FD, in this case the migration data will
    869 * flow over the FD using read() and write() as discussed below.
    870 *
    871 * There are 5 states to support VFIO_MIGRATION_STOP_COPY:
    872 *  RUNNING - The device is running normally
    873 *  STOP - The device does not change the internal or external state
    874 *  STOP_COPY - The device internal state can be read out
    875 *  RESUMING - The device is stopped and is loading a new internal state
    876 *  ERROR - The device has failed and must be reset
    877 *
    878 * And 1 optional state to support VFIO_MIGRATION_P2P:
    879 *  RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA
    880 *
    881 * The FSM takes actions on the arcs between FSM states. The driver implements
    882 * the following behavior for the FSM arcs:
    883 *
    884 * RUNNING_P2P -> STOP
    885 * STOP_COPY -> STOP
     886 *   While in STOP the device must cease operation.  The device
    887 *   must not generate interrupts, DMA, or any other change to external state.
    888 *   It must not change its internal state. When stopped the device and kernel
    889 *   migration driver must accept and respond to interaction to support external
    890 *   subsystems in the STOP state, for example PCI MSI-X and PCI config space.
    891 *   Failure by the user to restrict device access while in STOP must not result
    892 *   in error conditions outside the user context (ex. host system faults).
    893 *
    894 *   The STOP_COPY arc will terminate a data transfer session.
    895 *
    896 * RESUMING -> STOP
    897 *   Leaving RESUMING terminates a data transfer session and indicates the
    898 *   device should complete processing of the data delivered by write(). The
    899 *   kernel migration driver should complete the incorporation of data written
    900 *   to the data transfer FD into the device internal state and perform
    901 *   final validity and consistency checking of the new device state. If the
    902 *   user provided data is found to be incomplete, inconsistent, or otherwise
    903 *   invalid, the migration driver must fail the SET_STATE ioctl and
    904 *   optionally go to the ERROR state as described below.
    905 *
    906 *   While in STOP the device has the same behavior as other STOP states
    907 *   described above.
    908 *
    909 *   To abort a RESUMING session the device must be reset.
    910 *
    911 * RUNNING_P2P -> RUNNING
    912 *   While in RUNNING the device is fully operational, the device may generate
    913 *   interrupts, DMA, respond to MMIO, all vfio device regions are functional,
    914 *   and the device may advance its internal state.
    915 *
    916 * RUNNING -> RUNNING_P2P
    917 * STOP -> RUNNING_P2P
    918 *   While in RUNNING_P2P the device is partially running in the P2P quiescent
    919 *   state defined below.
    920 *
    921 * STOP -> STOP_COPY
    922 *   This arc begin the process of saving the device state and will return a
    923 *   new data_fd.
    924 *
    925 *   While in the STOP_COPY state the device has the same behavior as STOP
     927 *   with the addition that the data transfer session continues to stream the
    927 *   migration state. End of stream on the FD indicates the entire device
    928 *   state has been transferred.
    929 *
    930 *   The user should take steps to restrict access to vfio device regions while
    931 *   the device is in STOP_COPY or risk corruption of the device migration data
    932 *   stream.
    933 *
    934 * STOP -> RESUMING
    935 *   Entering the RESUMING state starts a process of restoring the device state
    936 *   and will return a new data_fd. The data stream fed into the data_fd should
    937 *   be taken from the data transfer output of a single FD during saving from
    938 *   a compatible device. The migration driver may alter/reset the internal
    939 *   device state for this arc if required to prepare the device to receive the
    940 *   migration data.
    941 *
    942 * any -> ERROR
    943 *   ERROR cannot be specified as a device state, however any transition request
    944 *   can be failed with an errno return and may then move the device_state into
    945 *   ERROR. In this case the device was unable to execute the requested arc and
    946 *   was also unable to restore the device to any valid device_state.
    947 *   To recover from ERROR VFIO_DEVICE_RESET must be used to return the
    948 *   device_state back to RUNNING.
    949 *
     950 * The optional peer to peer (P2P) quiescent state is intended to be a quiescent
     951 * state for the device for the purposes of managing multiple devices within a
     952 * user context where peer-to-peer DMA between devices may be active. The
     953 * RUNNING_P2P state must prevent the device from initiating
     954 * any new P2P DMA transactions. If the device can identify P2P transactions
     955 * then it can stop only P2P DMA, otherwise it must stop all DMA. The migration
     956 * driver must complete any such outstanding operations prior to completing the
     957 * FSM arc into a P2P state. For the purposes of this specification, if P2P is
     958 * not supported, the state behaves as though the device were fully running. As
     959 * while in STOP or STOP_COPY, the user must not touch the device, otherwise
     960 * the state may be exited.
    961 *
    962 * The remaining possible transitions are interpreted as combinations of the
    963 * above FSM arcs. As there are multiple paths through the FSM arcs the path
    964 * should be selected based on the following rules:
    965 *   - Select the shortest path.
    966 * Refer to vfio_mig_get_next_state() for the result of the algorithm.
    967 *
    968 * The automatic transit through the FSM arcs that make up the combination
    969 * transition is invisible to the user. When working with combination arcs the
    970 * user may see any step along the path in the device_state if SET_STATE
    971 * fails. When handling these types of errors users should anticipate future
    972 * revisions of this protocol using new states and those states becoming
    973 * visible in this case.
    974 *
    975 * The optional states cannot be used with SET_STATE if the device does not
    976 * support them. The user can discover if these states are supported by using
    977 * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user can
    978 * avoid knowing about these optional states if the kernel driver supports them.
    979 */
    980enum vfio_device_mig_state {
    981	VFIO_DEVICE_STATE_ERROR = 0,
    982	VFIO_DEVICE_STATE_STOP = 1,
    983	VFIO_DEVICE_STATE_RUNNING = 2,
    984	VFIO_DEVICE_STATE_STOP_COPY = 3,
    985	VFIO_DEVICE_STATE_RESUMING = 4,
    986	VFIO_DEVICE_STATE_RUNNING_P2P = 5,
    987};
    988
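/*
 * Illustrative sketch (not part of this header): saving device state
 * through the STOP_COPY arc.  The ioctl payload is a struct
 * vfio_device_feature with a struct vfio_device_feature_mig_state in
 * data[]; the stream is then read from the returned data_fd until EOF.
 *
 *	struct {
 *		struct vfio_device_feature feature;
 *		struct vfio_device_feature_mig_state mig;
 *	} arg = {
 *		.feature.argsz = sizeof(arg),
 *		.feature.flags = VFIO_DEVICE_FEATURE_SET |
 *				 VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE,
 *		.mig.device_state = VFIO_DEVICE_STATE_STOP_COPY,
 *	};
 *	char buf[4096];
 *	ssize_t n;
 *
 *	ioctl(device_fd, VFIO_DEVICE_FEATURE, &arg);
 *	while ((n = read(arg.mig.data_fd, buf, sizeof(buf))) > 0)
 *		... append buf to the migration stream ...
 */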
    989/* -------- API for Type1 VFIO IOMMU -------- */
    990
    991/**
    992 * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12, struct vfio_iommu_info)
    993 *
    994 * Retrieve information about the IOMMU object. Fills in provided
    995 * struct vfio_iommu_info. Caller sets argsz.
    996 *
    997 * XXX Should we do these by CHECK_EXTENSION too?
    998 */
    999struct vfio_iommu_type1_info {
   1000	__u32	argsz;
   1001	__u32	flags;
   1002#define VFIO_IOMMU_INFO_PGSIZES (1 << 0)	/* supported page sizes info */
   1003#define VFIO_IOMMU_INFO_CAPS	(1 << 1)	/* Info supports caps */
   1004	__u64	iova_pgsizes;	/* Bitmap of supported page sizes */
   1005	__u32   cap_offset;	/* Offset within info struct of first cap */
   1006};
   1007
   1008/*
    1009 * The IOVA capability allows reporting the valid IOVA range(s),
    1010 * excluding any non-relaxable reserved regions exposed by
    1011 * devices attached to the container. Any DMA map attempt
    1012 * outside the valid iova range will return an error.
   1013 *
   1014 * The structures below define version 1 of this capability.
   1015 */
   1016#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE  1
   1017
   1018struct vfio_iova_range {
   1019	__u64	start;
   1020	__u64	end;
   1021};
   1022
   1023struct vfio_iommu_type1_info_cap_iova_range {
   1024	struct	vfio_info_cap_header header;
   1025	__u32	nr_iovas;
   1026	__u32	reserved;
   1027	struct	vfio_iova_range iova_ranges[];
   1028};
   1029
   1030/*
    1031 * The migration capability allows reporting the features supported for migration.
   1032 *
   1033 * The structures below define version 1 of this capability.
   1034 *
    1035 * The existence of this capability indicates that the IOMMU kernel driver
    1036 * supports dirty page logging.
   1037 *
   1038 * pgsize_bitmap: Kernel driver returns bitmap of supported page sizes for dirty
   1039 * page logging.
   1040 * max_dirty_bitmap_size: Kernel driver returns maximum supported dirty bitmap
   1041 * size in bytes that can be used by user applications when getting the dirty
   1042 * bitmap.
   1043 */
   1044#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION  2
   1045
   1046struct vfio_iommu_type1_info_cap_migration {
   1047	struct	vfio_info_cap_header header;
   1048	__u32	flags;
   1049	__u64	pgsize_bitmap;
   1050	__u64	max_dirty_bitmap_size;		/* in bytes */
   1051};
   1052
   1053/*
    1054 * The DMA available capability allows reporting the current number of
   1055 * simultaneously outstanding DMA mappings that are allowed.
   1056 *
   1057 * The structure below defines version 1 of this capability.
   1058 *
   1059 * avail: specifies the current number of outstanding DMA mappings allowed.
   1060 */
   1061#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3
   1062
   1063struct vfio_iommu_type1_info_dma_avail {
   1064	struct	vfio_info_cap_header header;
   1065	__u32	avail;
   1066};
   1067
   1068#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
   1069
   1070/**
   1071 * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13, struct vfio_dma_map)
   1072 *
   1073 * Map process virtual addresses to IO virtual addresses using the
    1074 * provided struct vfio_dma_map. Caller sets argsz. READ and/or WRITE required.
   1075 *
   1076 * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova, and
   1077 * unblock translation of host virtual addresses in the iova range.  The vaddr
   1078 * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR.  To
   1079 * maintain memory consistency within the user application, the updated vaddr
   1080 * must address the same memory object as originally mapped.  Failure to do so
   1081 * will result in user memory corruption and/or device misbehavior.  iova and
   1082 * size must match those in the original MAP_DMA call.  Protection is not
   1083 * changed, and the READ & WRITE flags must be 0.
   1084 */
   1085struct vfio_iommu_type1_dma_map {
   1086	__u32	argsz;
   1087	__u32	flags;
   1088#define VFIO_DMA_MAP_FLAG_READ (1 << 0)		/* readable from device */
   1089#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)	/* writable from device */
   1090#define VFIO_DMA_MAP_FLAG_VADDR (1 << 2)
   1091	__u64	vaddr;				/* Process virtual address */
   1092	__u64	iova;				/* IO virtual address */
   1093	__u64	size;				/* Size of mapping (bytes) */
   1094};
   1095
   1096#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
   1097
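/*
 * Illustrative sketch (not part of this header): mapping 1 MB of
 * anonymous memory at IOVA 0 for device DMA.  'mem' is assumed to be
 * the address returned by a page-aligned allocation such as mmap().
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)mem,
 *		.iova  = 0,
 *		.size  = 1024 * 1024,
 *	};
 *
 *	ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
 */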
   1098struct vfio_bitmap {
   1099	__u64        pgsize;	/* page size for bitmap in bytes */
   1100	__u64        size;	/* in bytes */
   1101	__u64 __user *data;	/* one bit per page */
   1102};
   1103
   1104/**
   1105 * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
   1106 *							struct vfio_dma_unmap)
   1107 *
   1108 * Unmap IO virtual addresses using the provided struct vfio_dma_unmap.
   1109 * Caller sets argsz.  The actual unmapped size is returned in the size
   1110 * field.  No guarantee is made to the user that arbitrary unmaps of iova
   1111 * or size different from those used in the original mapping call will
   1112 * succeed.
   1113 *
   1114 * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
    1115 * before unmapping IO virtual addresses. When this flag is set, the user must
    1116 * provide a struct vfio_bitmap in data[]. The user must provide zeroed
    1117 * memory via vfio_bitmap.data and its size in the vfio_bitmap.size field.
    1118 * A bit in the bitmap represents one page, of the user-provided page size in
    1119 * the vfio_bitmap.pgsize field, consecutively starting from the iova offset.
    1120 * A set bit indicates that the page at that offset from iova is dirty. A
    1121 * bitmap of the pages in the range of the unmapped size is returned in the
    1122 * user-provided vfio_bitmap.data.
   1123 *
   1124 * If flags & VFIO_DMA_UNMAP_FLAG_ALL, unmap all addresses.  iova and size
   1125 * must be 0.  This cannot be combined with the get-dirty-bitmap flag.
   1126 *
   1127 * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
   1128 * virtual addresses in the iova range.  Tasks that attempt to translate an
   1129 * iova's vaddr will block.  DMA to already-mapped pages continues.  This
   1130 * cannot be combined with the get-dirty-bitmap flag.
   1131 */
   1132struct vfio_iommu_type1_dma_unmap {
   1133	__u32	argsz;
   1134	__u32	flags;
   1135#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
   1136#define VFIO_DMA_UNMAP_FLAG_ALL		     (1 << 1)
   1137#define VFIO_DMA_UNMAP_FLAG_VADDR	     (1 << 2)
   1138	__u64	iova;				/* IO virtual address */
   1139	__u64	size;				/* Size of mapping (bytes) */
   1140	__u8    data[];
   1141};
   1142
   1143#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
   1144
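/*
 * Illustrative sketch (not part of this header): unmapping the range
 * mapped in the previous sketch; the kernel reports the actually
 * unmapped size back in .size.
 *
 *	struct vfio_iommu_type1_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *		.iova  = 0,
 *		.size  = 1024 * 1024,
 *	};
 *
 *	ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
 */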
   1145/*
   1146 * IOCTLs to enable/disable IOMMU container usage.
   1147 * No parameters are supported.
   1148 */
   1149#define VFIO_IOMMU_ENABLE	_IO(VFIO_TYPE, VFIO_BASE + 15)
   1150#define VFIO_IOMMU_DISABLE	_IO(VFIO_TYPE, VFIO_BASE + 16)
   1151
   1152/**
   1153 * VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
   1154 *                                     struct vfio_iommu_type1_dirty_bitmap)
    1155 * This IOCTL is used for dirty page logging.
    1156 * The caller should set a flag depending on which operation to perform, as
    1157 * detailed below:
   1158 *
   1159 * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set, instructs
   1160 * the IOMMU driver to log pages that are dirtied or potentially dirtied by
   1161 * the device; designed to be used when a migration is in progress. Dirty pages
    1162 * are logged until logging is disabled by the user application by calling
    1163 * the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag.
   1164 *
   1165 * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set, instructs
   1166 * the IOMMU driver to stop logging dirtied pages.
   1167 *
   1168 * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set
   1169 * returns the dirty pages bitmap for IOMMU container for a given IOVA range.
   1170 * The user must specify the IOVA range and the pgsize through the structure
   1171 * vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This interface
   1172 * supports getting a bitmap of the smallest supported pgsize only and can be
   1173 * modified in future to get a bitmap of any specified supported pgsize. The
   1174 * user must provide a zeroed memory area for the bitmap memory and specify its
   1175 * size in bitmap.size. One bit is used to represent one page consecutively
    1176 * starting from the iova offset. The user should provide the page size in
    1177 * the bitmap.pgsize field. A set bit indicates that the page at that offset from
   1178 * iova is dirty. The caller must set argsz to a value including the size of
   1179 * structure vfio_iommu_type1_dirty_bitmap_get, but excluding the size of the
    1180 * actual bitmap. If dirty page logging is not enabled, an error will be
   1181 * returned.
   1182 *
   1183 * Only one of the flags _START, _STOP and _GET may be specified at a time.
   1184 *
   1185 */
   1186struct vfio_iommu_type1_dirty_bitmap {
   1187	__u32        argsz;
   1188	__u32        flags;
   1189#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START	(1 << 0)
   1190#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP	(1 << 1)
   1191#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP	(1 << 2)
   1192	__u8         data[];
   1193};
   1194
   1195struct vfio_iommu_type1_dirty_bitmap_get {
   1196	__u64              iova;	/* IO virtual address */
   1197	__u64              size;	/* Size of iova range */
   1198	struct vfio_bitmap bitmap;
   1199};
   1200
   1201#define VFIO_IOMMU_DIRTY_PAGES             _IO(VFIO_TYPE, VFIO_BASE + 17)
   1202
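/*
 * Illustrative sketch (not part of this header): fetching the dirty
 * bitmap for a 1 MB IOVA range with a 4 KB page size, assuming logging
 * was previously started with VFIO_IOMMU_DIRTY_PAGES_FLAG_START and
 * 'bitmap_mem' points to 32 bytes of zeroed, user-allocated memory
 * (256 pages / 8 bits per byte).
 *
 *	struct {
 *		struct vfio_iommu_type1_dirty_bitmap hdr;
 *		struct vfio_iommu_type1_dirty_bitmap_get get;
 *	} arg = {
 *		.hdr.argsz = sizeof(arg),
 *		.hdr.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP,
 *		.get.iova  = 0,
 *		.get.size  = 1024 * 1024,
 *		.get.bitmap.pgsize = 4096,
 *		.get.bitmap.size   = 32,
 *		.get.bitmap.data   = bitmap_mem,
 *	};
 *
 *	ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, &arg);
 */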
   1203/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
   1204
   1205/*
   1206 * The SPAPR TCE DDW info struct provides the information about
   1207 * the details of Dynamic DMA window capability.
   1208 *
   1209 * @pgsizes contains a page size bitmask, 4K/64K/16M are supported.
   1210 * @max_dynamic_windows_supported tells the maximum number of windows
   1211 * which the platform can create.
   1212 * @levels tells the maximum number of levels in multi-level IOMMU tables;
   1213 * this allows splitting a table into smaller chunks which reduces
   1214 * the amount of physically contiguous memory required for the table.
   1215 */
   1216struct vfio_iommu_spapr_tce_ddw_info {
   1217	__u64 pgsizes;			/* Bitmap of supported page sizes */
   1218	__u32 max_dynamic_windows_supported;
   1219	__u32 levels;
   1220};
   1221
   1222/*
    1223 * The SPAPR TCE info struct provides information about the PCI bus
    1224 * address ranges available for DMA; these values are programmed into
    1225 * the hardware, so the guest has to know them.
   1226 *
   1227 * The DMA 32 bit window start is an absolute PCI bus address.
    1228 * The IOVA addresses passed via map/unmap ioctls are absolute PCI bus
    1229 * addresses too, so the window works as a filter rather than an offset
   1230 * for IOVA addresses.
   1231 *
   1232 * Flags supported:
   1233 * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
   1234 *   (DDW) support is present. @ddw is only supported when DDW is present.
   1235 */
   1236struct vfio_iommu_spapr_tce_info {
   1237	__u32 argsz;
   1238	__u32 flags;
   1239#define VFIO_IOMMU_SPAPR_INFO_DDW	(1 << 0)	/* DDW supported */
   1240	__u32 dma32_window_start;	/* 32 bit window start (bytes) */
   1241	__u32 dma32_window_size;	/* 32 bit window size (bytes) */
   1242	struct vfio_iommu_spapr_tce_ddw_info ddw;
   1243};
   1244
   1245#define VFIO_IOMMU_SPAPR_TCE_GET_INFO	_IO(VFIO_TYPE, VFIO_BASE + 12)
   1246
   1247/*
   1248 * EEH PE operation struct provides ways to:
   1249 * - enable/disable EEH functionality;
   1250 * - unfreeze IO/DMA for frozen PE;
   1251 * - read PE state;
   1252 * - reset PE;
   1253 * - configure PE;
   1254 * - inject EEH error.
   1255 */
   1256struct vfio_eeh_pe_err {
   1257	__u32 type;
   1258	__u32 func;
   1259	__u64 addr;
   1260	__u64 mask;
   1261};
   1262
   1263struct vfio_eeh_pe_op {
   1264	__u32 argsz;
   1265	__u32 flags;
   1266	__u32 op;
   1267	union {
   1268		struct vfio_eeh_pe_err err;
   1269	};
   1270};
   1271
   1272#define VFIO_EEH_PE_DISABLE		0	/* Disable EEH functionality */
   1273#define VFIO_EEH_PE_ENABLE		1	/* Enable EEH functionality  */
   1274#define VFIO_EEH_PE_UNFREEZE_IO		2	/* Enable IO for frozen PE   */
   1275#define VFIO_EEH_PE_UNFREEZE_DMA	3	/* Enable DMA for frozen PE  */
   1276#define VFIO_EEH_PE_GET_STATE		4	/* PE state retrieval        */
   1277#define  VFIO_EEH_PE_STATE_NORMAL	0	/* PE in functional state    */
   1278#define  VFIO_EEH_PE_STATE_RESET	1	/* PE reset in progress      */
   1279#define  VFIO_EEH_PE_STATE_STOPPED	2	/* Stopped DMA and IO        */
   1280#define  VFIO_EEH_PE_STATE_STOPPED_DMA	4	/* Stopped DMA only          */
   1281#define  VFIO_EEH_PE_STATE_UNAVAIL	5	/* State unavailable         */
   1282#define VFIO_EEH_PE_RESET_DEACTIVATE	5	/* Deassert PE reset         */
   1283#define VFIO_EEH_PE_RESET_HOT		6	/* Assert hot reset          */
   1284#define VFIO_EEH_PE_RESET_FUNDAMENTAL	7	/* Assert fundamental reset  */
   1285#define VFIO_EEH_PE_CONFIGURE		8	/* PE configuration          */
   1286#define VFIO_EEH_PE_INJECT_ERR		9	/* Inject EEH error          */
   1287
   1288#define VFIO_EEH_PE_OP			_IO(VFIO_TYPE, VFIO_BASE + 21)
   1289
   1290/**
   1291 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
   1292 *
   1293 * Registers user space memory where DMA is allowed. It pins
   1294 * user pages and does the locked memory accounting so
    1295 * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
    1296 * are faster.
   1297 */
   1298struct vfio_iommu_spapr_register_memory {
   1299	__u32	argsz;
   1300	__u32	flags;
   1301	__u64	vaddr;				/* Process virtual address */
   1302	__u64	size;				/* Size of mapping (bytes) */
   1303};
   1304#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY	_IO(VFIO_TYPE, VFIO_BASE + 17)
   1305
   1306/**
   1307 * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
   1308 *
   1309 * Unregisters user space memory registered with
   1310 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
   1311 * Uses vfio_iommu_spapr_register_memory for parameters.
   1312 */
   1313#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY	_IO(VFIO_TYPE, VFIO_BASE + 18)
   1314
   1315/**
   1316 * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
   1317 *
   1318 * Creates an additional TCE table and programs it (sets a new DMA window)
   1319 * to every IOMMU group in the container. It receives page shift, window
   1320 * size and number of levels in the TCE table being created.
   1321 *
    1322 * It allocates and returns an offset on the PCI bus for the new DMA window.
   1323 */
   1324struct vfio_iommu_spapr_tce_create {
   1325	__u32 argsz;
   1326	__u32 flags;
   1327	/* in */
   1328	__u32 page_shift;
   1329	__u32 __resv1;
   1330	__u64 window_size;
   1331	__u32 levels;
   1332	__u32 __resv2;
   1333	/* out */
   1334	__u64 start_addr;
   1335};
   1336#define VFIO_IOMMU_SPAPR_TCE_CREATE	_IO(VFIO_TYPE, VFIO_BASE + 19)
   1337
   1338/**
   1339 * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
   1340 *
   1341 * Unprograms a TCE table from all groups in the container and destroys it.
   1342 * It receives a PCI bus offset as a window id.
   1343 */
   1344struct vfio_iommu_spapr_tce_remove {
   1345	__u32 argsz;
   1346	__u32 flags;
   1347	/* in */
   1348	__u64 start_addr;
   1349};
   1350#define VFIO_IOMMU_SPAPR_TCE_REMOVE	_IO(VFIO_TYPE, VFIO_BASE + 20)
   1351
   1352/* ***************************************************************** */
   1353
   1354#endif /* _UAPIVFIO_H */