cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

vfio.h (8197B)


      1/* SPDX-License-Identifier: GPL-2.0-only */
      2/*
      3 * VFIO API definition
      4 *
      5 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
      6 *     Author: Alex Williamson <alex.williamson@redhat.com>
      7 */
      8#ifndef VFIO_H
      9#define VFIO_H
     10
     11
     12#include <linux/iommu.h>
     13#include <linux/mm.h>
     14#include <linux/workqueue.h>
     15#include <linux/poll.h>
     16#include <uapi/linux/vfio.h>
     17
     18struct kvm;
     19
     20/*
     21 * VFIO devices can be placed in a set, this allows all devices to share this
     22 * structure and the VFIO core will provide a lock that is held around
     23 * open_device()/close_device() for all devices in the set.
     24 */
struct vfio_device_set {
	void *set_id;			/* opaque key identifying this set */
	struct mutex lock;		/* held around open_device()/close_device() for all members */
	struct list_head device_list;	/* list of member vfio_device.dev_set_list entries */
	unsigned int device_count;	/* number of devices on device_list */
};
     31
struct vfio_device {
	struct device *dev;			/* underlying struct device */
	const struct vfio_device_ops *ops;	/* bus driver callbacks */
	struct vfio_group *group;		/* owning VFIO group */
	struct vfio_device_set *dev_set;	/* set sharing the open/close lock, if any */
	struct list_head dev_set_list;		/* entry on dev_set->device_list */
	unsigned int migration_flags;		/* migration capabilities; see migration_* ops */
	/* Driver must reference the kvm during open_device or never touch it */
	struct kvm *kvm;

	/* Members below here are private, not for driver use */
	refcount_t refcount;		/* lifetime reference count */
	unsigned int open_count;	/* open file descriptors on this device */
	struct completion comp;		/* presumably completed on final put — confirm in vfio core */
	struct list_head group_next;	/* entry on the group's device list */
};
     48
/**
 * struct vfio_device_ops - VFIO bus driver device callbacks
 *
 * @name: Name of the driver providing these callbacks
 * @open_device: Called when the first file descriptor is opened for this device
 * @close_device: Opposite of open_device
 * @read: Perform read(2) on device file descriptor
 * @write: Perform write(2) on device file descriptor
 * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
 *         operations documented below
 * @mmap: Perform mmap(2) on a region of the device file descriptor
 * @request: Request for the bus driver to release the device
 * @match: Optional device name match callback (return: 0 for no-match, >0 for
 *         match, -errno for abort (ex. match with insufficient or incorrect
 *         additional args))
 * @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl
 * @migration_set_state: Optional callback to change the migration state for
 *         devices that support migration. It's mandatory for
 *         VFIO_DEVICE_FEATURE_MIGRATION migration support.
 *         The returned FD is used for data transfer according to the FSM
 *         definition. The driver is responsible to ensure that FD reaches end
 *         of stream or error whenever the migration FSM leaves a data transfer
 *         state or before close_device() returns.
 * @migration_get_state: Optional callback to get the migration state for
 *         devices that support migration. It's mandatory for
 *         VFIO_DEVICE_FEATURE_MIGRATION migration support.
 */
struct vfio_device_ops {
	char	*name;
	int	(*open_device)(struct vfio_device *vdev);
	void	(*close_device)(struct vfio_device *vdev);
	ssize_t	(*read)(struct vfio_device *vdev, char __user *buf,
			size_t count, loff_t *ppos);
	ssize_t	(*write)(struct vfio_device *vdev, const char __user *buf,
			 size_t count, loff_t *size);
	long	(*ioctl)(struct vfio_device *vdev, unsigned int cmd,
			 unsigned long arg);
	int	(*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma);
	void	(*request)(struct vfio_device *vdev, unsigned int count);
	int	(*match)(struct vfio_device *vdev, char *buf);
	int	(*device_feature)(struct vfio_device *device, u32 flags,
				  void __user *arg, size_t argsz);
	struct file *(*migration_set_state)(
		struct vfio_device *device,
		enum vfio_device_mig_state new_state);
	int (*migration_get_state)(struct vfio_device *device,
				   enum vfio_device_mig_state *curr_state);
};
     96
     97/**
     98 * vfio_check_feature - Validate user input for the VFIO_DEVICE_FEATURE ioctl
     99 * @flags: Arg from the device_feature op
    100 * @argsz: Arg from the device_feature op
    101 * @supported_ops: Combination of VFIO_DEVICE_FEATURE_GET and SET the driver
    102 *                 supports
    103 * @minsz: Minimum data size the driver accepts
    104 *
    105 * For use in a driver's device_feature op. Checks that the inputs to the
    106 * VFIO_DEVICE_FEATURE ioctl are correct for the driver's feature. Returns 1 if
    107 * the driver should execute the get or set, otherwise the relevant
    108 * value should be returned.
    109 */
    110static inline int vfio_check_feature(u32 flags, size_t argsz, u32 supported_ops,
    111				    size_t minsz)
    112{
    113	if ((flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET)) &
    114	    ~supported_ops)
    115		return -EINVAL;
    116	if (flags & VFIO_DEVICE_FEATURE_PROBE)
    117		return 0;
    118	/* Without PROBE one of GET or SET must be requested */
    119	if (!(flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET)))
    120		return -EINVAL;
    121	if (argsz < minsz)
    122		return -EINVAL;
    123	return 1;
    124}
    125
/* Initialize/teardown the vfio_device embedded in a bus driver's state. */
void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
			 const struct vfio_device_ops *ops);
void vfio_uninit_group_dev(struct vfio_device *device);

/* Register/unregister an initialized device with the VFIO core. */
int vfio_register_group_dev(struct vfio_device *device);
int vfio_register_emulated_iommu_dev(struct vfio_device *device);
void vfio_unregister_group_dev(struct vfio_device *device);

/* Place @device in the vfio_device_set keyed by @set_id. */
int vfio_assign_device_set(struct vfio_device *device, void *set_id);

/*
 * Compute the next step of the migration FSM when moving from @cur_fsm
 * toward @new_fsm — NOTE(review): semantics inferred from parameter names;
 * confirm against the implementation in the vfio core.
 */
int vfio_mig_get_next_state(struct vfio_device *device,
			    enum vfio_device_mig_state cur_fsm,
			    enum vfio_device_mig_state new_fsm,
			    enum vfio_device_mig_state *next_fsm);
    139
/*
 * External user API
 */
extern struct iommu_group *vfio_file_iommu_group(struct file *file);
extern bool vfio_file_enforced_coherent(struct file *file);
extern void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
extern bool vfio_file_has_dev(struct file *file, struct vfio_device *device);

/* Upper bound on pfn entries per pin/unpin call (one page worth of pfns). */
#define VFIO_PIN_PAGES_MAX_ENTRIES	(PAGE_SIZE/sizeof(unsigned long))

/* Pin/unpin user pages and read/write guest memory on behalf of a device. */
extern int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn,
			  int npage, int prot, unsigned long *phys_pfn);
extern int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn,
			    int npage);
extern int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova,
		       void *data, size_t len, bool write);
    156
/* each type has independent events */
enum vfio_notify_type {
	VFIO_IOMMU_NOTIFY = 0,
};

/* events for VFIO_IOMMU_NOTIFY */
#define VFIO_IOMMU_NOTIFY_DMA_UNMAP	BIT(0)

/*
 * Register/unregister @nb for the event bits in @required_events of the
 * given @type — NOTE(review): presumably the core validates/consumes the
 * event mask; confirm in the vfio core implementation.
 */
extern int vfio_register_notifier(struct vfio_device *device,
				  enum vfio_notify_type type,
				  unsigned long *required_events,
				  struct notifier_block *nb);
extern int vfio_unregister_notifier(struct vfio_device *device,
				    enum vfio_notify_type type,
				    struct notifier_block *nb);
    172
    173
/*
 * Sub-module helpers
 */
struct vfio_info_cap {
	struct vfio_info_cap_header *buf;	/* capability chain buffer */
	size_t size;				/* bytes used in buf — TODO confirm vs. allocated */
};
/* Append a capability header of @size bytes with @id/@version to @caps. */
extern struct vfio_info_cap_header *vfio_info_cap_add(
		struct vfio_info_cap *caps, size_t size, u16 id, u16 version);
/* Adjust chained capability offsets by @offset. */
extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);

extern int vfio_info_add_capability(struct vfio_info_cap *caps,
				    struct vfio_info_cap_header *cap,
				    size_t size);

/* Validate a user-supplied VFIO_DEVICE_SET_IRQS header and report data size. */
extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
					      int num_irqs, int max_irq_type,
					      size_t *data_size);
    192
struct pci_dev;
#if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH)
extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
extern void vfio_spapr_pci_eeh_release(struct pci_dev *pdev);
extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
				       unsigned int cmd,
				       unsigned long arg);
#else
/* EEH support compiled out: open/release become no-ops... */
static inline void vfio_spapr_pci_eeh_open(struct pci_dev *pdev)
{
}

static inline void vfio_spapr_pci_eeh_release(struct pci_dev *pdev)
{
}

/* ...and the EEH ioctl reports the command as unsupported. */
static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
					      unsigned int cmd,
					      unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_VFIO_SPAPR_EEH */
    216
/*
 * IRQfd - generic
 */
struct virqfd {
	void			*opaque;	/* caller context passed to handler/thread */
	struct eventfd_ctx	*eventfd;	/* eventfd being waited on */
	int			(*handler)(void *, void *);	/* called with (opaque, data) — confirm ordering in virqfd code */
	void			(*thread)(void *, void *);	/* deferred work callback, may be NULL */
	void			*data;
	struct work_struct	inject;		/* work item for deferred injection */
	wait_queue_entry_t		wait;	/* wait queue entry on the eventfd */
	poll_table		pt;		/* poll table used to hook the eventfd */
	struct work_struct	shutdown;	/* work item for teardown */
	struct virqfd		**pvirqfd;	/* back-pointer to the caller's slot (see *pvirqfd args below) */
};

/* Arm a virqfd on @fd; stores the new virqfd through @pvirqfd. */
extern int vfio_virqfd_enable(void *opaque,
			      int (*handler)(void *, void *),
			      void (*thread)(void *, void *),
			      void *data, struct virqfd **pvirqfd, int fd);
/* Tear down a virqfd previously set up by vfio_virqfd_enable(). */
extern void vfio_virqfd_disable(struct virqfd **pvirqfd);
    238
    239#endif /* VFIO_H */