cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vhost.h (9509B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	node;
	vhost_work_fn_t		fn;
	unsigned long		flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table		table;
	wait_queue_head_t	*wqh;
	wait_queue_entry_t	wait;
	struct vhost_work	work;
	__poll_t		mask;
	struct vhost_dev	*dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_dev_flush(struct vhost_dev *dev);
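
/*
 * Editorial usage sketch (not part of the original header; names are
 * hypothetical): a backend ties a vhost_poll to an eventfd or socket
 * and lets the worker thread run the handler when the file is ready.
 *
 *	static void handle_rx_kick(struct vhost_work *work)
 *	{
 *		struct vhost_virtqueue *vq =
 *			container_of(work, struct vhost_virtqueue, poll.work);
 *		(... process the ring ...)
 *	}
 *
 *	vhost_poll_init(&vq->poll, handle_rx_kick, EPOLLIN, vq->dev);
 *	vhost_poll_start(&vq->poll, eventfd_file);
 *	(...)
 *	vhost_poll_stop(&vq->poll);
 *	vhost_dev_flush(vq->dev);	(wait for queued work to drain)
 */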

struct vhost_log {
	u64 addr;
	u64 len;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
	struct eventfd_ctx *ctx;
	struct irq_bypass_producer producer;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;
	/* Whether signalled_used holds a valid value */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	struct iovec log_iov[64];

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};
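
/*
 * Editorial note on the index bookkeeping above: the ring is empty
 * once last_avail_idx catches up with the cached avail_idx, so the
 * number of buffers not yet fetched is (a sketch, relying on u16
 * wrap-around arithmetic):
 *
 *	u16 pending = vq->avail_idx - vq->last_avail_idx;
 */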

struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;
	struct task_struct *worker;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	u64 kcov_handle;
	bool use_worker;
	int (*msg_handler)(struct vhost_dev *dev, u32 asid,
			   struct vhost_iotlb_msg *msg);
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
				       struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
			       struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);
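
/*
 * Editorial datapath sketch (names and lengths hypothetical): a
 * handle_kick handler typically loops fetching descriptors, consumes
 * the iovecs, returns them as used, and re-arms notification when the
 * ring drains:
 *
 *	for (;;) {
 *		int head = vhost_get_vq_desc(vq, vq->iov,
 *					     ARRAY_SIZE(vq->iov),
 *					     &out, &in, NULL, NULL);
 *		if (unlikely(head < 0))
 *			break;
 *		if (head == vq->num) {
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;	(more buffers raced in)
 *			}
 *			break;
 *		}
 *		(... consume out/in iovecs, producing len bytes ...)
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *	}
 */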

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);

void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
			  struct vhost_iotlb_map *map);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
		if ((vq)->error_ctx)                               \
				eventfd_signal((vq)->error_ctx, 1);\
	} while (0)
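
/*
 * Editorial usage sketch (message text hypothetical): vq_err() logs a
 * ring-level error to the debug log and, if the caller set up an error
 * eventfd, signals it so userspace can react:
 *
 *	vq_err(vq, "Failed to get vq descriptor at idx %u\n", idx);
 */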

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq            Virtqueue.
 * @private_data  The private data.
 *
 * Context: Need to call with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}

/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq            Virtqueue.
 *
 * Context: Need to call with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}
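
/*
 * Editorial sketch, following the Context notes above: backends attach
 * and detach their per-queue state under the vq mutex ("priv" is a
 * hypothetical backend object):
 *
 *	mutex_lock(&vq->mutex);
 *	vhost_vq_set_backend(vq, priv);
 *	mutex_unlock(&vq->mutex);
 */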

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}
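
/*
 * Editorial sketch: feature checks gate optional ring behaviour; for
 * example, event-index interrupt suppression applies only when the
 * guest negotiated it:
 *
 *	if (vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX))
 *		(... honour used_event/avail_event indices ...)
 */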

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
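
/*
 * Editorial sketch: ring field accesses go through the accessors above
 * so one code path serves legacy native-endian and modern
 * little-endian rings, e.g. refreshing the cached available index:
 *
 *	__virtio16 idx;
 *	if (get_user(idx, &vq->avail->idx))
 *		return -EFAULT;
 *	vq->avail_idx = vhost16_to_cpu(vq, idx);
 */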
#endif