cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack.
git clone https://git.sinitax.com/sinitax/cachepc-linux

vringh.h (8830B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux host-side vring helpers; for when the kernel needs to access
 * someone else's vring.
 *
 * Copyright IBM Corporation, 2013.
 * Parts taken from drivers/vhost/vhost.c Copyright 2009 Red Hat, Inc.
 *
 * Written by: Rusty Russell <rusty@rustcorp.com.au>
 */
#ifndef _LINUX_VRINGH_H
#define _LINUX_VRINGH_H
#include <uapi/linux/virtio_ring.h>
#include <linux/virtio_byteorder.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/dma-direction.h>
#include <linux/vhost_iotlb.h>
#endif
#include <asm/barrier.h>

/* virtio_ring with information needed for host access. */
struct vringh {
	/* Everything is little endian */
	bool little_endian;

	/* Guest publishes used event idx (note: we always do). */
	bool event_indices;

	/* Can we get away with weak barriers? */
	bool weak_barriers;

	/* Last available index we saw (ie. where we're up to). */
	u16 last_avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* How many descriptors we've completed since last need_notify(). */
	u32 completed;

	/* The vring (note: it may contain user pointers!) */
	struct vring vring;

	/* IOTLB for this vring */
	struct vhost_iotlb *iotlb;

	/* spinlock to synchronize IOTLB accesses */
	spinlock_t *iotlb_lock;

	/* The function to call to notify the guest about added buffers */
	void (*notify)(struct vringh *);
};
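
/*
 * Illustrative sketch (not part of the original header): a host-side
 * backend typically embeds a struct vringh and points ->notify at its
 * own kick routine.  "my_host_dev" and its transport are hypothetical.
 */
#if 0
struct my_host_dev {
	struct vringh vrh;
	/* ... transport state, e.g. an eventfd or a doorbell register ... */
};

static void my_host_dev_notify(struct vringh *vrh)
{
	struct my_host_dev *dev = container_of(vrh, struct my_host_dev, vrh);

	/* Kick the other side here, e.g. signal dev's eventfd. */
	(void)dev;
}
#endif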

/**
 * struct vringh_config_ops - ops for creating a host vring from a virtio driver
 * @find_vrhs: find the host vrings and instantiate them
 *	vdev: the virtio_device
 *	nhvrs: the number of host vrings to find
 *	hvrs: on success, includes new host vrings
 *	callbacks: array of driver callbacks, for each host vring
 *		include a NULL entry for vqs that do not need a callback
 *	Returns 0 on success or error status
 * @del_vrhs: free the host vrings found by find_vrhs().
 */
struct virtio_device;
typedef void vrh_callback_t(struct virtio_device *, struct vringh *);
struct vringh_config_ops {
	int (*find_vrhs)(struct virtio_device *vdev, unsigned nhvrs,
			 struct vringh *vrhs[], vrh_callback_t *callbacks[]);
	void (*del_vrhs)(struct virtio_device *vdev);
};

/* The memory the vring can access, and what offset to apply. */
struct vringh_range {
	u64 start, end_incl;
	u64 offset;
};

/**
 * struct vringh_iov - iovec mangler.
 *
 * Mangles iovec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_iov {
	struct iovec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};

/**
 * struct vringh_kiov - kvec mangler.
 *
 * Mangles kvec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_kiov {
	struct kvec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};

/* Flag on max_num to indicate we're kmalloced. */
#define VRINGH_IOV_ALLOCATED 0x8000000

/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     vring_desc_t __user *desc,
		     vring_avail_t __user *avail,
		     vring_used_t __user *used);

static inline void vringh_iov_init(struct vringh_iov *iov,
				   struct iovec *iovec, unsigned num)
{
	iov->used = iov->i = 0;
	iov->consumed = 0;
	iov->max_num = num;
	iov->iov = iovec;
}

static inline void vringh_iov_reset(struct vringh_iov *iov)
{
	iov->iov[iov->i].iov_len += iov->consumed;
	iov->iov[iov->i].iov_base -= iov->consumed;
	iov->consumed = 0;
	iov->i = 0;
}

static inline void vringh_iov_cleanup(struct vringh_iov *iov)
{
	if (iov->max_num & VRINGH_IOV_ALLOCATED)
		kfree(iov->iov);
	iov->max_num = iov->used = iov->i = iov->consumed = 0;
	iov->iov = NULL;
}
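
/*
 * Illustrative sketch: typical lifecycle of a vringh_iov.  The caller
 * provides the initial iovec array; the getdesc helpers may kmalloc a
 * larger one (flagged via VRINGH_IOV_ALLOCATED), which is why
 * vringh_iov_cleanup() must run when done.  The array size is arbitrary.
 */
#if 0
static void my_iov_lifecycle(void)
{
	struct iovec stack_iov[16];
	struct vringh_iov riov;

	vringh_iov_init(&riov, stack_iov, ARRAY_SIZE(stack_iov));

	/* ... vringh_getdesc_user() fills riov; pull helpers consume it ... */

	vringh_iov_cleanup(&riov);	/* frees the array iff kmalloced */
}
#endif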

/* Convert a descriptor into iovecs. */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head);
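
/*
 * Illustrative sketch: getrange() tells vringh_getdesc_user() whether a
 * ring address is accessible and how to map it, by filling in a
 * struct vringh_range.  The single identity-mapped region and the
 * GUEST_RAM_SIZE bound are hypothetical.
 */
#if 0
static bool my_getrange(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr >= GUEST_RAM_SIZE)
		return false;

	r->start = 0;
	r->end_incl = GUEST_RAM_SIZE - 1;
	r->offset = 0;	/* ring address == host user address */
	return true;
}
#endif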

/* Copy bytes from readable iov, consuming it (and incrementing riov->i). */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len);

/* Copy bytes into writable iov, consuming it (and incrementing wiov->i). */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len);

/* Mark a descriptor as used. */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len);
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used);

/* Pretend we've never seen descriptor (for easy error handling). */
void vringh_abandon_user(struct vringh *vrh, unsigned int num);

/* Do we need to fire the eventfd to notify the other side? */
int vringh_need_notify_user(struct vringh *vrh);

bool vringh_notify_enable_user(struct vringh *vrh);
void vringh_notify_disable_user(struct vringh *vrh);
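
/*
 * Illustrative sketch of a userspace-vring service loop, assuming riov
 * and wiov were set up with vringh_iov_init() and my_getrange() is the
 * hypothetical callback sketched above: fetch a descriptor chain, pull
 * the readable part, push a reply into the writable part, mark it used,
 * then notify if the other side asked for it.
 */
#if 0
static int my_service_one(struct vringh *vrh,
			  struct vringh_iov *riov, struct vringh_iov *wiov)
{
	char req[64], resp[64];
	ssize_t len;
	u16 head;
	int err;

	err = vringh_getdesc_user(vrh, riov, wiov, my_getrange, &head);
	if (err <= 0)		/* 0 == no descriptor available */
		return err;

	len = vringh_iov_pull_user(riov, req, sizeof(req));
	if (len < 0)
		goto abandon;

	/* ... handle the request, produce a response ... */

	len = vringh_iov_push_user(wiov, resp, sizeof(resp));
	if (len < 0)
		goto abandon;

	err = vringh_complete_user(vrh, head, len);
	if (err)
		return err;

	if (vringh_need_notify_user(vrh) > 0)
		vringh_notify(vrh);
	return 1;

abandon:
	vringh_abandon_user(vrh, 1);
	return len;
}
#endif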

/* Helpers for kernelspace vrings. */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used);

static inline void vringh_kiov_init(struct vringh_kiov *kiov,
				    struct kvec *kvec, unsigned num)
{
	kiov->used = kiov->i = 0;
	kiov->consumed = 0;
	kiov->max_num = num;
	kiov->iov = kvec;
}

static inline void vringh_kiov_reset(struct vringh_kiov *kiov)
{
	kiov->iov[kiov->i].iov_len += kiov->consumed;
	kiov->iov[kiov->i].iov_base -= kiov->consumed;
	kiov->consumed = 0;
	kiov->i = 0;
}

static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
{
	if (kiov->max_num & VRINGH_IOV_ALLOCATED)
		kfree(kiov->iov);
	kiov->max_num = kiov->used = kiov->i = kiov->consumed = 0;
	kiov->iov = NULL;
}

static inline size_t vringh_kiov_length(struct vringh_kiov *kiov)
{
	size_t len = 0;
	int i;

	for (i = kiov->i; i < kiov->used; i++)
		len += kiov->iov[i].iov_len;

	return len;
}

void vringh_kiov_advance(struct vringh_kiov *kiov, size_t len);

int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp);

ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len);
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len);
void vringh_abandon_kern(struct vringh *vrh, unsigned int num);
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_kern(struct vringh *vrh);
void vringh_notify_disable_kern(struct vringh *vrh);

int vringh_need_notify_kern(struct vringh *vrh);
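
/*
 * Illustrative sketch for the kernelspace variants: same shape as the
 * userspace loop above, but with kvec-based iterators and a gfp_t for
 * possible iov-array allocation.  "my_service_one_kern" is hypothetical.
 */
#if 0
static int my_service_one_kern(struct vringh *vrh,
			       struct vringh_kiov *riov,
			       struct vringh_kiov *wiov)
{
	u8 buf[64];
	ssize_t len;
	u16 head;
	int err;

	err = vringh_getdesc_kern(vrh, riov, wiov, &head, GFP_KERNEL);
	if (err <= 0)		/* 0 == no descriptor available */
		return err;

	len = vringh_iov_pull_kern(riov, buf, sizeof(buf));
	if (len < 0) {
		vringh_abandon_kern(vrh, 1);
		return len;
	}

	/* ... process buf, optionally vringh_iov_push_kern() a reply ... */

	err = vringh_complete_kern(vrh, head, len);
	if (err)
		return err;

	if (vringh_need_notify_kern(vrh) > 0)
		vringh_notify(vrh);
	return 1;
}
#endif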

/* Notify the guest about buffers added to the used ring */
static inline void vringh_notify(struct vringh *vrh)
{
	if (vrh->notify)
		vrh->notify(vrh);
}

static inline bool vringh_is_little_endian(const struct vringh *vrh)
{
	return vrh->little_endian ||
		virtio_legacy_is_little_endian();
}

static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
{
	return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
{
	return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
}

static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
{
	return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
{
	return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
}

static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
{
	return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
{
	return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
}
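
/*
 * Illustrative sketch: ring fields are stored as __virtio16/32/64 and
 * must be converted through the helpers above, which honour the ring's
 * endianness.  Only meaningful for kernelspace rings; userspace rings
 * need the copy helpers instead.  "my_read_avail_idx" is hypothetical.
 */
#if 0
static inline u16 my_read_avail_idx(const struct vringh *vrh)
{
	return vringh16_to_cpu(vrh, READ_ONCE(vrh->vring.avail->idx));
}
#endif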

#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
		      spinlock_t *iotlb_lock);

int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used);

int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp);

ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len);
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len);

void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num);

int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_iotlb(struct vringh *vrh);
void vringh_notify_disable_iotlb(struct vringh *vrh);

int vringh_need_notify_iotlb(struct vringh *vrh);
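
/*
 * Illustrative setup sketch for the IOTLB variants: initialize the ring,
 * then attach a struct vhost_iotlb (and the spinlock protecting it) so
 * that subsequent *_iotlb accessors translate ring addresses through it.
 * "my_setup_iotlb_ring" and the weak_barriers choice are hypothetical.
 */
#if 0
static int my_setup_iotlb_ring(struct vringh *vrh, u64 features,
			       unsigned int num,
			       struct vring_desc *desc,
			       struct vring_avail *avail,
			       struct vring_used *used,
			       struct vhost_iotlb *iotlb,
			       spinlock_t *iotlb_lock)
{
	int err;

	/* Addresses in desc/avail/used are translated via the IOTLB. */
	err = vringh_init_iotlb(vrh, features, num, true,
				desc, avail, used);
	if (err)
		return err;

	vringh_set_iotlb(vrh, iotlb, iotlb_lock);
	return 0;
}
#endif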

#endif /* CONFIG_VHOST_IOTLB */

#endif /* _LINUX_VRINGH_H */