cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

virtio_ring.h (3020B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H

#include <asm/barrier.h>
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>

/*
 * Barriers in virtio are tricky.  Non-SMP virtio guests can't assume
 * they're not on an SMP host system, so they need to assume real
 * barriers.  Non-SMP virtio hosts could skip the barriers, but does
 * anyone care?
 *
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so virt_mb() et al are
 * sufficient.
 *
 * For using virtio to talk to real devices (e.g. other heterogeneous
 * CPUs) we do need real barriers.  In theory, we could be using both
 * kinds of virtio, so it's a runtime decision, and the branch is
 * actually quite cheap.
 */

static inline void virtio_mb(bool weak_barriers)
{
	if (weak_barriers)
		virt_mb();
	else
		mb();
}

static inline void virtio_rmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_rmb();
	else
		dma_rmb();
}

static inline void virtio_wmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_wmb();
	else
		dma_wmb();
}

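/*
 * Usage sketch (illustrative only, not part of this header): a
 * producer-side path fills a ring entry, orders the write with
 * virtio_wmb(), and only then bumps the index the device polls, so
 * the barrier strength is picked at runtime from weak_barriers.
 *
 *	ring[head] = desc;		// fill the descriptor slot
 *	virtio_wmb(vq->weak_barriers);	// publish contents before index
 *	avail_idx++;			// device may now observe the entry
 *
 * vq, ring, head, desc and avail_idx are placeholders for the
 * caller's own ring state, not names defined here.
 */
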
#define virtio_store_mb(weak_barriers, p, v) \
do { \
	if (weak_barriers) { \
		virt_store_mb(*p, v); \
	} else { \
		WRITE_ONCE(*p, v); \
		mb(); \
	} \
} while (0)

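/*
 * Usage sketch (illustrative only): virtio_store_mb() fuses the store
 * with the full barrier, e.g. when publishing an event index that the
 * other side polls; with weak barriers it maps to virt_store_mb(),
 * otherwise to WRITE_ONCE() followed by mb().
 *
 *	virtio_store_mb(vq->weak_barriers, &event_idx, new_idx);
 *
 * event_idx and new_idx stand in for the caller's own variables.
 */
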
struct virtio_device;
struct virtqueue;

/*
 * Creates a virtqueue and allocates the descriptor ring.  If
 * may_reduce_num is set, then this may allocate a smaller ring than
 * expected.  The caller should query virtqueue_get_vring_size to learn
 * the actual size of the ring.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index,
					 unsigned int num,
					 unsigned int vring_align,
					 struct virtio_device *vdev,
					 bool weak_barriers,
					 bool may_reduce_num,
					 bool ctx,
					 bool (*notify)(struct virtqueue *vq),
					 void (*callback)(struct virtqueue *vq),
					 const char *name);

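/*
 * Usage sketch (illustrative only; my_notify and my_callback are
 * assumed caller-provided): a transport that tolerates a smaller ring
 * sets may_reduce_num and reads back the size it actually got.
 *
 *	struct virtqueue *vq;
 *	unsigned int num;
 *
 *	vq = vring_create_virtqueue(0, 256, PAGE_SIZE, vdev,
 *				    true,	// weak_barriers
 *				    true,	// may_reduce_num
 *				    false,	// ctx
 *				    my_notify, my_callback, "requests");
 *	if (vq)
 *		num = virtqueue_get_vring_size(vq);	// may be < 256
 */
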
/* Creates a virtqueue with a custom layout. */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool ctx,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name);

/*
 * Creates a virtqueue with a standard layout but a caller-allocated
 * ring.
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool ctx,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);

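/*
 * Usage sketch (illustrative only): with a caller-allocated ring the
 * caller owns the memory; vring_size() from the uapi header gives the
 * byte count for a given entry count and alignment, and the memory
 * must stay around until after vring_del_virtqueue().
 *
 *	void *pages = alloc_pages_exact(vring_size(num, vring_align),
 *					GFP_KERNEL | __GFP_ZERO);
 *	struct virtqueue *vq = vring_new_virtqueue(0, num, vring_align,
 *						   vdev, true, false, pages,
 *						   my_notify, my_callback,
 *						   "requests");
 *
 * alloc_pages_exact() is one plausible allocator here; my_notify and
 * my_callback are again assumed caller-provided.
 */
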
/*
 * Destroys a virtqueue.  If created with vring_create_virtqueue, this
 * also frees the ring.
 */
void vring_del_virtqueue(struct virtqueue *vq);

/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

irqreturn_t vring_interrupt(int irq, void *_vq);
#endif /* _LINUX_VIRTIO_RING_H */