cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fun_queue.h (4037B)


/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */

#ifndef _FUN_QUEUE_H
#define _FUN_QUEUE_H

#include <linux/interrupt.h>
#include <linux/io.h>

struct device;
struct fun_dev;
struct fun_queue;
struct fun_cqe_info;
struct fun_rsp_common;

typedef void (*cq_callback_t)(struct fun_queue *funq, void *data, void *msg,
			      const struct fun_cqe_info *info);

struct fun_rq_info {
	dma_addr_t dma;
	struct page *page;
};

/* A queue group consisting of an SQ, a CQ, and an optional RQ. */
struct fun_queue {
	struct fun_dev *fdev;
	spinlock_t sq_lock;

	dma_addr_t cq_dma_addr;
	dma_addr_t sq_dma_addr;
	dma_addr_t rq_dma_addr;

	u32 __iomem *cq_db;
	u32 __iomem *sq_db;
	u32 __iomem *rq_db;

	void *cqes;
	void *sq_cmds;
	struct fun_eprq_rqbuf *rqes;
	struct fun_rq_info *rq_info;

	u32 cqid;
	u32 sqid;
	u32 rqid;

	u32 cq_depth;
	u32 sq_depth;
	u32 rq_depth;

	u16 cq_head;
	u16 sq_tail;
	u16 rq_tail;

	u8 cqe_size_log2;
	u8 sqe_size_log2;

	u16 cqe_info_offset;

	u16 rq_buf_idx;
	int rq_buf_offset;
	u16 num_rqe_to_fill;

	u8 cq_intcoal_usec;
	u8 cq_intcoal_nentries;
	u8 sq_intcoal_usec;
	u8 sq_intcoal_nentries;

	u16 cq_flags;
	u16 sq_flags;
	u16 rq_flags;

	/* SQ head writeback */
	u16 sq_comp;

	volatile __be64 *sq_head;

	cq_callback_t cq_cb;
	void *cb_data;

	irq_handler_t irq_handler;
	void *irq_data;
	s16 cq_vector;
	u8 cq_phase;

	/* I/O q index */
	u16 qid;

	char irqname[24];
};
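
/*
 * cq_phase implements a phase-tag convention common to NVMe-style queues:
 * the consumer keeps an expected phase bit and flips it each time cq_head
 * wraps, so a CQE whose phase bit matches the expected value is known to
 * be new without reading a producer index (see the sketch after
 * fun_process_cq() below).
 */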

static inline void *fun_sqe_at(const struct fun_queue *funq, unsigned int pos)
{
	return funq->sq_cmds + (pos << funq->sqe_size_log2);
}

static inline void funq_sq_post_tail(struct fun_queue *funq, u16 tail)
{
	if (++tail == funq->sq_depth)
		tail = 0;
	funq->sq_tail = tail;
	writel(tail, funq->sq_db);
}
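
/*
 * Illustrative sketch (not part of the original header): how a caller
 * might post a single command with the two helpers above. The opaque
 * command buffer is an assumption, and the full-queue check is omitted;
 * production code would also track completions via sq_head/sq_comp
 * before reusing slots.
 */
static inline void example_sq_post(struct fun_queue *funq, const void *cmd)
{
	spin_lock(&funq->sq_lock);
	/* Copy the command into the slot at the current tail... */
	memcpy(fun_sqe_at(funq, funq->sq_tail), cmd,
	       1u << funq->sqe_size_log2);
	/* ...then advance sq_tail and ring the SQ doorbell. */
	funq_sq_post_tail(funq, funq->sq_tail);
	spin_unlock(&funq->sq_lock);
}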

static inline struct fun_cqe_info *funq_cqe_info(const struct fun_queue *funq,
						 void *cqe)
{
	return cqe + funq->cqe_info_offset;
}

static inline void funq_rq_post(struct fun_queue *funq)
{
	writel(funq->rq_tail, funq->rq_db);
}
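
/*
 * funq_rq_post() publishes previously written RQ buffer descriptors by
 * ringing the RQ doorbell with the current tail. The caller is presumably
 * expected to have filled funq->rqes and funq->rq_info up to rq_tail
 * beforehand; the descriptor layout is defined elsewhere and not shown
 * in this header.
 */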

struct fun_queue_alloc_req {
	u8  cqe_size_log2;
	u8  sqe_size_log2;

	u16 cq_flags;
	u16 sq_flags;
	u16 rq_flags;

	u32 cq_depth;
	u32 sq_depth;
	u32 rq_depth;

	u8 cq_intcoal_usec;
	u8 cq_intcoal_nentries;
	u8 sq_intcoal_usec;
	u8 sq_intcoal_nentries;
};

int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
		  u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
		  u8 coal_nentries, u8 coal_usec, u32 irq_num,
		  u32 scan_start_id, u32 scan_end_id,
		  u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp);
int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
		  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
		  u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
		  u32 irq_num, u32 scan_start_id, u32 scan_end_id,
		  u32 *cqidp, u32 __iomem **dbp);
void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
			 size_t hw_desc_sz, size_t sw_desc_size, bool wb,
			 int numa_node, dma_addr_t *dma_addr, void **sw_va,
			 volatile __be64 **wb_va);
void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
		       bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va);

#define fun_destroy_sq(fdev, sqid) \
	fun_res_destroy((fdev), FUN_ADMIN_OP_EPSQ, 0, (sqid))
#define fun_destroy_cq(fdev, cqid) \
	fun_res_destroy((fdev), FUN_ADMIN_OP_EPCQ, 0, (cqid))

struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
				  const struct fun_queue_alloc_req *req);
void fun_free_queue(struct fun_queue *funq);
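
/*
 * Illustrative sketch (not part of the original header): allocating a
 * queue group from a fun_queue_alloc_req. Every value below is made up
 * for the example; real depths, entry sizes, and flags come from device
 * capabilities and caller configuration.
 */
static inline struct fun_queue *example_alloc_queue(struct fun_dev *fdev)
{
	struct fun_queue_alloc_req req = {
		.cqe_size_log2 = 6,	/* 64-byte CQEs */
		.sqe_size_log2 = 6,	/* 64-byte SQEs */
		.cq_depth = 256,
		.sq_depth = 256,
		.rq_depth = 0,		/* the RQ is optional */
	};

	return fun_alloc_queue(fdev, 1 /* qid */, &req);
}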

static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
				       void *cb_data)
{
	funq->cq_cb = cb;
	funq->cb_data = cb_data;
}
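
/*
 * Illustrative sketch (not part of the original header): a minimal
 * completion callback and its registration. 'data' receives the context
 * pointer registered below and 'msg' points at the CQE that completed;
 * what the callback does with them is up to the caller.
 */
static void example_cq_cb(struct fun_queue *funq, void *data, void *msg,
			  const struct fun_cqe_info *info)
{
	/* e.g. wake a waiter or retire the command tracked in 'data' */
}

static inline void example_register_cb(struct fun_queue *funq, void *ctx)
{
	fun_set_cq_callback(funq, example_cq_cb, ctx);
}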

int fun_create_rq(struct fun_queue *funq);
int fun_create_queue(struct fun_queue *funq);

void fun_free_irq(struct fun_queue *funq);
int fun_request_irq(struct fun_queue *funq, const char *devname,
		    irq_handler_t handler, void *data);

unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max);
unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max);
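
/*
 * Illustrative sketch (not part of the original header): the shape of the
 * phase-tag poll that fun_process_cq() is expected to implement; the real
 * definition lives in the corresponding .c file. How the phase bit is
 * encoded in a CQE is device-specific, so it is abstracted behind a
 * caller-supplied predicate here, and the final doorbell write is an
 * assumption about the CQ doorbell format.
 */
typedef bool (*example_phase_fn)(const struct fun_cqe_info *info, u8 phase);

static inline unsigned int example_process_cq(struct fun_queue *funq,
					      unsigned int max,
					      example_phase_fn cqe_is_new)
{
	unsigned int done = 0;

	while (done < max) {
		void *cqe = funq->cqes +
			    ((size_t)funq->cq_head << funq->cqe_size_log2);
		struct fun_cqe_info *info = funq_cqe_info(funq, cqe);

		if (!cqe_is_new(info, funq->cq_phase))
			break;			/* no new completions */

		if (funq->cq_cb)
			funq->cq_cb(funq, funq->cb_data, cqe, info);
		done++;

		if (++funq->cq_head == funq->cq_depth) {
			funq->cq_head = 0;
			funq->cq_phase ^= 1;	/* flip expected phase on wrap */
		}
	}

	/* Acknowledge the consumed entries to the device. */
	writel(funq->cq_head, funq->cq_db);
	return done;
}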

#endif /* _FUN_QUEUE_H */