cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hw_channel.h (3692B)


/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _HW_CHANNEL_H
#define _HW_CHANNEL_H

#define DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ  4

#define HW_CHANNEL_MAX_REQUEST_SIZE  0x1000
#define HW_CHANNEL_MAX_RESPONSE_SIZE 0x1000

#define HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH 1

#define HWC_INIT_DATA_CQID		1
#define HWC_INIT_DATA_RQID		2
#define HWC_INIT_DATA_SQID		3
#define HWC_INIT_DATA_QUEUE_DEPTH	4
#define HWC_INIT_DATA_MAX_REQUEST	5
#define HWC_INIT_DATA_MAX_RESPONSE	6
#define HWC_INIT_DATA_MAX_NUM_CQS	7
#define HWC_INIT_DATA_PDID		8
#define HWC_INIT_DATA_GPA_MKEY		9

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

union hwc_init_eq_id_db {
	u32 as_uint32;

	struct {
		u32 eq_id	: 16;
		u32 doorbell	: 16;
	};
}; /* HW DATA */

union hwc_init_type_data {
	u32 as_uint32;

	struct {
		u32 value	: 24;
		u32 type	:  8;
	};
}; /* HW DATA */
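
/* Illustrative sketch (not part of the original header): the HWC_INIT_DATA_*
 * codes above are carried in the "type" field of union hwc_init_type_data,
 * with the corresponding payload in "value". A consumer of such a 32-bit
 * word might decode it roughly as follows (the variable names and the
 * handler shape are assumptions, not taken from this file):
 *
 *	union hwc_init_type_data data;
 *
 *	data.as_uint32 = eqe_data;	// 32-bit word from an init event
 *	switch (data.type) {
 *	case HWC_INIT_DATA_CQID:
 *		cq_id = data.value;
 *		break;
 *	case HWC_INIT_DATA_QUEUE_DEPTH:
 *		queue_depth = (u16)data.value;
 *		break;
 *	// ... remaining HWC_INIT_DATA_* codes handled the same way
 *	}
 */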

struct hwc_rx_oob {
	u32 type	: 6;
	u32 eom		: 1;
	u32 som		: 1;
	u32 vendor_err	: 8;
	u32 reserved1	: 16;

	u32 src_virt_wq	: 24;
	u32 src_vfid	: 8;

	u32 reserved2;

	union {
		u32 wqe_addr_low;
		u32 wqe_offset;
	};

	u32 wqe_addr_high;

	u32 client_data_unit	: 14;
	u32 reserved3		: 18;

	u32 tx_oob_data_size;

	u32 chunk_offset	: 21;
	u32 reserved4		: 11;
}; /* HW DATA */

struct hwc_tx_oob {
	u32 reserved1;

	u32 reserved2;

	u32 vrq_id	: 24;
	u32 dest_vfid	: 8;

	u32 vrcq_id	: 24;
	u32 reserved3	: 8;

	u32 vscq_id	: 24;
	u32 loopback	: 1;
	u32 lso_override: 1;
	u32 dest_pf	: 1;
	u32 reserved4	: 5;

	u32 vsq_id	: 24;
	u32 reserved5	: 8;
}; /* HW DATA */
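
/* Illustrative sketch (not part of the original header): because the HW DATA
 * layouts above rely on natural alignment rather than __packed, their sizes
 * can be pinned down with compile-time checks, e.g. (assuming
 * <linux/build_bug.h> is available to the including file):
 *
 *	static_assert(sizeof(union hwc_init_eq_id_db) == sizeof(u32));
 *	static_assert(sizeof(union hwc_init_type_data) == sizeof(u32));
 *	static_assert(sizeof(struct hwc_rx_oob) == 8 * sizeof(u32));
 *	static_assert(sizeof(struct hwc_tx_oob) == 6 * sizeof(u32));
 */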

struct hwc_work_request {
	void *buf_va;
	void *buf_sge_addr;
	u32 buf_len;
	u32 msg_size;

	struct gdma_wqe_request wqe_req;
	struct hwc_tx_oob tx_oob;

	struct gdma_sge sge;
};

/* hwc_dma_buf represents the array of in-flight WQEs.
 * mem_info, also known as the GDMA mapped memory, is partitioned and used by
 * the in-flight WQEs.
 * The number of WQEs is determined by the number of in-flight messages.
 */
struct hwc_dma_buf {
	struct gdma_mem_info mem_info;

	u32 gpa_mkey;

	u32 num_reqs;
	struct hwc_work_request reqs[];
};
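
/* Illustrative sketch (not part of the original header): with reqs[] being a
 * flexible array member sized by the number of in-flight messages, such a
 * buffer would typically be allocated with struct_size() from
 * <linux/overflow.h> (the q_depth name here is an assumption):
 *
 *	struct hwc_dma_buf *dma_buf;
 *
 *	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
 *	if (!dma_buf)
 *		return -ENOMEM;
 *	dma_buf->num_reqs = q_depth;
 */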

typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id,
				    const struct hwc_rx_oob *rx_oob);

typedef void hwc_tx_event_handler_t(void *ctx, u32 gdma_txq_id,
				    const struct hwc_rx_oob *rx_oob);

struct hwc_cq {
	struct hw_channel_context *hwc;

	struct gdma_queue *gdma_cq;
	struct gdma_queue *gdma_eq;
	struct gdma_comp *comp_buf;
	u16 queue_depth;

	hwc_rx_event_handler_t *rx_event_handler;
	void *rx_event_ctx;

	hwc_tx_event_handler_t *tx_event_handler;
	void *tx_event_ctx;
};

struct hwc_wq {
	struct hw_channel_context *hwc;

	struct gdma_queue *gdma_wq;
	struct hwc_dma_buf *msg_buf;
	u16 queue_depth;

	struct hwc_cq *hwc_cq;
};

struct hwc_caller_ctx {
	struct completion comp_event;
	void *output_buf;
	u32 output_buflen;

	u32 error; /* Linux error code */
	u32 status_code;
};

struct hw_channel_context {
	struct gdma_dev *gdma_dev;
	struct device *dev;

	u16 num_inflight_msg;
	u32 max_req_msg_size;

	u16 hwc_init_q_depth_max;
	u32 hwc_init_max_req_msg_size;
	u32 hwc_init_max_resp_msg_size;

	struct completion hwc_init_eqe_comp;

	struct hwc_wq *rxq;
	struct hwc_wq *txq;
	struct hwc_cq *cq;

	struct semaphore sema;
	struct gdma_resource inflight_msg_res;

	struct hwc_caller_ctx *caller_ctx;
};

int mana_hwc_create_channel(struct gdma_context *gc);
void mana_hwc_destroy_channel(struct gdma_context *gc);

int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp);

#endif /* _HW_CHANNEL_H */
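
For context, a caller drives the channel through the three prototypes above: the channel is created once per GDMA context, after which fixed-size request/response messages (bounded by HW_CHANNEL_MAX_REQUEST_SIZE and HW_CHANNEL_MAX_RESPONSE_SIZE) are exchanged with mana_hwc_send_request(). The sketch below is illustrative only; the my_req/my_resp layouts and the example_send() wrapper are assumptions for the sake of the example, not definitions from this header.

	/* Illustrative only: hypothetical request/response layouts. */
	struct my_req {
		u32 opcode;
		u32 arg;
	};

	struct my_resp {
		u32 status;
		u32 result;
	};

	static int example_send(struct hw_channel_context *hwc)
	{
		struct my_req req = { .opcode = 1, .arg = 42 };
		struct my_resp resp = {};
		int err;

		/* Synchronously issues the request and copies the response
		 * into resp once the completion fires.
		 */
		err = mana_hwc_send_request(hwc, sizeof(req), &req,
					    sizeof(resp), &resp);
		if (err)
			return err;

		return resp.status ? -EIO : 0;
	}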