cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

qed_sp.h (12239B)


      1/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
      2/* QLogic qed NIC Driver
      3 * Copyright (c) 2015-2017  QLogic Corporation
      4 * Copyright (c) 2019-2020 Marvell International Ltd.
      5 */
      6
      7#ifndef _QED_SP_H
      8#define _QED_SP_H
      9
     10#include <linux/types.h>
     11#include <linux/kernel.h>
     12#include <linux/list.h>
     13#include <linux/slab.h>
     14#include <linux/spinlock.h>
     15#include <linux/qed/qed_chain.h>
     16#include "qed.h"
     17#include "qed_hsi.h"
     18
/* How the caller of an SPQ post waits for (or is notified of) completion. */
enum spq_mode {
	QED_SPQ_MODE_BLOCK,     /* Client will poll a designated mem. address */
	QED_SPQ_MODE_CB,        /* Client supplies a callback */
	QED_SPQ_MODE_EBLOCK,    /* QED should block until completion */
};
     24
/* Completion callback descriptor: @function is invoked with @cookie and the
 * event ring data when the associated ramrod completes (QED_SPQ_MODE_CB).
 */
struct qed_spq_comp_cb {
	void	(*function)(struct qed_hwfn *p_hwfn,
			    void *cookie,
			    union event_ring_data *data,
			    u8 fw_return_code);
	void	*cookie;
};
     32
/**
 * qed_eth_cqe_completion(): Handles the completion of a
 *                           ramrod on the cqe ring.
 *
 * @p_hwfn: HW device data.
 * @cqe: Slow-path Rx CQE to handle.
 *
 * Return: Int.
 */
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe);
     44
 /*  QED Slow-hwfn queue interface */

/* Ramrod payload carried by an SPQ entry; exactly one member is used,
 * selected by the command/protocol the entry is initialized with.
 */
union ramrod_data {
	struct pf_start_ramrod_data pf_start;
	struct pf_update_ramrod_data pf_update;
	struct rx_queue_start_ramrod_data rx_queue_start;
	struct rx_queue_update_ramrod_data rx_queue_update;
	struct rx_queue_stop_ramrod_data rx_queue_stop;
	struct tx_queue_start_ramrod_data tx_queue_start;
	struct tx_queue_stop_ramrod_data tx_queue_stop;
	struct vport_start_ramrod_data vport_start;
	struct vport_stop_ramrod_data vport_stop;
	struct rx_update_gft_filter_ramrod_data rx_update_gft;
	struct vport_update_ramrod_data vport_update;
	struct core_rx_start_ramrod_data core_rx_queue_start;
	struct core_rx_stop_ramrod_data core_rx_queue_stop;
	struct core_tx_start_ramrod_data core_tx_queue_start;
	struct core_tx_stop_ramrod_data core_tx_queue_stop;
	struct vport_filter_update_ramrod_data vport_filter_update;

	struct rdma_init_func_ramrod_data rdma_init_func;
	struct rdma_close_func_ramrod_data rdma_close_func;
	struct rdma_register_tid_ramrod_data rdma_register_tid;
	struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
	struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
	struct roce_create_qp_req_ramrod_data roce_create_qp_req;
	struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
	struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
	struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
	struct roce_query_qp_req_ramrod_data roce_query_qp_req;
	struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
	struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
	struct roce_init_func_ramrod_data roce_init_func;
	struct rdma_create_cq_ramrod_data rdma_create_cq;
	struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
	struct rdma_srq_create_ramrod_data rdma_create_srq;
	struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
	struct rdma_srq_modify_ramrod_data rdma_modify_srq;
	struct iwarp_create_qp_ramrod_data iwarp_create_qp;
	struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload;
	struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload;
	struct iwarp_modify_qp_ramrod_data iwarp_modify_qp;
	struct iwarp_init_func_ramrod_data iwarp_init_func;
	struct fcoe_init_ramrod_params fcoe_init;
	struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld;
	struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
	struct fcoe_stat_ramrod_params fcoe_stat;

	struct iscsi_init_ramrod_params iscsi_init;
	struct iscsi_spe_conn_offload iscsi_conn_offload;
	struct iscsi_conn_update_ramrod_params iscsi_conn_update;
	struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
	struct iscsi_spe_conn_termination iscsi_conn_terminate;

	struct nvmetcp_init_ramrod_params nvmetcp_init;
	struct nvmetcp_spe_conn_offload nvmetcp_conn_offload;
	struct nvmetcp_conn_update_ramrod_params nvmetcp_conn_update;
	struct nvmetcp_spe_conn_termination nvmetcp_conn_terminate;

	struct vf_start_ramrod_data vf_start;
	struct vf_stop_ramrod_data vf_stop;
};
    106
/* Maximum credit value reported for the event queue. */
#define EQ_MAX_CREDIT   0xffffffff

/* Scheduling priority of a pending SPQ entry. */
enum spq_priority {
	QED_SPQ_PRIORITY_NORMAL,
	QED_SPQ_PRIORITY_HIGH,
};

/* Completion notification mechanism for a request: a callback (CB mode)
 * or an address the client polls (BLOCK mode).
 */
union qed_spq_req_comp {
	struct qed_spq_comp_cb	cb;
	u64			*done_addr;
};

/* Completion status written when an EBLOCK-mode entry finishes. */
struct qed_spq_comp_done {
	unsigned int	done;
	u8		fw_return_code;
};
    123
/* A single slow-path queue element: list linkage, the HSI slow-path
 * element sent to firmware, its ramrod payload and completion state.
 */
struct qed_spq_entry {
	struct list_head		list;

	u8				flags;

	/* HSI slow path element */
	struct slow_path_element	elem;

	union ramrod_data		ramrod;

	enum spq_priority		priority;

	/* pending queue for this entry */
	struct list_head		*queue;

	enum spq_mode			comp_mode;
	struct qed_spq_comp_cb		comp_cb;
	struct qed_spq_comp_done	comp_done; /* SPQ_MODE_EBLOCK */

	/* Posted entry for unlimited list entry in EBLOCK mode */
	struct qed_spq_entry		*post_ent;
};
    146
/* Event queue on which firmware posts ramrod completions. */
struct qed_eq {
	struct qed_chain	chain;
	u8			eq_sb_index;    /* index within the SB */
	__le16			*p_fw_cons;     /* ptr to index value */
};

/* Consumer queue. */
struct qed_consq {
	struct qed_chain chain;
};
    156
/* Per-protocol handler for asynchronous event ring completions. */
typedef int (*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn, u8 opcode,
				     __le16 echo, union event_ring_data *data,
				     u8 fw_return_code);

/**
 * qed_spq_register_async_cb(): Register an async completion callback
 *                              for a protocol.
 *
 * @p_hwfn: HW device data.
 * @protocol_id: Protocol whose async events @cb handles.
 * @cb: Callback to register.
 *
 * Return: Int.
 */
int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb);

/**
 * qed_spq_unregister_async_cb(): Unregister the async completion
 *                                callback of a protocol.
 *
 * @p_hwfn: HW device data.
 * @protocol_id: Protocol whose callback is removed.
 *
 * Return: Void.
 */
void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id);
    169
/* Slow-path queue state: entry pools and pending lists, the HW chain,
 * out-of-order completion tracking, statistics, doorbell data and the
 * per-protocol async completion callbacks.
 */
struct qed_spq {
	spinlock_t		lock; /* SPQ lock */

	struct list_head	unlimited_pending;
	struct list_head	pending;
	struct list_head	completion_pending;
	struct list_head	free_pool;

	struct qed_chain	chain;

	/* allocated dma-able memory for spq entries (+ramrod data) */
	dma_addr_t		p_phys;
	struct qed_spq_entry	*p_virt;

#define SPQ_RING_SIZE \
	(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))

	/* Bitmap for handling out-of-order completions */
	DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
	u8			comp_bitmap_idx;

	/* Statistics */
	u32			unlimited_pending_count;
	u32			normal_count;
	u32			high_count;
	u32			comp_sent_count;
	u32			comp_count;

	u32			cid;
	u32			db_addr_offset;
	struct core_db_data	db_data;
	qed_spq_async_comp_cb	async_comp_cb[MAX_PROTOCOL_TYPE];
};
    203
/**
 * qed_spq_post(): Posts a Slow hwfn request to FW, or lacking that
 *                 pends it to the future list.
 *
 * @p_hwfn: HW device data.
 * @p_ent: SPQ entry to post.
 * @fw_return_code: Return code from firmware.
 *
 * Return: Int.
 */
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent,
		 u8 *fw_return_code);
    217
/**
 * qed_spq_alloc(): Allocates & initializes the SPQ and EQ.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_spq_alloc(struct qed_hwfn *p_hwfn);

/**
 * qed_spq_setup(): Reset the SPQ to its start state.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_spq_setup(struct qed_hwfn *p_hwfn);
    235
/**
 * qed_spq_free(): Deallocates the given SPQ struct.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_spq_free(struct qed_hwfn *p_hwfn);

/**
 * qed_spq_get_entry(): Obtain an entry from the spq
 *                      free pool list.
 *
 * @p_hwfn: HW device data.
 * @pp_ent: Where the obtained entry is returned.
 *
 * Return: Int.
 */
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry **pp_ent);

/**
 * qed_spq_return_entry(): Return an entry to spq free pool list.
 *
 * @p_hwfn: HW device data.
 * @p_ent: Entry being returned.
 *
 * Return: Void.
 */
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
			  struct qed_spq_entry *p_ent);
/**
 * qed_eq_alloc(): Allocates & initializes an EQ struct.
 *
 * @p_hwfn: HW device data.
 * @num_elem: Number of elements in the eq.
 *
 * Return: Int.
 */
int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem);

/**
 * qed_eq_setup(): Reset the EQ to its start state.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_eq_setup(struct qed_hwfn *p_hwfn);

/**
 * qed_eq_free(): Deallocates the given EQ struct.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_eq_free(struct qed_hwfn *p_hwfn);
    295
/**
 * qed_eq_prod_update(): Update the FW with default EQ producer.
 *
 * @p_hwfn: HW device data.
 * @prod: New producer value.
 *
 * Return: Void.
 */
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
			u16 prod);

/**
 * qed_eq_completion(): Completes currently pending EQ elements.
 *
 * @p_hwfn: HW device data.
 * @cookie: Opaque caller context.
 *
 * Return: Int.
 */
int qed_eq_completion(struct qed_hwfn *p_hwfn,
		      void *cookie);
    317
/**
 * qed_spq_completion(): Completes a single event.
 *
 * @p_hwfn: HW device data.
 * @echo: echo value from cookie (used for determining completion).
 * @fw_return_code: FW return code.
 * @p_data: data from cookie (used in callback function if applicable).
 *
 * Return: Int.
 */
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data);

/**
 * qed_spq_get_cid(): Given p_hwfn, return cid for the hwfn's SPQ.
 *
 * @p_hwfn: HW device data.
 *
 * Return: u32 - SPQ CID.
 */
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
    341
/**
 * qed_consq_alloc(): Allocates & initializes a ConsQ struct.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_consq_alloc(struct qed_hwfn *p_hwfn);

/**
 * qed_consq_setup(): Reset the ConsQ to its start state.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_consq_setup(struct qed_hwfn *p_hwfn);

/**
 * qed_consq_free(): Deallocates the given ConsQ struct.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_consq_free(struct qed_hwfn *p_hwfn);

/* Post any entries still pending on the SPQ. */
int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
    369
/* Slow-hwfn low-level commands (Ramrods) function definitions. */

/* Completion flags for an SPQ entry. */
#define QED_SP_EQ_COMPLETION  0x01
#define QED_SP_CQE_COMPLETION 0x02

/* Parameters for initializing an SPQ entry (qed_sp_init_request()). */
struct qed_sp_init_data {
	u32			cid;
	u16			opaque_fid;

	/* Information regarding operation upon sending & completion */
	enum spq_mode		comp_mode;
	struct qed_spq_comp_cb *p_comp_data;
};
    383
/**
 * qed_sp_destroy_request(): Returns a SPQ entry to the pool / frees the
 *                           entry if allocated. Should be called in error
 *                           flows after initializing the SPQ entry
 *                           and before posting it.
 *
 * @p_hwfn: HW device data.
 * @p_ent: Entry to release.
 *
 * Return: Void.
 */
void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
			    struct qed_spq_entry *p_ent);

/**
 * qed_sp_init_request(): Acquire and initialize an SPQ entry for the
 *                        given command and protocol.
 *
 * @p_hwfn: HW device data.
 * @pp_ent: Where the initialized entry is returned.
 * @cmd: Ramrod command id.
 * @protocol: Protocol the ramrod belongs to.
 * @p_data: Initialization parameters (cid, fid, completion info).
 *
 * Return: Int.
 */
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
			struct qed_spq_entry **pp_ent,
			u8 cmd,
			u8 protocol,
			struct qed_sp_init_data *p_data);
    403
/**
 * qed_sp_pf_start(): PF Function Start Ramrod.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window for register access.
 * @p_tunn: Tunnel configuration, if any.
 * @allow_npar_tx_switch: Allow NPAR TX Switch.
 *
 * Return: Int.
 *
 * This ramrod is sent to initialize a physical function (PF). It will
 * configure the function related parameters and write its completion to the
 * event ring specified in the parameters.
 *
 * Ramrods complete on the common event ring for the PF. This ring is
 * allocated by the driver on host memory and its parameters are written
 * to the internal RAM of the UStorm by the Function Start Ramrod.
 *
 */

int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_tunnel_info *p_tunn,
		    bool allow_npar_tx_switch);
    428
/**
 * qed_sp_pf_update(): PF Function Update Ramrod.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 *
 * This ramrod updates function-related parameters. Every parameter can be
 * updated independently, according to configuration flags.
 */

int qed_sp_pf_update(struct qed_hwfn *p_hwfn);

/**
 * qed_sp_pf_update_stag(): Update firmware of new outer tag.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);

/**
 * qed_sp_pf_update_ufp(): PF ufp update Ramrod.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn);

/**
 * qed_sp_pf_stop(): PF Function Stop Ramrod.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);

/**
 * qed_sp_pf_update_tunn_cfg(): Update the PF's tunnel configuration.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window for register access.
 * @p_tunn: New tunnel configuration.
 * @comp_mode: Completion mode.
 * @p_comp_data: Completion data.
 *
 * Return: Int.
 */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      struct qed_tunnel_info *p_tunn,
			      enum spq_mode comp_mode,
			      struct qed_spq_comp_cb *p_comp_data);
/**
 * qed_sp_heartbeat_ramrod(): Send empty Ramrod.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */

int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
    476
    477#endif