cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qed_chain.h (16929B)


/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_CHAIN_H
#define _QED_CHAIN_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>

enum qed_chain_mode {
	/* Each Page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page, a next pointer is not required */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list */
	QED_CHAIN_MODE_PBL,
};

enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,			/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,			/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,		/* Chain starts empty */
};

enum qed_chain_cnt_type {
	/* The chain's size/prod/cons are kept in 16-bit variables */
	QED_CHAIN_CNT_TYPE_U16,

	/* The chain's size/prod/cons are kept in 32-bit variables */
	QED_CHAIN_CNT_TYPE_U32,
};

struct qed_chain_next {
	struct regpair					next_phys;
	void						*next_virt;
};

struct qed_chain_pbl_u16 {
	u16						prod_page_idx;
	u16						cons_page_idx;
};

struct qed_chain_pbl_u32 {
	u32						prod_page_idx;
	u32						cons_page_idx;
};

struct qed_chain_u16 {
	/* Cyclic index of next element to produce/consume */
	u16						prod_idx;
	u16						cons_idx;
};

struct qed_chain_u32 {
	/* Cyclic index of next element to produce/consume */
	u32						prod_idx;
	u32						cons_idx;
};

struct addr_tbl_entry {
	void						*virt_addr;
	dma_addr_t					dma_map;
};

struct qed_chain {
	/* Fastpath portion of the chain - required for commands such
	 * as produce / consume.
	 */

	/* Point to next element to produce/consume */
	void						*p_prod_elem;
	void						*p_cons_elem;

	/* Fastpath portions of the PBL [if exists] */

	struct {
		/* Table keeping the virtual and physical addresses of the
		 * chain pages, ordered to match the physical addresses
		 * in the pbl table.
		 */
		struct addr_tbl_entry			*pp_addr_tbl;

		union {
			struct qed_chain_pbl_u16	u16;
			struct qed_chain_pbl_u32	u32;
		}					c;
	}						pbl;

	union {
		struct qed_chain_u16			chain16;
		struct qed_chain_u32			chain32;
	}						u;

	/* Capacity counts only usable elements */
	u32						capacity;
	u32						page_cnt;

	enum qed_chain_mode				mode;

	/* Elements information for fast calculations */
	u16						elem_per_page;
	u16						elem_per_page_mask;
	u16						elem_size;
	u16						next_page_mask;
	u16						usable_per_page;
	u8						elem_unusable;

	enum qed_chain_cnt_type				cnt_type;

	/* Slowpath of the chain - required for initialization and destruction,
	 * but isn't involved in regular functionality.
	 */

	u32						page_size;

	/* Base address of a pre-allocated buffer for pbl */
	struct {
		__le64					*table_virt;
		dma_addr_t				table_phys;
		size_t					table_size;
	}						pbl_sp;

	/* Address of first page of the chain - the address is required
	 * for fastpath operation [consume/produce] but only for the SINGLE
	 * flavour which isn't considered fastpath [== SPQ].
	 */
	void						*p_virt_addr;
	dma_addr_t					p_phys_addr;

	/* Total number of elements [for entire chain] */
	u32						size;

	enum qed_chain_use_mode				intended_use;

	bool						b_external_pbl;
};

struct qed_chain_init_params {
	enum qed_chain_mode				mode;
	enum qed_chain_use_mode				intended_use;
	enum qed_chain_cnt_type				cnt_type;

	u32						page_size;
	u32						num_elems;
	size_t						elem_size;

	void						*ext_pbl_virt;
	dma_addr_t					ext_pbl_phys;
};
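
/* Illustrative usage sketch, not part of the original header: filling
 * qed_chain_init_params for a 16-bit PBL chain. The allocator
 * qed_chain_alloc() lives elsewhere in the qed driver; cdev, chain, rc
 * and struct my_bd are placeholders here.
 *
 *	struct qed_chain_init_params params = {
 *		.mode		= QED_CHAIN_MODE_PBL,
 *		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 *		.page_size	= QED_CHAIN_PAGE_SIZE,
 *		.num_elems	= 1024,
 *		.elem_size	= sizeof(struct my_bd),
 *	};
 *
 *	rc = qed_chain_alloc(cdev, &chain, &params);
 */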

#define QED_CHAIN_PAGE_SIZE				SZ_4K

#define ELEMS_PER_PAGE(elem_size, page_size)				     \
	((page_size) / (elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)			     \
	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ?				     \
	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) :     \
	 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode)		     \
	((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) -		     \
	       UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode))))

#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode)	     \
	DIV_ROUND_UP((elem_cnt),					     \
		     USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode)))
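
/* Worked example (illustrative, not in the original header): with 4 KiB
 * pages, 8-byte elements and QED_CHAIN_MODE_NEXT_PTR, where
 * sizeof(struct qed_chain_next) is 16 on 64-bit:
 *
 *	ELEMS_PER_PAGE(8, SZ_4K)		 = 4096 / 8	= 512
 *	UNUSABLE_ELEMS_PER_PAGE(8, mode)	 = 1 + 15 / 8	= 2
 *	USABLE_ELEMS_PER_PAGE(8, SZ_4K, mode)	 = 512 - 2	= 510
 *	QED_CHAIN_PAGE_CNT(1024, 8, SZ_4K, mode) = DIV_ROUND_UP(1024, 510) = 3
 */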

#define is_chain_u16(p)							     \
	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p)							     \
	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)

/* Accessors */

static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain)
{
	return chain->u.chain16.prod_idx;
}

static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain)
{
	return chain->u.chain16.cons_idx;
}

static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain)
{
	return chain->u.chain32.prod_idx;
}

static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain)
{
	return chain->u.chain32.cons_idx;
}

static inline u16 qed_chain_get_elem_used(const struct qed_chain *chain)
{
	u32 prod = qed_chain_get_prod_idx(chain);
	u32 cons = qed_chain_get_cons_idx(chain);
	u16 elem_per_page = chain->elem_per_page;
	u16 used;

	if (prod < cons)
		prod += (u32)U16_MAX + 1;

	used = (u16)(prod - cons);
	if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= (u16)(prod / elem_per_page - cons / elem_per_page);

	return used;
}

static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain)
{
	return (u16)(chain->capacity - qed_chain_get_elem_used(chain));
}
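
/* Worked example (illustrative): for a u16 chain with prod_idx = 2 after an
 * index wrap and cons_idx = 65530:
 *
 *	prod < cons, so prod becomes 2 + (U16_MAX + 1) = 65538
 *	used = 65538 - 65530 = 8
 *
 * In NEXT_PTR mode the count is further reduced by the number of page
 * boundaries crossed between cons and prod, since slots at page ends hold
 * next pointers rather than elements.
 */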

static inline u32 qed_chain_get_elem_used_u32(const struct qed_chain *chain)
{
	u64 prod = qed_chain_get_prod_idx_u32(chain);
	u64 cons = qed_chain_get_cons_idx_u32(chain);
	u16 elem_per_page = chain->elem_per_page;
	u32 used;

	if (prod < cons)
		prod += (u64)U32_MAX + 1;

	used = (u32)(prod - cons);
	if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= (u32)(prod / elem_per_page - cons / elem_per_page);

	return used;
}

static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain)
{
	return chain->capacity - qed_chain_get_elem_used_u32(chain);
}

static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain)
{
	return chain->usable_per_page;
}

static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain)
{
	return chain->elem_unusable;
}

static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain)
{
	return chain->page_cnt;
}

static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
{
	return chain->pbl_sp.table_phys;
}

/**
 * qed_chain_advance_page(): Advance the next element across pages for a
 *                           linked chain.
 *
 * @p_chain: Chain to advance.
 * @p_next_elem: Pointer to the next-element pointer to update.
 * @idx_to_inc: Pointer to the element index to increment (u16 or u32).
 * @page_to_inc: Pointer to the page index to increment (PBL mode).
 *
 * Return: Void.
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
	struct qed_chain_next *p_next = NULL;
	u32 page_index = 0;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		p_next = *p_next_elem;
		*p_next_elem = p_next->next_virt;
		if (is_chain_u16(p_chain))
			*(u16 *)idx_to_inc += p_chain->elem_unusable;
		else
			*(u32 *)idx_to_inc += p_chain->elem_unusable;
		break;
	case QED_CHAIN_MODE_SINGLE:
		*p_next_elem = p_chain->p_virt_addr;
		break;

	case QED_CHAIN_MODE_PBL:
		if (is_chain_u16(p_chain)) {
			if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
				*(u16 *)page_to_inc = 0;
			page_index = *(u16 *)page_to_inc;
		} else {
			if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
				*(u32 *)page_to_inc = 0;
			page_index = *(u32 *)page_to_inc;
		}
		*p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
	}
}

#define is_unusable_idx(p, idx)	\
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx) \
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx)				 \
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx)			 \
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define test_and_skip(p, idx)						   \
	do {								   \
		if (is_chain_u16(p)) {					   \
			if (is_unusable_idx(p, idx))			   \
				(p)->u.chain16.idx += (p)->elem_unusable;  \
		} else {						   \
			if (is_unusable_idx_u32(p, idx))		   \
				(p)->u.chain32.idx += (p)->elem_unusable;  \
		}							   \
	} while (0)
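
/* Illustrative note (not in the original header): with 512 elements per page
 * of which 510 are usable and elem_unusable == 2, once an index satisfies
 * (idx & elem_per_page_mask) == usable_per_page, i.e. reaches offset 510,
 * test_and_skip() adds 2 so the index lands on offset 0 of the next page,
 * skipping the two slots occupied by the next pointer.
 */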

/**
 * qed_chain_return_produced(): A chain in which the driver "Produces"
 *                              elements should use this API
 *                              to indicate previous produced elements
 *                              are now consumed.
 *
 * @p_chain: Chain.
 *
 * Return: Void.
 */
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.cons_idx++;
	else
		p_chain->u.chain32.cons_idx++;
	test_and_skip(p_chain, cons_idx);
}

/**
 * qed_chain_produce(): A chain in which the driver "Produces"
 *                      elements should use this to get a pointer to
 *                      the next element which can be "Produced". It is the
 *                      driver's responsibility to validate that the chain
 *                      has room for a new element.
 *
 * @p_chain: Chain.
 *
 * Return: void*, a pointer to next element.
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain16.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain16.prod_idx++;
	} else {
		if ((p_chain->u.chain32.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain32.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain32.prod_idx++;
	}

	p_ret = p_chain->p_prod_elem;
	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
					p_chain->elem_size);

	return p_ret;
}
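
/* A minimal usage sketch (illustrative, not part of the original header).
 * qed_chain_produce() does not check for room, so the caller must:
 *
 *	if (qed_chain_get_elem_left(chain)) {
 *		struct my_bd *bd = qed_chain_produce(chain);
 *
 *		memset(bd, 0, sizeof(*bd));
 *		... fill the BD, then notify HW of the new producer ...
 *	}
 *
 * struct my_bd stands in for a driver-specific element type.
 */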

/**
 * qed_chain_get_capacity(): Get the maximum number of usable elements
 *                           (BDs) in the chain.
 *
 * @p_chain: Chain.
 *
 * Return: the chain's capacity, i.e. the number of usable elements.
 */
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
	return p_chain->capacity;
}

/**
 * qed_chain_recycle_consumed(): Returns an element which was
 *                               previously consumed;
 *                               increments producers so they can
 *                               be written to FW.
 *
 * @p_chain: Chain.
 *
 * Return: Void.
 */
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
	test_and_skip(p_chain, prod_idx);
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx++;
	else
		p_chain->u.chain32.prod_idx++;
}

/**
 * qed_chain_consume(): A chain in which the driver utilizes data written
 *                      by a different source (i.e., FW) should use this to
 *                      access passed buffers.
 *
 * @p_chain: Chain.
 *
 * Return: void*, a pointer to the next buffer written.
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain16.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain16.cons_idx++;
	} else {
		if ((p_chain->u.chain32.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain32.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain32.cons_idx++;
	}

	p_ret = p_chain->p_cons_elem;
	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
					p_chain->elem_size);

	return p_ret;
}
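
/* A minimal usage sketch (illustrative): draining completions written by the
 * firmware, assuming the caller tracks the FW consumer index hw_cons:
 *
 *	while (hw_cons != qed_chain_get_cons_idx(chain)) {
 *		struct my_bd *bd = qed_chain_consume(chain);
 *
 *		... process the buffer the FW wrote ...
 *	}
 *
 * hw_cons and struct my_bd are placeholders for driver-specific state.
 */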

/**
 * qed_chain_reset(): Resets the chain to its start state.
 *
 * @p_chain: Pointer to a previously allocated chain.
 *
 * Return: Void.
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
	u32 i;

	if (is_chain_u16(p_chain)) {
		p_chain->u.chain16.prod_idx = 0;
		p_chain->u.chain16.cons_idx = 0;
	} else {
		p_chain->u.chain32.prod_idx = 0;
		p_chain->u.chain32.cons_idx = 0;
	}
	p_chain->p_cons_elem = p_chain->p_virt_addr;
	p_chain->p_prod_elem = p_chain->p_virt_addr;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		/* Use (page_cnt - 1) as a reset value for the prod/cons page's
		 * indices, to avoid unnecessary page advancing on the first
		 * call to qed_chain_produce/consume. Instead, the indices
		 * will be advanced to page_cnt and then will be wrapped to 0.
		 */
		u32 reset_val = p_chain->page_cnt - 1;

		if (is_chain_u16(p_chain)) {
			p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
			p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
		} else {
			p_chain->pbl.c.u32.prod_page_idx = reset_val;
			p_chain->pbl.c.u32.cons_page_idx = reset_val;
		}
	}

	switch (p_chain->intended_use) {
	case QED_CHAIN_USE_TO_CONSUME:
		/* produce empty elements */
		for (i = 0; i < p_chain->capacity; i++)
			qed_chain_recycle_consumed(p_chain);
		break;

	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
	default:
		/* Do nothing */
		break;
	}
}

/**
 * qed_chain_get_last_elem(): Returns a pointer to the last element of the
 *                            chain.
 *
 * @p_chain: Chain.
 *
 * Return: void*.
 */
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
	struct qed_chain_next *p_next = NULL;
	void *p_virt_addr = NULL;
	u32 size, last_page_idx;

	if (!p_chain->p_virt_addr)
		goto out;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		size = p_chain->elem_size * p_chain->usable_per_page;
		p_virt_addr = p_chain->p_virt_addr;
		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
		while (p_next->next_virt != p_chain->p_virt_addr) {
			p_virt_addr = p_next->next_virt;
			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
							   size);
		}
		break;
	case QED_CHAIN_MODE_SINGLE:
		p_virt_addr = p_chain->p_virt_addr;
		break;
	case QED_CHAIN_MODE_PBL:
		last_page_idx = p_chain->page_cnt - 1;
		p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
		break;
	}
	/* p_virt_addr points at this stage to the last page of the chain */
	size = p_chain->elem_size * (p_chain->usable_per_page - 1);
	p_virt_addr = (u8 *)p_virt_addr + size;
out:
	return p_virt_addr;
}

/**
 * qed_chain_set_prod(): Sets the prod to the given value.
 *
 * @p_chain: Chain.
 * @prod_idx: New producer index.
 * @p_prod_elem: Pointer to the element at the new producer index.
 *
 * Return: Void.
 */
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
				      u32 prod_idx, void *p_prod_elem)
{
	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		u32 cur_prod, page_mask, page_cnt, page_diff;

		cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
			   p_chain->u.chain32.prod_idx;

		/* Assume that number of elements in a page is power of 2 */
		page_mask = ~p_chain->elem_per_page_mask;

		/* Use "cur_prod - 1" and "prod_idx - 1" since producer index
		 * reaches the first element of next page before the page index
		 * is incremented. See qed_chain_produce().
		 * Index wrap around is not a problem because the difference
		 * between current and given producer indices is always
		 * positive and lower than the chain's capacity.
		 */
		page_diff = (((cur_prod - 1) & page_mask) -
			     ((prod_idx - 1) & page_mask)) /
			    p_chain->elem_per_page;

		page_cnt = qed_chain_get_page_cnt(p_chain);
		if (is_chain_u16(p_chain))
			p_chain->pbl.c.u16.prod_page_idx =
				(p_chain->pbl.c.u16.prod_page_idx -
				 page_diff + page_cnt) % page_cnt;
		else
			p_chain->pbl.c.u32.prod_page_idx =
				(p_chain->pbl.c.u32.prod_page_idx -
				 page_diff + page_cnt) % page_cnt;
	}

	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx = (u16)prod_idx;
	else
		p_chain->u.chain32.prod_idx = prod_idx;
	p_chain->p_prod_elem = p_prod_elem;
}
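
/* Worked example (illustrative): 512 elements per page (elem_per_page_mask ==
 * 511), rolling the producer back from cur_prod = 1300 to prod_idx = 700:
 *
 *	page_diff = (((1300 - 1) & ~511) - ((700 - 1) & ~511)) / 512
 *		  = (1024 - 512) / 512 = 1
 *
 * so prod_page_idx moves back one page, modulo page_cnt.
 */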

/**
 * qed_chain_pbl_zero_mem(): Set chain memory to 0.
 *
 * @p_chain: Chain.
 *
 * Return: Void.
 */
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
	u32 i, page_cnt;

	if (p_chain->mode != QED_CHAIN_MODE_PBL)
		return;

	page_cnt = qed_chain_get_page_cnt(p_chain);

	for (i = 0; i < page_cnt; i++)
		memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
		       p_chain->page_size);
}

#endif