cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qed_cxt.c (72840B)


      1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
      2/* QLogic qed NIC Driver
      3 * Copyright (c) 2015-2017  QLogic Corporation
      4 * Copyright (c) 2019-2020 Marvell International Ltd.
      5 */
      6
      7#include <linux/types.h>
      8#include <linux/bitops.h>
      9#include <linux/dma-mapping.h>
     10#include <linux/errno.h>
     11#include <linux/kernel.h>
     12#include <linux/list.h>
     13#include <linux/log2.h>
     14#include <linux/pci.h>
     15#include <linux/slab.h>
     16#include <linux/string.h>
     17#include "qed.h"
     18#include "qed_cxt.h"
     19#include "qed_dev_api.h"
     20#include "qed_hsi.h"
     21#include "qed_hw.h"
     22#include "qed_init_ops.h"
     23#include "qed_rdma.h"
     24#include "qed_reg_addr.h"
     25#include "qed_sriov.h"
     26
     27/* QM constants */
     28#define QM_PQ_ELEMENT_SIZE	4 /* in bytes */
     29
     30/* Doorbell-Queue constants */
     31#define DQ_RANGE_SHIFT		4
     32#define DQ_RANGE_ALIGN		BIT(DQ_RANGE_SHIFT)
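        /* CID counts are rounded up to multiples of 16 (DQ_RANGE_ALIGN) in
         * qed_cxt_set_proto_cid_count(), and the DORQ max-ICID runtime registers
         * in qed_dq_init_pf() are programmed in units of 16 CIDs (DQ_RANGE_SHIFT).
         */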
     33
     34/* Searcher constants */
     35#define SRC_MIN_NUM_ELEMS 256
     36
     37/* Timers constants */
     38#define TM_SHIFT        7
     39#define TM_ALIGN        BIT(TM_SHIFT)
     40#define TM_ELEM_SIZE    4
     41
     42#define ILT_DEFAULT_HW_P_SIZE	4
     43
     44#define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
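        /* hw_p_size encodes the ILT page size as a power-of-two multiple of 4K;
         * the default value of 4 (ILT_DEFAULT_HW_P_SIZE) gives 1 << 16 = 64K pages.
         */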
     45#define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
     46
     47/* ILT entry structure */
     48#define ILT_ENTRY_PHY_ADDR_MASK		(~0ULL >> 12)
     49#define ILT_ENTRY_PHY_ADDR_SHIFT	0
     50#define ILT_ENTRY_VALID_MASK		0x1ULL
     51#define ILT_ENTRY_VALID_SHIFT		52
     52#define ILT_ENTRY_IN_REGS		2
     53#define ILT_REG_SIZE_IN_BYTES		4
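        /* An ILT entry spans two 32-bit runtime registers (8 bytes): the physical
         * address shifted right by 12 occupies the low 52 bits and the valid flag
         * sits at bit 52; see the SET_FIELD() calls in qed_ilt_init_pf().
         */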
     54
     55/* connection context union */
     56union conn_context {
     57	struct core_conn_context core_ctx;
     58	struct eth_conn_context eth_ctx;
     59	struct iscsi_conn_context iscsi_ctx;
     60	struct fcoe_conn_context fcoe_ctx;
     61	struct roce_conn_context roce_ctx;
     62};
     63
     64/* TYPE-0 task context - iSCSI, FCOE */
     65union type0_task_context {
     66	struct iscsi_task_context iscsi_ctx;
     67	struct fcoe_task_context fcoe_ctx;
     68};
     69
     70/* TYPE-1 task context - ROCE */
     71union type1_task_context {
     72	struct rdma_task_context roce_ctx;
     73};
     74
     75struct src_ent {
     76	__u8				opaque[56];
     77	__be64				next;
     78};
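        /* A searcher T2 entry is 64 bytes (56 opaque bytes plus a big-endian
         * 'next' pointer). Entries in a T2 page are chained through 'next', and
         * the last entry of each page points at the first entry of the next page
         * (see qed_cxt_src_t2_alloc()).
         */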
     79
     80#define CDUT_SEG_ALIGNMET		3 /* in 4k chunks */
     81#define CDUT_SEG_ALIGNMET_IN_BYTES	BIT(CDUT_SEG_ALIGNMET + 12)
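        /* With CDUT_SEG_ALIGNMET = 3 this is BIT(15) = 32K, which is why
         * qed_cdu_init_pf() may assume the ILT page size is larger than 32K when
         * converting a segment's start line into a CDU offset.
         */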
     82
     83#define CONN_CXT_SIZE(p_hwfn) \
     84	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
     85
     86#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
     87#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context))
     88
     89#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
     90	ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
     91
     92/* Alignment is inherent to the type1_task_context structure */
     93#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
     94
     95static bool src_proto(enum protocol_type type)
     96{
     97	return type == PROTOCOLID_TCP_ULP ||
     98	       type == PROTOCOLID_FCOE ||
     99	       type == PROTOCOLID_IWARP;
    100}
    101
    102static bool tm_cid_proto(enum protocol_type type)
    103{
    104	return type == PROTOCOLID_TCP_ULP ||
    105	       type == PROTOCOLID_FCOE ||
    106	       type == PROTOCOLID_ROCE ||
    107	       type == PROTOCOLID_IWARP;
    108}
    109
    110static bool tm_tid_proto(enum protocol_type type)
    111{
    112	return type == PROTOCOLID_FCOE;
    113}
    114
    115/* counts the iids for the CDU/CDUC ILT client configuration */
    116struct qed_cdu_iids {
    117	u32 pf_cids;
    118	u32 per_vf_cids;
    119};
    120
    121static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
    122			     struct qed_cdu_iids *iids)
    123{
    124	u32 type;
    125
    126	for (type = 0; type < MAX_CONN_TYPES; type++) {
    127		iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
    128		iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
    129	}
    130}
    131
    132/* counts the iids for the Searcher block configuration */
    133struct qed_src_iids {
    134	u32 pf_cids;
    135	u32 per_vf_cids;
    136};
    137
    138static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
    139			     struct qed_src_iids *iids)
    140{
    141	u32 i;
    142
    143	for (i = 0; i < MAX_CONN_TYPES; i++) {
    144		if (!src_proto(i))
    145			continue;
    146
    147		iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
    148		iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
    149	}
    150
     151	/* Additionally, account for the L2 (arfs) filters */
    152	iids->pf_cids += p_mngr->arfs_count;
    153}
    154
    155/* counts the iids for the Timers block configuration */
    156struct qed_tm_iids {
    157	u32 pf_cids;
    158	u32 pf_tids[NUM_TASK_PF_SEGMENTS];	/* per segment */
    159	u32 pf_tids_total;
    160	u32 per_vf_cids;
    161	u32 per_vf_tids;
    162};
    163
    164static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
    165			    struct qed_cxt_mngr *p_mngr,
    166			    struct qed_tm_iids *iids)
    167{
    168	bool tm_vf_required = false;
    169	bool tm_required = false;
    170	int i, j;
    171
     172	/* Timers are a special case -> we don't count how many cids require
     173	 * timers but rather what the max cid used by the timer block will be.
     174	 * Therefore we traverse in reverse order, and once we hit a protocol
    175	 * that requires the timers memory, we'll sum all the protocols up
    176	 * to that one.
    177	 */
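        	/* For example, once the highest-indexed protocol that uses timers
        	 * and has a non-zero cid_count is reached, the cid_counts of all
        	 * lower-indexed protocols are summed as well, so pf_cids reflects
        	 * the highest CID the timer block may see rather than the number
        	 * of timer users.
        	 */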
    178	for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
    179		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
    180
    181		if (tm_cid_proto(i) || tm_required) {
    182			if (p_cfg->cid_count)
    183				tm_required = true;
    184
    185			iids->pf_cids += p_cfg->cid_count;
    186		}
    187
    188		if (tm_cid_proto(i) || tm_vf_required) {
    189			if (p_cfg->cids_per_vf)
    190				tm_vf_required = true;
    191
    192			iids->per_vf_cids += p_cfg->cids_per_vf;
    193		}
    194
    195		if (tm_tid_proto(i)) {
    196			struct qed_tid_seg *segs = p_cfg->tid_seg;
    197
    198			/* for each segment there is at most one
    199			 * protocol for which count is not 0.
    200			 */
    201			for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
    202				iids->pf_tids[j] += segs[j].count;
    203
     204			/* The last array element is for the VFs. As with PF
     205			 * segments, there can be only one protocol for
    206			 * which this value is not 0.
    207			 */
    208			iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
    209		}
    210	}
    211
    212	iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
    213	iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
    214	iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);
    215
    216	for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
    217		iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
    218		iids->pf_tids_total += iids->pf_tids[j];
    219	}
    220}
    221
    222static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
    223			    struct qed_qm_iids *iids)
    224{
    225	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
    226	struct qed_tid_seg *segs;
    227	u32 vf_cids = 0, type, j;
    228	u32 vf_tids = 0;
    229
    230	for (type = 0; type < MAX_CONN_TYPES; type++) {
    231		iids->cids += p_mngr->conn_cfg[type].cid_count;
    232		vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
    233
    234		segs = p_mngr->conn_cfg[type].tid_seg;
    235		/* for each segment there is at most one
    236		 * protocol for which count is not 0.
    237		 */
    238		for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
    239			iids->tids += segs[j].count;
    240
     241		/* The last array element is for the VFs. As with PF
     242		 * segments, there can be only one protocol for
    243		 * which this value is not 0.
    244		 */
    245		vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
    246	}
    247
    248	iids->vf_cids = vf_cids;
    249	iids->tids += vf_tids * p_mngr->vf_count;
    250
    251	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
    252		   "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
    253		   iids->cids, iids->vf_cids, iids->tids, vf_tids);
    254}
    255
    256static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
    257						u32 seg)
    258{
    259	struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
    260	u32 i;
    261
    262	/* Find the protocol with tid count > 0 for this segment.
    263	 * Note: there can only be one and this is already validated.
    264	 */
    265	for (i = 0; i < MAX_CONN_TYPES; i++)
    266		if (p_cfg->conn_cfg[i].tid_seg[seg].count)
    267			return &p_cfg->conn_cfg[i].tid_seg[seg];
    268	return NULL;
    269}
    270
    271static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn,
    272				  u32 num_srqs, u32 num_xrc_srqs)
    273{
    274	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
    275
    276	p_mgr->srq_count = num_srqs;
    277	p_mgr->xrc_srq_count = num_xrc_srqs;
    278}
    279
    280u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
    281			      enum ilt_clients ilt_client)
    282{
    283	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
    284	struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client];
    285
    286	return ILT_PAGE_IN_BYTES(p_cli->p_size.val);
    287}
    288
    289static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn)
    290{
    291	u32 page_size;
    292
    293	page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
    294	return page_size / XRC_SRQ_CXT_SIZE;
    295}
    296
    297u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn)
    298{
    299	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
    300	u32 total_srqs;
    301
    302	total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count;
    303
    304	return total_srqs;
    305}
    306
    307/* set the iids count per protocol */
    308static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
    309					enum protocol_type type,
    310					u32 cid_count, u32 vf_cid_cnt)
    311{
    312	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
    313	struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
    314
    315	p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
    316	p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
    317
    318	if (type == PROTOCOLID_ROCE) {
    319		u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
    320		u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
    321		u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
    322		u32 align = elems_per_page * DQ_RANGE_ALIGN;
    323
    324		p_conn->cid_count = roundup(p_conn->cid_count, align);
    325	}
    326}
    327
    328u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
    329				enum protocol_type type, u32 *vf_cid)
    330{
    331	if (vf_cid)
    332		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
    333
    334	return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
    335}
    336
    337u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
    338				enum protocol_type type)
    339{
    340	return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
    341}
    342
    343u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
    344				enum protocol_type type)
    345{
    346	u32 cnt = 0;
    347	int i;
    348
    349	for (i = 0; i < TASK_SEGMENTS; i++)
    350		cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
    351
    352	return cnt;
    353}
    354
    355static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
    356					enum protocol_type proto,
    357					u8 seg,
    358					u8 seg_type, u32 count, bool has_fl)
    359{
    360	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
    361	struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
    362
    363	p_seg->count = count;
    364	p_seg->has_fl_mem = has_fl;
    365	p_seg->type = seg_type;
    366}
    367
    368static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
    369				 struct qed_ilt_cli_blk *p_blk,
    370				 u32 start_line, u32 total_size, u32 elem_size)
    371{
    372	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
    373
     374	/* verify that it's called only once for each block */
    375	if (p_blk->total_size)
    376		return;
    377
    378	p_blk->total_size = total_size;
    379	p_blk->real_size_in_page = 0;
    380	if (elem_size)
    381		p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
    382	p_blk->start_line = start_line;
    383}
    384
    385static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
    386				 struct qed_ilt_client_cfg *p_cli,
    387				 struct qed_ilt_cli_blk *p_blk,
    388				 u32 *p_line, enum ilt_clients client_id)
    389{
    390	if (!p_blk->total_size)
    391		return;
    392
    393	if (!p_cli->active)
    394		p_cli->first.val = *p_line;
    395
    396	p_cli->active = true;
    397	*p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
    398	p_cli->last.val = *p_line - 1;
    399
    400	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
    401		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
    402		   client_id, p_cli->first.val,
    403		   p_cli->last.val, p_blk->total_size,
    404		   p_blk->real_size_in_page, p_blk->start_line);
    405}
    406
    407static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
    408					enum ilt_clients ilt_client)
    409{
    410	u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
    411	struct qed_ilt_client_cfg *p_cli;
    412	u32 lines_to_skip = 0;
    413	u32 cxts_per_p;
    414
    415	if (ilt_client == ILT_CLI_CDUC) {
    416		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
    417
    418		cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
    419		    (u32) CONN_CXT_SIZE(p_hwfn);
    420
    421		lines_to_skip = cid_count / cxts_per_p;
    422	}
    423
    424	return lines_to_skip;
    425}
    426
    427static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
    428						  *p_cli)
    429{
    430	p_cli->active = false;
    431	p_cli->first.val = 0;
    432	p_cli->last.val = 0;
    433	return p_cli;
    434}
    435
    436static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
    437{
    438	p_blk->total_size = 0;
    439	return p_blk;
    440}
    441
    442static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
    443{
    444	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
    445	u32 cli_idx, blk_idx;
    446
    447	for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
    448		for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
    449			clients[cli_idx].pf_blks[blk_idx].total_size = 0;
    450
    451		for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
    452			clients[cli_idx].vf_blks[blk_idx].total_size = 0;
    453	}
    454}
    455
    456int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
    457{
    458	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
    459	u32 curr_line, total, i, task_size, line;
    460	struct qed_ilt_client_cfg *p_cli;
    461	struct qed_ilt_cli_blk *p_blk;
    462	struct qed_cdu_iids cdu_iids;
    463	struct qed_src_iids src_iids;
    464	struct qed_qm_iids qm_iids;
    465	struct qed_tm_iids tm_iids;
    466	struct qed_tid_seg *p_seg;
    467
    468	memset(&qm_iids, 0, sizeof(qm_iids));
    469	memset(&cdu_iids, 0, sizeof(cdu_iids));
    470	memset(&src_iids, 0, sizeof(src_iids));
    471	memset(&tm_iids, 0, sizeof(tm_iids));
    472
    473	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
    474
     475	/* Reset all ILT blocks at the beginning of ILT computation in order
    476	 * to prevent memory allocation for irrelevant blocks afterwards.
    477	 */
    478	qed_cxt_ilt_blk_reset(p_hwfn);
    479
    480	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
    481		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
    482		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
    483
    484	/* CDUC */
    485	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);
    486
    487	curr_line = p_mngr->pf_start_line;
    488
    489	/* CDUC PF */
    490	p_cli->pf_total_lines = 0;
    491
     492	/* get the counters for the CDUC and QM clients */
    493	qed_cxt_cdu_iids(p_mngr, &cdu_iids);
    494
    495	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);
    496
    497	total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
    498
    499	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
    500			     total, CONN_CXT_SIZE(p_hwfn));
    501
    502	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
    503	p_cli->pf_total_lines = curr_line - p_blk->start_line;
    504
    505	p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
    506							       ILT_CLI_CDUC);
    507
    508	/* CDUC VF */
    509	p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
    510	total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
    511
    512	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
    513			     total, CONN_CXT_SIZE(p_hwfn));
    514
    515	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
    516	p_cli->vf_total_lines = curr_line - p_blk->start_line;
    517
    518	for (i = 1; i < p_mngr->vf_count; i++)
    519		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
    520				     ILT_CLI_CDUC);
    521
    522	/* CDUT PF */
    523	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
    524	p_cli->first.val = curr_line;
    525
    526	/* first the 'working' task memory */
    527	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
    528		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
    529		if (!p_seg || p_seg->count == 0)
    530			continue;
    531
    532		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
    533		total = p_seg->count * p_mngr->task_type_size[p_seg->type];
    534		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
    535				     p_mngr->task_type_size[p_seg->type]);
    536
    537		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
    538				     ILT_CLI_CDUT);
    539	}
    540
    541	/* next the 'init' task memory (forced load memory) */
    542	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
    543		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
    544		if (!p_seg || p_seg->count == 0)
    545			continue;
    546
    547		p_blk =
    548		    qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
    549
    550		if (!p_seg->has_fl_mem) {
     551			/* The segment is active (total size of 'working'
     552			 * memory is > 0) but has no FL (forced-load, Init)
     553			 * memory. Thus:
     554			 *
     555			 * 1.   The total-size in the corresponding FL block of
     556			 *      the ILT client is set to 0 - no ILT lines are
     557			 *      provisioned and no ILT memory is allocated.
     558			 *
     559			 * 2.   The start-line of said block is set to the
     560			 *      start line of the matching working memory
     561			 *      block in the ILT client. This is later used to
     562			 *      configure the CDU segment offset registers, and
     563			 *      results in FL commands for TIDs of this
     564			 *      segment behaving as regular load commands
     565			 *      (loading TIDs from the working memory).
     566			 */
    567			line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
    568
    569			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
    570			continue;
    571		}
    572		total = p_seg->count * p_mngr->task_type_size[p_seg->type];
    573
    574		qed_ilt_cli_blk_fill(p_cli, p_blk,
    575				     curr_line, total,
    576				     p_mngr->task_type_size[p_seg->type]);
    577
    578		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
    579				     ILT_CLI_CDUT);
    580	}
    581	p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
    582
    583	/* CDUT VF */
    584	p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
    585	if (p_seg && p_seg->count) {
     586		/* Strictly speaking, we need to iterate over all VF
     587		 * task segment types, but a VF has only 1 segment
    588		 */
    589
    590		/* 'working' memory */
    591		total = p_seg->count * p_mngr->task_type_size[p_seg->type];
    592
    593		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
    594		qed_ilt_cli_blk_fill(p_cli, p_blk,
    595				     curr_line, total,
    596				     p_mngr->task_type_size[p_seg->type]);
    597
    598		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
    599				     ILT_CLI_CDUT);
    600
    601		/* 'init' memory */
    602		p_blk =
    603		    qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
    604		if (!p_seg->has_fl_mem) {
    605			/* see comment above */
    606			line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
    607			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
    608		} else {
    609			task_size = p_mngr->task_type_size[p_seg->type];
    610			qed_ilt_cli_blk_fill(p_cli, p_blk,
    611					     curr_line, total, task_size);
    612			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
    613					     ILT_CLI_CDUT);
    614		}
    615		p_cli->vf_total_lines = curr_line -
    616		    p_cli->vf_blks[0].start_line;
    617
    618		/* Now for the rest of the VFs */
    619		for (i = 1; i < p_mngr->vf_count; i++) {
    620			p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
    621			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
    622					     ILT_CLI_CDUT);
    623
    624			p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
    625			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
    626					     ILT_CLI_CDUT);
    627		}
    628	}
    629
    630	/* QM */
    631	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
    632	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
    633
    634	qed_cxt_qm_iids(p_hwfn, &qm_iids);
    635	total = qed_qm_pf_mem_size(qm_iids.cids,
    636				   qm_iids.vf_cids, qm_iids.tids,
    637				   p_hwfn->qm_info.num_pqs,
    638				   p_hwfn->qm_info.num_vf_pqs);
    639
    640	DP_VERBOSE(p_hwfn,
    641		   QED_MSG_ILT,
    642		   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
    643		   qm_iids.cids,
    644		   qm_iids.vf_cids,
    645		   qm_iids.tids,
    646		   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
    647
    648	qed_ilt_cli_blk_fill(p_cli, p_blk,
    649			     curr_line, total * 0x1000,
    650			     QM_PQ_ELEMENT_SIZE);
    651
    652	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
    653	p_cli->pf_total_lines = curr_line - p_blk->start_line;
    654
    655	/* SRC */
    656	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
    657	qed_cxt_src_iids(p_mngr, &src_iids);
    658
    659	/* Both the PF and VFs searcher connections are stored in the per PF
    660	 * database. Thus sum the PF searcher cids and all the VFs searcher
    661	 * cids.
    662	 */
    663	total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
    664	if (total) {
    665		u32 local_max = max_t(u32, total,
    666				      SRC_MIN_NUM_ELEMS);
    667
    668		total = roundup_pow_of_two(local_max);
    669
    670		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
    671		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
    672				     total * sizeof(struct src_ent),
    673				     sizeof(struct src_ent));
    674
    675		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
    676				     ILT_CLI_SRC);
    677		p_cli->pf_total_lines = curr_line - p_blk->start_line;
    678	}
    679
    680	/* TM PF */
    681	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
    682	qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
    683	total = tm_iids.pf_cids + tm_iids.pf_tids_total;
    684	if (total) {
    685		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
    686		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
    687				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);
    688
    689		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
    690				     ILT_CLI_TM);
    691		p_cli->pf_total_lines = curr_line - p_blk->start_line;
    692	}
    693
    694	/* TM VF */
    695	total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
    696	if (total) {
    697		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
    698		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
    699				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);
    700
    701		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
    702				     ILT_CLI_TM);
    703
    704		p_cli->vf_total_lines = curr_line - p_blk->start_line;
    705		for (i = 1; i < p_mngr->vf_count; i++)
    706			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
    707					     ILT_CLI_TM);
    708	}
    709
    710	/* TSDM (SRQ CONTEXT) */
    711	total = qed_cxt_get_total_srq_count(p_hwfn);
    712
    713	if (total) {
    714		p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
    715		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
    716		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
    717				     total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
    718
    719		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
    720				     ILT_CLI_TSDM);
    721		p_cli->pf_total_lines = curr_line - p_blk->start_line;
    722	}
    723
    724	*line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;
    725
    726	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
    727	    RESC_NUM(p_hwfn, QED_ILT))
    728		return -EINVAL;
    729
    730	return 0;
    731}
    732
    733u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
    734{
    735	struct qed_ilt_client_cfg *p_cli;
    736	u32 excess_lines, available_lines;
    737	struct qed_cxt_mngr *p_mngr;
    738	u32 ilt_page_size, elem_size;
    739	struct qed_tid_seg *p_seg;
    740	int i;
    741
    742	available_lines = RESC_NUM(p_hwfn, QED_ILT);
    743	excess_lines = used_lines - available_lines;
    744
    745	if (!excess_lines)
    746		return 0;
    747
    748	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
    749		return 0;
    750
    751	p_mngr = p_hwfn->p_cxt_mngr;
    752	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
    753	ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
    754
    755	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
    756		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
    757		if (!p_seg || p_seg->count == 0)
    758			continue;
    759
    760		elem_size = p_mngr->task_type_size[p_seg->type];
    761		if (!elem_size)
    762			continue;
    763
    764		return (ilt_page_size / elem_size) * excess_lines;
    765	}
    766
    767	DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
    768	return 0;
    769}
    770
    771static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
    772{
    773	struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
    774	u32 i;
    775
    776	if (!p_t2 || !p_t2->dma_mem)
    777		return;
    778
    779	for (i = 0; i < p_t2->num_pages; i++)
    780		if (p_t2->dma_mem[i].virt_addr)
    781			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
    782					  p_t2->dma_mem[i].size,
    783					  p_t2->dma_mem[i].virt_addr,
    784					  p_t2->dma_mem[i].phys_addr);
    785
    786	kfree(p_t2->dma_mem);
    787	p_t2->dma_mem = NULL;
    788}
    789
    790static int
    791qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn,
    792		       struct qed_src_t2 *p_t2, u32 total_size, u32 page_size)
    793{
    794	void **p_virt;
    795	u32 size, i;
    796
    797	if (!p_t2 || !p_t2->dma_mem)
    798		return -EINVAL;
    799
    800	for (i = 0; i < p_t2->num_pages; i++) {
    801		size = min_t(u32, total_size, page_size);
    802		p_virt = &p_t2->dma_mem[i].virt_addr;
    803
    804		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
    805					     size,
    806					     &p_t2->dma_mem[i].phys_addr,
    807					     GFP_KERNEL);
    808		if (!p_t2->dma_mem[i].virt_addr)
    809			return -ENOMEM;
    810
    811		memset(*p_virt, 0, size);
    812		p_t2->dma_mem[i].size = size;
    813		total_size -= size;
    814	}
    815
    816	return 0;
    817}
    818
    819static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
    820{
    821	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
    822	u32 conn_num, total_size, ent_per_page, psz, i;
    823	struct phys_mem_desc *p_t2_last_page;
    824	struct qed_ilt_client_cfg *p_src;
    825	struct qed_src_iids src_iids;
    826	struct qed_src_t2 *p_t2;
    827	int rc;
    828
    829	memset(&src_iids, 0, sizeof(src_iids));
    830
     831	/* If the SRC ILT client is inactive - there are no connections
     832	 * requiring the searcher; leave.
    833	 */
    834	p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
    835	if (!p_src->active)
    836		return 0;
    837
    838	qed_cxt_src_iids(p_mngr, &src_iids);
    839	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
    840	total_size = conn_num * sizeof(struct src_ent);
    841
    842	/* use the same page size as the SRC ILT client */
    843	psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
    844	p_t2 = &p_mngr->src_t2;
    845	p_t2->num_pages = DIV_ROUND_UP(total_size, psz);
    846
    847	/* allocate t2 */
    848	p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc),
    849				GFP_KERNEL);
    850	if (!p_t2->dma_mem) {
    851		DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
    852		rc = -ENOMEM;
    853		goto t2_fail;
    854	}
    855
    856	rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
    857	if (rc)
    858		goto t2_fail;
    859
    860	/* Set the t2 pointers */
    861
    862	/* entries per page - must be a power of two */
    863	ent_per_page = psz / sizeof(struct src_ent);
    864
    865	p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;
    866
    867	p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
    868	p_t2->last_free = (u64)p_t2_last_page->phys_addr +
    869	    ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
    870
    871	for (i = 0; i < p_t2->num_pages; i++) {
    872		u32 ent_num = min_t(u32,
    873				    ent_per_page,
    874				    conn_num);
    875		struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
    876		u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
    877		u32 j;
    878
    879		for (j = 0; j < ent_num - 1; j++) {
    880			val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
    881			entries[j].next = cpu_to_be64(val);
    882		}
    883
    884		if (i < p_t2->num_pages - 1)
    885			val = (u64)p_t2->dma_mem[i + 1].phys_addr;
    886		else
    887			val = 0;
    888		entries[j].next = cpu_to_be64(val);
    889
    890		conn_num -= ent_num;
    891	}
    892
    893	return 0;
    894
    895t2_fail:
    896	qed_cxt_src_t2_free(p_hwfn);
    897	return rc;
    898}
    899
    900#define for_each_ilt_valid_client(pos, clients)	\
    901	for (pos = 0; pos < MAX_ILT_CLIENTS; pos++)	\
    902		if (!clients[pos].active) {	\
    903			continue;		\
    904		} else				\
    905
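        /* The dangling 'else' above lets the macro be followed by a single
         * statement or block that executes only for active ILT clients.
         */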
    906/* Total number of ILT lines used by this PF */
    907static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
    908{
    909	u32 size = 0;
    910	u32 i;
    911
    912	for_each_ilt_valid_client(i, ilt_clients)
    913	    size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);
    914
    915	return size;
    916}
    917
    918static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
    919{
    920	struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
    921	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
    922	u32 ilt_size, i;
    923
    924	ilt_size = qed_cxt_ilt_shadow_size(p_cli);
    925
    926	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
    927		struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];
    928
    929		if (p_dma->virt_addr)
    930			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
    931					  p_dma->size, p_dma->virt_addr,
    932					  p_dma->phys_addr);
    933		p_dma->virt_addr = NULL;
    934	}
    935	kfree(p_mngr->ilt_shadow);
    936}
    937
    938static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
    939			     struct qed_ilt_cli_blk *p_blk,
    940			     enum ilt_clients ilt_client,
    941			     u32 start_line_offset)
    942{
    943	struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
    944	u32 lines, line, sz_left, lines_to_skip = 0;
    945
    946	/* Special handling for RoCE that supports dynamic allocation */
    947	if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
    948	    ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
    949		return 0;
    950
    951	lines_to_skip = p_blk->dynamic_line_cnt;
    952
    953	if (!p_blk->total_size)
    954		return 0;
    955
    956	sz_left = p_blk->total_size;
    957	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
    958	line = p_blk->start_line + start_line_offset -
    959	    p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
    960
    961	for (; lines; lines--) {
    962		dma_addr_t p_phys;
    963		void *p_virt;
    964		u32 size;
    965
    966		size = min_t(u32, sz_left, p_blk->real_size_in_page);
    967		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
    968					    &p_phys, GFP_KERNEL);
    969		if (!p_virt)
    970			return -ENOMEM;
    971
    972		ilt_shadow[line].phys_addr = p_phys;
    973		ilt_shadow[line].virt_addr = p_virt;
    974		ilt_shadow[line].size = size;
    975
    976		DP_VERBOSE(p_hwfn, QED_MSG_ILT,
    977			   "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
    978			    line, (u64)p_phys, p_virt, size);
    979
    980		sz_left -= size;
    981		line++;
    982	}
    983
    984	return 0;
    985}
    986
    987static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
    988{
    989	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
    990	struct qed_ilt_client_cfg *clients = p_mngr->clients;
    991	struct qed_ilt_cli_blk *p_blk;
    992	u32 size, i, j, k;
    993	int rc;
    994
    995	size = qed_cxt_ilt_shadow_size(clients);
    996	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc),
    997				     GFP_KERNEL);
    998	if (!p_mngr->ilt_shadow) {
    999		rc = -ENOMEM;
   1000		goto ilt_shadow_fail;
   1001	}
   1002
   1003	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
   1004		   "Allocated 0x%x bytes for ilt shadow\n",
   1005		   (u32)(size * sizeof(struct phys_mem_desc)));
   1006
   1007	for_each_ilt_valid_client(i, clients) {
   1008		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
   1009			p_blk = &clients[i].pf_blks[j];
   1010			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
   1011			if (rc)
   1012				goto ilt_shadow_fail;
   1013		}
   1014		for (k = 0; k < p_mngr->vf_count; k++) {
   1015			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
   1016				u32 lines = clients[i].vf_total_lines * k;
   1017
   1018				p_blk = &clients[i].vf_blks[j];
   1019				rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
   1020				if (rc)
   1021					goto ilt_shadow_fail;
   1022			}
   1023		}
   1024	}
   1025
   1026	return 0;
   1027
   1028ilt_shadow_fail:
   1029	qed_ilt_shadow_free(p_hwfn);
   1030	return rc;
   1031}
   1032
   1033static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
   1034{
   1035	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
   1036	u32 type, vf;
   1037
   1038	for (type = 0; type < MAX_CONN_TYPES; type++) {
   1039		bitmap_free(p_mngr->acquired[type].cid_map);
   1040		p_mngr->acquired[type].max_count = 0;
   1041		p_mngr->acquired[type].start_cid = 0;
   1042
   1043		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
   1044			bitmap_free(p_mngr->acquired_vf[type][vf].cid_map);
   1045			p_mngr->acquired_vf[type][vf].max_count = 0;
   1046			p_mngr->acquired_vf[type][vf].start_cid = 0;
   1047		}
   1048	}
   1049}
   1050
   1051static int
   1052qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
   1053			 u32 type,
   1054			 u32 cid_start,
   1055			 u32 cid_count, struct qed_cid_acquired_map *p_map)
   1056{
   1057	if (!cid_count)
   1058		return 0;
   1059
   1060	p_map->cid_map = bitmap_zalloc(cid_count, GFP_KERNEL);
   1061	if (!p_map->cid_map)
   1062		return -ENOMEM;
   1063
   1064	p_map->max_count = cid_count;
   1065	p_map->start_cid = cid_start;
   1066
   1067	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
   1068		   "Type %08x start: %08x count %08x\n",
   1069		   type, p_map->start_cid, p_map->max_count);
   1070
   1071	return 0;
   1072}
   1073
   1074static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
   1075{
   1076	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
   1077	u32 start_cid = 0, vf_start_cid = 0;
   1078	u32 type, vf;
   1079
   1080	for (type = 0; type < MAX_CONN_TYPES; type++) {
   1081		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
   1082		struct qed_cid_acquired_map *p_map;
   1083
   1084		/* Handle PF maps */
   1085		p_map = &p_mngr->acquired[type];
   1086		if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
   1087					     p_cfg->cid_count, p_map))
   1088			goto cid_map_fail;
   1089
   1090		/* Handle VF maps */
   1091		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
   1092			p_map = &p_mngr->acquired_vf[type][vf];
   1093			if (qed_cid_map_alloc_single(p_hwfn, type,
   1094						     vf_start_cid,
   1095						     p_cfg->cids_per_vf, p_map))
   1096				goto cid_map_fail;
   1097		}
   1098
   1099		start_cid += p_cfg->cid_count;
   1100		vf_start_cid += p_cfg->cids_per_vf;
   1101	}
   1102
   1103	return 0;
   1104
   1105cid_map_fail:
   1106	qed_cid_map_free(p_hwfn);
   1107	return -ENOMEM;
   1108}
   1109
   1110int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
   1111{
   1112	struct qed_ilt_client_cfg *clients;
   1113	struct qed_cxt_mngr *p_mngr;
   1114	u32 i;
   1115
   1116	p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
   1117	if (!p_mngr)
   1118		return -ENOMEM;
   1119
   1120	/* Initialize ILT client registers */
   1121	clients = p_mngr->clients;
   1122	clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
   1123	clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
   1124	clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
   1125
   1126	clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
   1127	clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
   1128	clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
   1129
   1130	clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
   1131	clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
   1132	clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
   1133
   1134	clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
   1135	clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
   1136	clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
   1137
   1138	clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
   1139	clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
   1140	clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
   1141
   1142	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
   1143	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
   1144	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
   1145	/* default ILT page size for all clients is 64K */
   1146	for (i = 0; i < MAX_ILT_CLIENTS; i++)
   1147		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
   1148
   1149	p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);
   1150
   1151	/* Initialize task sizes */
   1152	p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
   1153	p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
   1154
   1155	if (p_hwfn->cdev->p_iov_info) {
   1156		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
   1157		p_mngr->first_vf_in_pf =
   1158			p_hwfn->cdev->p_iov_info->first_vf_in_pf;
   1159	}
   1160	/* Initialize the dynamic ILT allocation mutex */
   1161	mutex_init(&p_mngr->mutex);
   1162
    1163	/* Set the cxt manager pointer prior to further allocations */
   1164	p_hwfn->p_cxt_mngr = p_mngr;
   1165
   1166	return 0;
   1167}
   1168
   1169int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
   1170{
   1171	int rc;
   1172
   1173	/* Allocate the ILT shadow table */
   1174	rc = qed_ilt_shadow_alloc(p_hwfn);
   1175	if (rc)
   1176		goto tables_alloc_fail;
   1177
   1178	/* Allocate the T2  table */
   1179	rc = qed_cxt_src_t2_alloc(p_hwfn);
   1180	if (rc)
   1181		goto tables_alloc_fail;
   1182
   1183	/* Allocate and initialize the acquired cids bitmaps */
   1184	rc = qed_cid_map_alloc(p_hwfn);
   1185	if (rc)
   1186		goto tables_alloc_fail;
   1187
   1188	return 0;
   1189
   1190tables_alloc_fail:
   1191	qed_cxt_mngr_free(p_hwfn);
   1192	return rc;
   1193}
   1194
   1195void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
   1196{
   1197	if (!p_hwfn->p_cxt_mngr)
   1198		return;
   1199
   1200	qed_cid_map_free(p_hwfn);
   1201	qed_cxt_src_t2_free(p_hwfn);
   1202	qed_ilt_shadow_free(p_hwfn);
   1203	kfree(p_hwfn->p_cxt_mngr);
   1204
   1205	p_hwfn->p_cxt_mngr = NULL;
   1206}
   1207
   1208void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
   1209{
   1210	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
   1211	struct qed_cid_acquired_map *p_map;
   1212	struct qed_conn_type_cfg *p_cfg;
   1213	int type;
   1214
   1215	/* Reset acquired cids */
   1216	for (type = 0; type < MAX_CONN_TYPES; type++) {
   1217		u32 vf;
   1218
   1219		p_cfg = &p_mngr->conn_cfg[type];
   1220		if (p_cfg->cid_count) {
   1221			p_map = &p_mngr->acquired[type];
   1222			bitmap_zero(p_map->cid_map, p_map->max_count);
   1223		}
   1224
   1225		if (!p_cfg->cids_per_vf)
   1226			continue;
   1227
   1228		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
   1229			p_map = &p_mngr->acquired_vf[type][vf];
   1230			bitmap_zero(p_map->cid_map, p_map->max_count);
   1231		}
   1232	}
   1233}
   1234
   1235/* CDU Common */
   1236#define CDUC_CXT_SIZE_SHIFT \
   1237	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
   1238
   1239#define CDUC_CXT_SIZE_MASK \
   1240	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
   1241
   1242#define CDUC_BLOCK_WASTE_SHIFT \
   1243	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
   1244
   1245#define CDUC_BLOCK_WASTE_MASK \
   1246	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
   1247
   1248#define CDUC_NCIB_SHIFT	\
   1249	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
   1250
   1251#define CDUC_NCIB_MASK \
   1252	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
   1253
   1254#define CDUT_TYPE0_CXT_SIZE_SHIFT \
   1255	CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
   1256
   1257#define CDUT_TYPE0_CXT_SIZE_MASK		\
   1258	(CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >>	\
   1259	 CDUT_TYPE0_CXT_SIZE_SHIFT)
   1260
   1261#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
   1262	CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
   1263
   1264#define CDUT_TYPE0_BLOCK_WASTE_MASK		       \
   1265	(CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
   1266	 CDUT_TYPE0_BLOCK_WASTE_SHIFT)
   1267
   1268#define CDUT_TYPE0_NCIB_SHIFT \
   1269	CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
   1270
   1271#define CDUT_TYPE0_NCIB_MASK				 \
   1272	(CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
   1273	 CDUT_TYPE0_NCIB_SHIFT)
   1274
   1275#define CDUT_TYPE1_CXT_SIZE_SHIFT \
   1276	CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
   1277
   1278#define CDUT_TYPE1_CXT_SIZE_MASK		\
   1279	(CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >>	\
   1280	 CDUT_TYPE1_CXT_SIZE_SHIFT)
   1281
   1282#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
   1283	CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
   1284
   1285#define CDUT_TYPE1_BLOCK_WASTE_MASK		       \
   1286	(CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
   1287	 CDUT_TYPE1_BLOCK_WASTE_SHIFT)
   1288
   1289#define CDUT_TYPE1_NCIB_SHIFT \
   1290	CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
   1291
   1292#define CDUT_TYPE1_NCIB_MASK				 \
   1293	(CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
   1294	 CDUT_TYPE1_NCIB_SHIFT)
   1295
   1296static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
   1297{
   1298	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
   1299
   1300	/* CDUC - connection configuration */
   1301	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
   1302	cxt_size = CONN_CXT_SIZE(p_hwfn);
   1303	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
   1304	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
   1305
   1306	SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
   1307	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
   1308	SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
   1309	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
   1310
   1311	/* CDUT - type-0 tasks configuration */
   1312	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
   1313	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
   1314	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
   1315	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
   1316
    1317	/* cxt size and block-waste are multiples of 8 */
   1318	cdu_params = 0;
   1319	SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
   1320	SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
   1321	SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
   1322	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
   1323
   1324	/* CDUT - type-1 tasks configuration */
   1325	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
   1326	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
   1327	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
   1328
    1329	/* cxt size and block-waste are multiples of 8 */
   1330	cdu_params = 0;
   1331	SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
   1332	SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
   1333	SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
   1334	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
   1335}
   1336
   1337/* CDU PF */
   1338#define CDU_SEG_REG_TYPE_SHIFT          CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
   1339#define CDU_SEG_REG_TYPE_MASK           0x1
   1340#define CDU_SEG_REG_OFFSET_SHIFT        0
   1341#define CDU_SEG_REG_OFFSET_MASK         CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
   1342
   1343static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
   1344{
   1345	struct qed_ilt_client_cfg *p_cli;
   1346	struct qed_tid_seg *p_seg;
   1347	u32 cdu_seg_params, offset;
   1348	int i;
   1349
   1350	static const u32 rt_type_offset_arr[] = {
   1351		CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
   1352		CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
   1353		CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
   1354		CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
   1355	};
   1356
   1357	static const u32 rt_type_offset_fl_arr[] = {
   1358		CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
   1359		CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
   1360		CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
   1361		CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
   1362	};
   1363
   1364	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
   1365
    1366	/* Only the CDUT client requires initialization during the PF phase */
   1367	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
   1368		/* Segment 0 */
   1369		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
   1370		if (!p_seg)
   1371			continue;
   1372
   1373		/* Note: start_line is already adjusted for the CDU
   1374		 * segment register granularity, so we just need to
   1375		 * divide. Adjustment is implicit as we assume ILT
   1376		 * Page size is larger than 32K!
   1377		 */
   1378		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
   1379			  (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
   1380			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
   1381
   1382		cdu_seg_params = 0;
   1383		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
   1384		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
   1385		STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
   1386
   1387		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
   1388			  (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
   1389			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
   1390
   1391		cdu_seg_params = 0;
   1392		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
   1393		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
   1394		STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
   1395	}
   1396}
   1397
   1398void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
   1399		    struct qed_ptt *p_ptt, bool is_pf_loading)
   1400{
   1401	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
   1402	struct qed_qm_pf_rt_init_params params;
   1403	struct qed_qm_iids iids;
   1404
   1405	memset(&iids, 0, sizeof(iids));
   1406	qed_cxt_qm_iids(p_hwfn, &iids);
   1407
   1408	memset(&params, 0, sizeof(params));
   1409	params.port_id = p_hwfn->port_id;
   1410	params.pf_id = p_hwfn->rel_pf_id;
   1411	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
   1412	params.is_pf_loading = is_pf_loading;
   1413	params.num_pf_cids = iids.cids;
   1414	params.num_vf_cids = iids.vf_cids;
   1415	params.num_tids = iids.tids;
   1416	params.start_pq = qm_info->start_pq;
   1417	params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
   1418	params.num_vf_pqs = qm_info->num_vf_pqs;
   1419	params.start_vport = qm_info->start_vport;
   1420	params.num_vports = qm_info->num_vports;
   1421	params.pf_wfq = qm_info->pf_wfq;
   1422	params.pf_rl = qm_info->pf_rl;
   1423	params.pq_params = qm_info->qm_pq_params;
   1424	params.vport_params = qm_info->qm_vport_params;
   1425
   1426	qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
   1427}
   1428
   1429/* CM PF */
   1430static void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
   1431{
   1432	/* XCM pure-LB queue */
   1433	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
   1434		     qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
   1435}
   1436
   1437/* DQ PF */
   1438static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
   1439{
   1440	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
   1441	u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
   1442
   1443	dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
   1444	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
   1445
   1446	dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
   1447	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
   1448
   1449	dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
   1450	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
   1451
   1452	dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
   1453	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
   1454
   1455	dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
   1456	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
   1457
   1458	dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
   1459	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
   1460
   1461	dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
   1462	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
   1463
   1464	dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
   1465	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
   1466
   1467	dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
   1468	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
   1469
   1470	dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
   1471	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
   1472
   1473	dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
   1474	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
   1475
   1476	dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
   1477	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
   1478
   1479	/* Connection types 6 & 7 are not in use, yet they must be configured
   1480	 * as the highest possible connection. Not configuring them means the
    1481	 * defaults will be used, and with a large number of cids a bug may
    1482	 * occur if the defaults are smaller than dq_pf_max_cid /
   1483	 * dq_vf_max_cid.
   1484	 */
   1485	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
   1486	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
   1487
   1488	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
   1489	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
   1490}
   1491
   1492static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
   1493{
   1494	struct qed_ilt_client_cfg *ilt_clients;
   1495	int i;
   1496
   1497	ilt_clients = p_hwfn->p_cxt_mngr->clients;
   1498	for_each_ilt_valid_client(i, ilt_clients) {
   1499		STORE_RT_REG(p_hwfn,
   1500			     ilt_clients[i].first.reg,
   1501			     ilt_clients[i].first.val);
   1502		STORE_RT_REG(p_hwfn,
   1503			     ilt_clients[i].last.reg, ilt_clients[i].last.val);
   1504		STORE_RT_REG(p_hwfn,
   1505			     ilt_clients[i].p_size.reg,
   1506			     ilt_clients[i].p_size.val);
   1507	}
   1508}
   1509
   1510static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
   1511{
   1512	struct qed_ilt_client_cfg *p_cli;
   1513	u32 blk_factor;
   1514
    1515	/* For simplicity we set the 'block' to be an ILT page */
   1516	if (p_hwfn->cdev->p_iov_info) {
   1517		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
   1518
   1519		STORE_RT_REG(p_hwfn,
   1520			     PSWRQ2_REG_VF_BASE_RT_OFFSET,
   1521			     p_iov->first_vf_in_pf);
   1522		STORE_RT_REG(p_hwfn,
   1523			     PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
   1524			     p_iov->first_vf_in_pf + p_iov->total_vfs);
   1525	}
   1526
   1527	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
   1528	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
   1529	if (p_cli->active) {
   1530		STORE_RT_REG(p_hwfn,
   1531			     PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
   1532			     blk_factor);
   1533		STORE_RT_REG(p_hwfn,
   1534			     PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
   1535			     p_cli->pf_total_lines);
   1536		STORE_RT_REG(p_hwfn,
   1537			     PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
   1538			     p_cli->vf_total_lines);
   1539	}
   1540
   1541	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
   1542	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
   1543	if (p_cli->active) {
   1544		STORE_RT_REG(p_hwfn,
   1545			     PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
   1546			     blk_factor);
   1547		STORE_RT_REG(p_hwfn,
   1548			     PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
   1549			     p_cli->pf_total_lines);
   1550		STORE_RT_REG(p_hwfn,
   1551			     PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
   1552			     p_cli->vf_total_lines);
   1553	}
   1554
   1555	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
   1556	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
   1557	if (p_cli->active) {
   1558		STORE_RT_REG(p_hwfn,
   1559			     PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
   1560		STORE_RT_REG(p_hwfn,
   1561			     PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
   1562			     p_cli->pf_total_lines);
   1563		STORE_RT_REG(p_hwfn,
   1564			     PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
   1565			     p_cli->vf_total_lines);
   1566	}
   1567}
   1568
   1569/* ILT (PSWRQ2) PF */
   1570static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
   1571{
   1572	struct qed_ilt_client_cfg *clients;
   1573	struct qed_cxt_mngr *p_mngr;
   1574	struct phys_mem_desc *p_shdw;
   1575	u32 line, rt_offst, i;
   1576
   1577	qed_ilt_bounds_init(p_hwfn);
   1578	qed_ilt_vf_bounds_init(p_hwfn);
   1579
   1580	p_mngr = p_hwfn->p_cxt_mngr;
   1581	p_shdw = p_mngr->ilt_shadow;
   1582	clients = p_hwfn->p_cxt_mngr->clients;
   1583
   1584	for_each_ilt_valid_client(i, clients) {
    1585		/** Client's 1st val and RT array are absolute, ILT shadow's
   1586		 *  lines are relative.
   1587		 */
   1588		line = clients[i].first.val - p_mngr->pf_start_line;
   1589		rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
   1590			   clients[i].first.val * ILT_ENTRY_IN_REGS;
   1591
   1592		for (; line <= clients[i].last.val - p_mngr->pf_start_line;
   1593		     line++, rt_offst += ILT_ENTRY_IN_REGS) {
   1594			u64 ilt_hw_entry = 0;
   1595
    1596			/* p_virt could be NULL in case of dynamic
    1597			 * allocation
    1598			 */
   1599			if (p_shdw[line].virt_addr) {
   1600				SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
   1601				SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
   1602					  (p_shdw[line].phys_addr >> 12));
   1603
   1604				DP_VERBOSE(p_hwfn, QED_MSG_ILT,
   1605					   "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
   1606					   rt_offst, line, i,
   1607					   (u64)(p_shdw[line].phys_addr >> 12));
   1608			}
   1609
   1610			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
   1611		}
   1612	}
   1613}
   1614
   1615/* SRC (Searcher) PF */
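        /* The searcher hash is sized to the next power of two of the total
         * PF + VF connection count (at least SRC_MIN_NUM_ELEMS wide); the raw
         * count goes to COUNTFREE, and FIRSTFREE/LASTFREE point at the T2
         * free-list bounds.
         */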
   1616static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
   1617{
   1618	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
   1619	u32 rounded_conn_num, conn_num, conn_max;
   1620	struct qed_src_iids src_iids;
   1621
   1622	memset(&src_iids, 0, sizeof(src_iids));
   1623	qed_cxt_src_iids(p_mngr, &src_iids);
   1624	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
   1625	if (!conn_num)
   1626		return;
   1627
   1628	conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
   1629	rounded_conn_num = roundup_pow_of_two(conn_max);
   1630
   1631	STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
   1632	STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
   1633		     ilog2(rounded_conn_num));
   1634
   1635	STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
   1636			 p_hwfn->p_cxt_mngr->src_t2.first_free);
   1637	STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
   1638			 p_hwfn->p_cxt_mngr->src_t2.last_free);
   1639}
   1640
   1641/* Timers PF */
   1642#define TM_CFG_NUM_IDS_SHIFT            0
   1643#define TM_CFG_NUM_IDS_MASK             0xFFFFULL
   1644#define TM_CFG_PRE_SCAN_OFFSET_SHIFT    16
   1645#define TM_CFG_PRE_SCAN_OFFSET_MASK     0x1FFULL
   1646#define TM_CFG_PARENT_PF_SHIFT          25
   1647#define TM_CFG_PARENT_PF_MASK           0x7ULL
   1648
   1649#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT  30
   1650#define TM_CFG_CID_PRE_SCAN_ROWS_MASK   0x1FFULL
   1651
   1652#define TM_CFG_TID_OFFSET_SHIFT         30
   1653#define TM_CFG_TID_OFFSET_MASK          0x7FFFFULL
   1654#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT  49
   1655#define TM_CFG_TID_PRE_SCAN_ROWS_MASK   0x1FFULL
   1656
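        /* Each timers configuration word is a 64-bit value built from the
         * TM_CFG_* fields above: connection words use NUM_IDS, PRE_SCAN_OFFSET,
         * PARENT_PF and CID_PRE_SCAN_ROWS, while task words replace the CID
         * rows with TID_OFFSET and TID_PRE_SCAN_ROWS. One word is written per
         * VF and for the PF into the connection config memory, and per VF /
         * per PF task segment into the task config memory.
         */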
   1657static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
   1658{
   1659	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
   1660	u32 active_seg_mask = 0, tm_offset, rt_reg;
   1661	struct qed_tm_iids tm_iids;
   1662	u64 cfg_word;
   1663	u8 i;
   1664
   1665	memset(&tm_iids, 0, sizeof(tm_iids));
   1666	qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
   1667
   1668	/* @@@TBD No pre-scan for now */
   1669
   1670	/* Note: We assume consecutive VFs for a PF */
   1671	for (i = 0; i < p_mngr->vf_count; i++) {
   1672		cfg_word = 0;
   1673		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
   1674		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
   1675		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
   1676		SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
   1677		rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
   1678		    (sizeof(cfg_word) / sizeof(u32)) *
   1679		    (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
   1680		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
   1681	}
   1682
   1683	cfg_word = 0;
   1684	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
   1685	SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
   1686	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);	/* n/a for PF */
   1687	SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);	/* scan all   */
   1688
   1689	rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
   1690	    (sizeof(cfg_word) / sizeof(u32)) *
   1691	    (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
   1692	STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
   1693
    1694	/* enable scan */
   1695	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
   1696		     tm_iids.pf_cids ? 0x1 : 0x0);
   1697
   1698	/* @@@TBD how to enable the scan for the VFs */
   1699
   1700	tm_offset = tm_iids.per_vf_cids;
   1701
   1702	/* Note: We assume consecutive VFs for a PF */
   1703	for (i = 0; i < p_mngr->vf_count; i++) {
   1704		cfg_word = 0;
   1705		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
   1706		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
   1707		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
   1708		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
   1709		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
   1710
   1711		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
   1712		    (sizeof(cfg_word) / sizeof(u32)) *
   1713		    (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
   1714
   1715		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
   1716	}
   1717
   1718	tm_offset = tm_iids.pf_cids;
   1719	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
   1720		cfg_word = 0;
   1721		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
   1722		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
   1723		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
   1724		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
   1725		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
   1726
   1727		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
   1728		    (sizeof(cfg_word) / sizeof(u32)) *
   1729		    (NUM_OF_VFS(p_hwfn->cdev) +
   1730		     p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
   1731
   1732		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
   1733		active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
   1734
   1735		tm_offset += tm_iids.pf_tids[i];
   1736	}
   1737
   1738	if (QED_IS_RDMA_PERSONALITY(p_hwfn))
   1739		active_seg_mask = 0;
   1740
   1741	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
   1742
   1743	/* @@@TBD how to enable the scan for the VFs */
   1744}
   1745
   1746static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
   1747{
   1748	if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
   1749	    p_hwfn->pf_params.fcoe_pf_params.is_target)
   1750		STORE_RT_REG(p_hwfn,
   1751			     PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
   1752}
   1753
   1754static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
   1755{
   1756	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
   1757	struct qed_conn_type_cfg *p_fcoe;
   1758	struct qed_tid_seg *p_tid;
   1759
   1760	p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
   1761
   1762	/* If FCoE is active set the MAX OX_ID (tid) in the Parser */
   1763	if (!p_fcoe->cid_count)
   1764		return;
   1765
   1766	p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
   1767	if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
   1768		STORE_RT_REG_AGG(p_hwfn,
   1769				 PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
   1770				 p_tid->count);
   1771	} else {
   1772		STORE_RT_REG_AGG(p_hwfn,
   1773				 PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
   1774				 p_tid->count);
   1775	}
   1776}
   1777
   1778void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
   1779{
   1780	qed_cdu_init_common(p_hwfn);
   1781	qed_prs_init_common(p_hwfn);
   1782}
   1783
   1784void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
   1785{
   1786	qed_qm_init_pf(p_hwfn, p_ptt, true);
   1787	qed_cm_init_pf(p_hwfn);
   1788	qed_dq_init_pf(p_hwfn);
   1789	qed_cdu_init_pf(p_hwfn);
   1790	qed_ilt_init_pf(p_hwfn);
   1791	qed_src_init_pf(p_hwfn);
   1792	qed_tm_init_pf(p_hwfn);
   1793	qed_prs_init_pf(p_hwfn);
   1794}
   1795
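        /* Acquire a CID: select the PF map or the per-VF map for the requested
         * protocol, find the first free bit in its bitmap, mark it as used and
         * return the absolute CID (relative index plus the map's start_cid).
         */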
   1796int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
   1797			 enum protocol_type type, u32 *p_cid, u8 vfid)
   1798{
   1799	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
   1800	struct qed_cid_acquired_map *p_map;
   1801	u32 rel_cid;
   1802
   1803	if (type >= MAX_CONN_TYPES) {
   1804		DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
   1805		return -EINVAL;
   1806	}
   1807
   1808	if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) {
   1809		DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid);
   1810		return -EINVAL;
   1811	}
   1812
   1813	/* Determine the right map to take this CID from */
   1814	if (vfid == QED_CXT_PF_CID)
   1815		p_map = &p_mngr->acquired[type];
   1816	else
   1817		p_map = &p_mngr->acquired_vf[type][vfid];
   1818
   1819	if (!p_map->cid_map) {
    1820		DP_NOTICE(p_hwfn, "CID map for protocol %d is not allocated", type);
   1821		return -EINVAL;
   1822	}
   1823
   1824	rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);
   1825
   1826	if (rel_cid >= p_map->max_count) {
   1827		DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
   1828		return -EINVAL;
   1829	}
   1830
   1831	__set_bit(rel_cid, p_map->cid_map);
   1832
   1833	*p_cid = rel_cid + p_map->start_cid;
   1834
   1835	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
   1836		   "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
   1837		   *p_cid, rel_cid, vfid, type);
   1838
   1839	return 0;
   1840}
   1841
   1842int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
   1843			enum protocol_type type, u32 *p_cid)
   1844{
   1845	return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
   1846}
   1847
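        /* Map an absolute CID back to the protocol map that owns it (PF or
         * per-VF) and verify that the CID is currently marked as acquired.
         */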
   1848static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
   1849				      u32 cid,
   1850				      u8 vfid,
   1851				      enum protocol_type *p_type,
   1852				      struct qed_cid_acquired_map **pp_map)
   1853{
   1854	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
   1855	u32 rel_cid;
   1856
   1857	/* Iterate over protocols and find matching cid range */
   1858	for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
   1859		if (vfid == QED_CXT_PF_CID)
   1860			*pp_map = &p_mngr->acquired[*p_type];
   1861		else
   1862			*pp_map = &p_mngr->acquired_vf[*p_type][vfid];
   1863
   1864		if (!((*pp_map)->cid_map))
   1865			continue;
   1866		if (cid >= (*pp_map)->start_cid &&
   1867		    cid < (*pp_map)->start_cid + (*pp_map)->max_count)
   1868			break;
   1869	}
   1870
   1871	if (*p_type == MAX_CONN_TYPES) {
   1872		DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid);
   1873		goto fail;
   1874	}
   1875
   1876	rel_cid = cid - (*pp_map)->start_cid;
   1877	if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
    1878		DP_NOTICE(p_hwfn, "CID %d [vfid %02x] not acquired",
   1879			  cid, vfid);
   1880		goto fail;
   1881	}
   1882
   1883	return true;
   1884fail:
   1885	*p_type = MAX_CONN_TYPES;
   1886	*pp_map = NULL;
   1887	return false;
   1888}
   1889
   1890void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
   1891{
   1892	struct qed_cid_acquired_map *p_map = NULL;
   1893	enum protocol_type type;
   1894	bool b_acquired;
   1895	u32 rel_cid;
   1896
    1897	if (vfid != QED_CXT_PF_CID && vfid >= MAX_NUM_VFS) {
   1898		DP_NOTICE(p_hwfn,
   1899			  "Trying to return incorrect CID belonging to VF %02x\n",
   1900			  vfid);
   1901		return;
   1902	}
   1903
   1904	/* Test acquired and find matching per-protocol map */
   1905	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
   1906					       &type, &p_map);
   1907
   1908	if (!b_acquired)
   1909		return;
   1910
   1911	rel_cid = cid - p_map->start_cid;
   1912	clear_bit(rel_cid, p_map->cid_map);
   1913
   1914	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
   1915		   "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
   1916		   cid, rel_cid, vfid, type);
   1917}
   1918
   1919void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
   1920{
   1921	_qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
   1922}
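
        /* Hypothetical usage sketch (not taken from an in-tree caller):
         * acquire a PF-owned CID for a protocol, use it, then return it.
         *
         *	u32 cid;
         *
         *	if (!qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
         *		... program and use the connection context for cid ...
         *		qed_cxt_release_cid(p_hwfn, cid);
         *	}
         */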
   1923
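        /* Translate an acquired CID into a virtual pointer to its connection
         * context: each ILT shadow line covers one ILT page of contexts, so
         * line = iid / (page_size / context_size), and the context lives at
         * (iid % contexts_per_page) * context_size within that page.
         */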
   1924int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
   1925{
   1926	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
   1927	struct qed_cid_acquired_map *p_map = NULL;
   1928	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
   1929	enum protocol_type type;
   1930	bool b_acquired;
   1931
   1932	/* Test acquired and find matching per-protocol map */
   1933	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
   1934					       QED_CXT_PF_CID, &type, &p_map);
   1935
   1936	if (!b_acquired)
   1937		return -EINVAL;
   1938
    1939	/* set the protocol type */
   1940	p_info->type = type;
   1941
   1942	/* compute context virtual pointer */
   1943	hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
   1944
   1945	conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
   1946	cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
   1947	line = p_info->iid / cxts_per_p;
   1948
   1949	/* Make sure context is allocated (dynamic allocation) */
   1950	if (!p_mngr->ilt_shadow[line].virt_addr)
   1951		return -EINVAL;
   1952
   1953	p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr +
   1954			p_info->iid % cxts_per_p * conn_cxt_size;
   1955
   1956	DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
   1957		   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
   1958		   p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
   1959
   1960	return 0;
   1961}
   1962
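        /* Derive the RDMA connection and task counts for this PF: iWARP uses
         * one connection per QP while RoCE uses two; task ids are shared
         * between RoCE and iWARP, the SRQ count is capped at QED_RDMA_MAX_SRQS,
         * and XRC SRQs are limited to what fits in a single ILT page.
         */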
   1963static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
   1964				   struct qed_rdma_pf_params *p_params,
   1965				   u32 num_tasks)
   1966{
   1967	u32 num_cons, num_qps;
   1968	enum protocol_type proto;
   1969
   1970	if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
   1971		DP_VERBOSE(p_hwfn, QED_MSG_SP,
   1972			   "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
   1973		p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
   1974	}
   1975
   1976	switch (p_hwfn->hw_info.personality) {
   1977	case QED_PCI_ETH_IWARP:
   1978		/* Each QP requires one connection */
   1979		num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps);
   1980		proto = PROTOCOLID_IWARP;
   1981		break;
   1982	case QED_PCI_ETH_ROCE:
   1983		num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
   1984		num_cons = num_qps * 2;	/* each QP requires two connections */
   1985		proto = PROTOCOLID_ROCE;
   1986		break;
   1987	default:
   1988		return;
   1989	}
   1990
   1991	if (num_cons && num_tasks) {
   1992		u32 num_srqs, num_xrc_srqs;
   1993
   1994		qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
   1995
    1996		/* Deliberately pass ROCE for the task id, since iWARP and
    1997		 * RoCE share the same task-id space.
    1998		 */
   1999		qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
   2000					    QED_CXT_ROCE_TID_SEG, 1,
   2001					    num_tasks, false);
   2002
   2003		num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
   2004
   2005		/* XRC SRQs populate a single ILT page */
   2006		num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn);
   2007
   2008		qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs);
   2009	} else {
   2010		DP_INFO(p_hwfn->cdev,
   2011			"RDMA personality used without setting params!\n");
   2012	}
   2013}
   2014
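        /* Per-personality CID/TID sizing: always reserve the CORE connections
         * (SPQ, plus LL2 when used), then set the protocol counts according to
         * the PF's personality and its pf_params.
         */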
   2015int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
   2016{
   2017	/* Set the number of required CORE connections */
   2018	u32 core_cids = 1; /* SPQ */
   2019
   2020	if (p_hwfn->using_ll2)
   2021		core_cids += 4;
   2022	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
   2023
   2024	switch (p_hwfn->hw_info.personality) {
   2025	case QED_PCI_ETH_RDMA:
   2026	case QED_PCI_ETH_IWARP:
   2027	case QED_PCI_ETH_ROCE:
   2028	{
   2029			qed_rdma_set_pf_params(p_hwfn,
   2030					       &p_hwfn->
   2031					       pf_params.rdma_pf_params,
   2032					       rdma_tasks);
    2033		/* No need for a break since RoCE coexists with Ethernet */
   2034	}
   2035		fallthrough;
   2036	case QED_PCI_ETH:
   2037	{
   2038		struct qed_eth_pf_params *p_params =
   2039		    &p_hwfn->pf_params.eth_pf_params;
   2040
   2041		if (!p_params->num_vf_cons)
   2042			p_params->num_vf_cons =
   2043			    ETH_PF_PARAMS_VF_CONS_DEFAULT;
   2044		qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
   2045					    p_params->num_cons,
   2046					    p_params->num_vf_cons);
   2047		p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
   2048		break;
   2049	}
   2050	case QED_PCI_FCOE:
   2051	{
   2052		struct qed_fcoe_pf_params *p_params;
   2053
   2054		p_params = &p_hwfn->pf_params.fcoe_pf_params;
   2055
   2056		if (p_params->num_cons && p_params->num_tasks) {
   2057			qed_cxt_set_proto_cid_count(p_hwfn,
   2058						    PROTOCOLID_FCOE,
   2059						    p_params->num_cons,
   2060						    0);
   2061			qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
   2062						    QED_CXT_FCOE_TID_SEG, 0,
   2063						    p_params->num_tasks, true);
   2064		} else {
   2065			DP_INFO(p_hwfn->cdev,
    2066				"FCoE personality used without setting params!\n");
   2067		}
   2068		break;
   2069	}
   2070	case QED_PCI_ISCSI:
   2071	{
   2072		struct qed_iscsi_pf_params *p_params;
   2073
   2074		p_params = &p_hwfn->pf_params.iscsi_pf_params;
   2075
   2076		if (p_params->num_cons && p_params->num_tasks) {
   2077			qed_cxt_set_proto_cid_count(p_hwfn,
   2078						    PROTOCOLID_TCP_ULP,
   2079						    p_params->num_cons,
   2080						    0);
   2081			qed_cxt_set_proto_tid_count(p_hwfn,
   2082						    PROTOCOLID_TCP_ULP,
   2083						    QED_CXT_TCP_ULP_TID_SEG,
   2084						    0,
   2085						    p_params->num_tasks,
   2086						    true);
   2087		} else {
   2088			DP_INFO(p_hwfn->cdev,
    2089				"iSCSI personality used without setting params!\n");
   2090		}
   2091		break;
   2092	}
   2093	case QED_PCI_NVMETCP:
   2094	{
   2095		struct qed_nvmetcp_pf_params *p_params;
   2096
   2097		p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
   2098
   2099		if (p_params->num_cons && p_params->num_tasks) {
   2100			qed_cxt_set_proto_cid_count(p_hwfn,
   2101						    PROTOCOLID_TCP_ULP,
   2102						    p_params->num_cons,
   2103						    0);
   2104			qed_cxt_set_proto_tid_count(p_hwfn,
   2105						    PROTOCOLID_TCP_ULP,
   2106						    QED_CXT_TCP_ULP_TID_SEG,
   2107						    0,
   2108						    p_params->num_tasks,
   2109						    true);
   2110		} else {
   2111			DP_INFO(p_hwfn->cdev,
    2112				"NVMeTCP personality used without setting params!\n");
   2113		}
   2114		break;
   2115	}
   2116	default:
   2117		return -EINVAL;
   2118	}
   2119
   2120	return 0;
   2121}
   2122
   2123int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
   2124			     struct qed_tid_mem *p_info)
   2125{
   2126	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
   2127	u32 proto, seg, total_lines, i, shadow_line;
   2128	struct qed_ilt_client_cfg *p_cli;
   2129	struct qed_ilt_cli_blk *p_fl_seg;
   2130	struct qed_tid_seg *p_seg_info;
   2131
   2132	/* Verify the personality */
   2133	switch (p_hwfn->hw_info.personality) {
   2134	case QED_PCI_FCOE:
   2135		proto = PROTOCOLID_FCOE;
   2136		seg = QED_CXT_FCOE_TID_SEG;
   2137		break;
   2138	case QED_PCI_ISCSI:
   2139	case QED_PCI_NVMETCP:
   2140		proto = PROTOCOLID_TCP_ULP;
   2141		seg = QED_CXT_TCP_ULP_TID_SEG;
   2142		break;
   2143	default:
   2144		return -EINVAL;
   2145	}
   2146
   2147	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
   2148	if (!p_cli->active)
   2149		return -EINVAL;
   2150
   2151	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
   2152	if (!p_seg_info->has_fl_mem)
   2153		return -EINVAL;
   2154
   2155	p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
   2156	total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
   2157				   p_fl_seg->real_size_in_page);
   2158
   2159	for (i = 0; i < total_lines; i++) {
   2160		shadow_line = i + p_fl_seg->start_line -
   2161		    p_hwfn->p_cxt_mngr->pf_start_line;
   2162		p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
   2163	}
   2164	p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
   2165	    p_fl_seg->real_size_in_page;
   2166	p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
   2167	p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
   2168	    p_info->tid_size;
   2169
   2170	return 0;
   2171}
   2172
    2173/* This function is very RoCE oriented; if another protocol wants this
    2174 * feature in the future, the function will need to be made more generic.
    2175 */
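        /* Flow: map the iid to an ILT line for the given element type; if that
         * line has no backing page yet, allocate a DMA-coherent page, record it
         * in the ILT shadow and write the valid entry to PSWRQ2_REG_ILT_MEMORY
         * via DMAE. The manager mutex serializes concurrent allocations of the
         * same line.
         */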
   2176int
   2177qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
   2178			  enum qed_cxt_elem_type elem_type, u32 iid)
   2179{
   2180	u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
   2181	struct tdif_task_context *tdif_context;
   2182	struct qed_ilt_client_cfg *p_cli;
   2183	struct qed_ilt_cli_blk *p_blk;
   2184	struct qed_ptt *p_ptt;
   2185	dma_addr_t p_phys;
   2186	u64 ilt_hw_entry;
   2187	void *p_virt;
   2188	u32 flags1;
   2189	int rc = 0;
   2190
   2191	switch (elem_type) {
   2192	case QED_ELEM_CXT:
   2193		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
   2194		elem_size = CONN_CXT_SIZE(p_hwfn);
   2195		p_blk = &p_cli->pf_blks[CDUC_BLK];
   2196		break;
   2197	case QED_ELEM_SRQ:
   2198		/* The first ILT page is not used for regular SRQs. Skip it. */
   2199		iid += p_hwfn->p_cxt_mngr->xrc_srq_count;
   2200		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
   2201		elem_size = SRQ_CXT_SIZE;
   2202		p_blk = &p_cli->pf_blks[SRQ_BLK];
   2203		break;
   2204	case QED_ELEM_XRC_SRQ:
   2205		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
   2206		elem_size = XRC_SRQ_CXT_SIZE;
   2207		p_blk = &p_cli->pf_blks[SRQ_BLK];
   2208		break;
   2209	case QED_ELEM_TASK:
   2210		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
   2211		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
   2212		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
   2213		break;
   2214	default:
   2215		DP_NOTICE(p_hwfn, "-EOPNOTSUPP elem type = %d", elem_type);
   2216		return -EOPNOTSUPP;
   2217	}
   2218
   2219	/* Calculate line in ilt */
   2220	hw_p_size = p_cli->p_size.val;
   2221	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
   2222	line = p_blk->start_line + (iid / elems_per_p);
   2223	shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
   2224
   2225	/* If line is already allocated, do nothing, otherwise allocate it and
   2226	 * write it to the PSWRQ2 registers.
   2227	 * This section can be run in parallel from different contexts and thus
   2228	 * a mutex protection is needed.
   2229	 */
   2230
   2231	mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
   2232
   2233	if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
   2234		goto out0;
   2235
   2236	p_ptt = qed_ptt_acquire(p_hwfn);
   2237	if (!p_ptt) {
   2238		DP_NOTICE(p_hwfn,
   2239			  "QED_TIME_OUT on ptt acquire - dynamic allocation");
   2240		rc = -EBUSY;
   2241		goto out0;
   2242	}
   2243
   2244	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
   2245				    p_blk->real_size_in_page, &p_phys,
   2246				    GFP_KERNEL);
   2247	if (!p_virt) {
   2248		rc = -ENOMEM;
   2249		goto out1;
   2250	}
   2251
   2252	/* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
   2253	 * to compensate for a HW bug, but it is configured even if DIF is not
   2254	 * enabled. This is harmless and allows us to avoid a dedicated API. We
   2255	 * configure the field for all of the contexts on the newly allocated
   2256	 * page.
   2257	 */
   2258	if (elem_type == QED_ELEM_TASK) {
   2259		u32 elem_i;
   2260		u8 *elem_start = (u8 *)p_virt;
   2261		union type1_task_context *elem;
   2262
   2263		for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
   2264			elem = (union type1_task_context *)elem_start;
   2265			tdif_context = &elem->roce_ctx.tdif_context;
   2266
   2267			flags1 = le32_to_cpu(tdif_context->flags1);
   2268			SET_FIELD(flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
   2269			tdif_context->flags1 = cpu_to_le32(flags1);
   2270
   2271			elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
   2272		}
   2273	}
   2274
   2275	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
   2276	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
   2277	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
   2278	    p_blk->real_size_in_page;
   2279
   2280	/* compute absolute offset */
   2281	reg_offset = PSWRQ2_REG_ILT_MEMORY +
   2282	    (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
   2283
   2284	ilt_hw_entry = 0;
   2285	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
   2286	SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
   2287		  (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr
   2288		   >> 12));
   2289
   2290	/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
   2291	qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
   2292			  reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
   2293			  NULL);
   2294
   2295	if (elem_type == QED_ELEM_CXT) {
   2296		u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
   2297		    elems_per_p;
   2298
   2299		/* Update the relevant register in the parser */
   2300		qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
   2301		       last_cid_allocated - 1);
   2302
   2303		if (!p_hwfn->b_rdma_enabled_in_prs) {
   2304			/* Enable RDMA search */
   2305			qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
   2306			p_hwfn->b_rdma_enabled_in_prs = true;
   2307		}
   2308	}
   2309
   2310out1:
   2311	qed_ptt_release(p_hwfn, p_ptt);
   2312out0:
   2313	mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
   2314
   2315	return rc;
   2316}
   2317
    2318/* This function is very RoCE oriented; if another protocol wants this
    2319 * feature in the future, the function will need to be made more generic.
    2320 */
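        /* Inverse of the dynamic allocation above: walk the ILT shadow lines
         * backing [start_iid, start_iid + count), free their DMA pages and
         * clear the corresponding ILT entries via DMAE.
         */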
   2321static int
   2322qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
   2323		       enum qed_cxt_elem_type elem_type,
   2324		       u32 start_iid, u32 count)
   2325{
   2326	u32 start_line, end_line, shadow_start_line, shadow_end_line;
   2327	u32 reg_offset, elem_size, hw_p_size, elems_per_p;
   2328	struct qed_ilt_client_cfg *p_cli;
   2329	struct qed_ilt_cli_blk *p_blk;
   2330	u32 end_iid = start_iid + count;
   2331	struct qed_ptt *p_ptt;
   2332	u64 ilt_hw_entry = 0;
   2333	u32 i;
   2334
   2335	switch (elem_type) {
   2336	case QED_ELEM_CXT:
   2337		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
   2338		elem_size = CONN_CXT_SIZE(p_hwfn);
   2339		p_blk = &p_cli->pf_blks[CDUC_BLK];
   2340		break;
   2341	case QED_ELEM_SRQ:
   2342		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
   2343		elem_size = SRQ_CXT_SIZE;
   2344		p_blk = &p_cli->pf_blks[SRQ_BLK];
   2345		break;
   2346	case QED_ELEM_XRC_SRQ:
   2347		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
   2348		elem_size = XRC_SRQ_CXT_SIZE;
   2349		p_blk = &p_cli->pf_blks[SRQ_BLK];
   2350		break;
   2351	case QED_ELEM_TASK:
   2352		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
   2353		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
   2354		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
   2355		break;
   2356	default:
    2357		DP_NOTICE(p_hwfn, "-EINVAL elem type = %d", elem_type);
   2358		return -EINVAL;
   2359	}
   2360
   2361	/* Calculate line in ilt */
   2362	hw_p_size = p_cli->p_size.val;
   2363	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
   2364	start_line = p_blk->start_line + (start_iid / elems_per_p);
   2365	end_line = p_blk->start_line + (end_iid / elems_per_p);
   2366	if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
   2367		end_line--;
   2368
   2369	shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
   2370	shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
   2371
   2372	p_ptt = qed_ptt_acquire(p_hwfn);
   2373	if (!p_ptt) {
   2374		DP_NOTICE(p_hwfn,
   2375			  "QED_TIME_OUT on ptt acquire - dynamic allocation");
   2376		return -EBUSY;
   2377	}
   2378
   2379	for (i = shadow_start_line; i < shadow_end_line; i++) {
   2380		if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
   2381			continue;
   2382
   2383		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
   2384				  p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
   2385				  p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
   2386				  p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr);
   2387
   2388		p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL;
   2389		p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
   2390		p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
   2391
   2392		/* compute absolute offset */
   2393		reg_offset = PSWRQ2_REG_ILT_MEMORY +
   2394		    ((start_line++) * ILT_REG_SIZE_IN_BYTES *
   2395		     ILT_ENTRY_IN_REGS);
   2396
   2397		/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
   2398		 * wide-bus.
   2399		 */
   2400		qed_dmae_host2grc(p_hwfn, p_ptt,
   2401				  (u64) (uintptr_t) &ilt_hw_entry,
   2402				  reg_offset,
   2403				  sizeof(ilt_hw_entry) / sizeof(u32),
   2404				  NULL);
   2405	}
   2406
   2407	qed_ptt_release(p_hwfn, p_ptt);
   2408
   2409	return 0;
   2410}
   2411
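        /* Free all dynamically allocated ILT memory of a protocol: connection
         * contexts first, then task contexts (always under the RoCE segment,
         * since RoCE and iWARP share task ids), then the XRC-SRQ and SRQ
         * ranges.
         */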
   2412int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
   2413{
   2414	int rc;
   2415	u32 cid;
   2416
   2417	/* Free Connection CXT */
   2418	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
   2419				    qed_cxt_get_proto_cid_start(p_hwfn,
   2420								proto),
   2421				    qed_cxt_get_proto_cid_count(p_hwfn,
   2422								proto, &cid));
   2423
   2424	if (rc)
   2425		return rc;
   2426
   2427	/* Free Task CXT ( Intentionally RoCE as task-id is shared between
   2428	 * RoCE and iWARP )
   2429	 */
   2430	proto = PROTOCOLID_ROCE;
   2431	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
   2432				    qed_cxt_get_proto_tid_count(p_hwfn, proto));
   2433	if (rc)
   2434		return rc;
   2435
   2436	/* Free TSDM CXT */
   2437	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0,
   2438				    p_hwfn->p_cxt_mngr->xrc_srq_count);
   2439
   2440	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ,
   2441				    p_hwfn->p_cxt_mngr->xrc_srq_count,
   2442				    p_hwfn->p_cxt_mngr->srq_count);
   2443
   2444	return rc;
   2445}
   2446
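        /* Resolve a task id to its context in host memory, either in the
         * working-memory segment (QED_CTX_WORKING_MEM) or in the FL segment
         * (QED_CTX_FL_MEM), using the CDUT client's ILT shadow.
         */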
   2447int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
   2448			 u32 tid, u8 ctx_type, void **pp_task_ctx)
   2449{
   2450	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
   2451	struct qed_ilt_client_cfg *p_cli;
   2452	struct qed_tid_seg *p_seg_info;
   2453	struct qed_ilt_cli_blk *p_seg;
   2454	u32 num_tids_per_block;
   2455	u32 tid_size, ilt_idx;
   2456	u32 total_lines;
   2457	u32 proto, seg;
   2458
   2459	/* Verify the personality */
   2460	switch (p_hwfn->hw_info.personality) {
   2461	case QED_PCI_FCOE:
   2462		proto = PROTOCOLID_FCOE;
   2463		seg = QED_CXT_FCOE_TID_SEG;
   2464		break;
   2465	case QED_PCI_ISCSI:
   2466	case QED_PCI_NVMETCP:
   2467		proto = PROTOCOLID_TCP_ULP;
   2468		seg = QED_CXT_TCP_ULP_TID_SEG;
   2469		break;
   2470	default:
   2471		return -EINVAL;
   2472	}
   2473
   2474	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
   2475	if (!p_cli->active)
   2476		return -EINVAL;
   2477
   2478	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
   2479
   2480	if (ctx_type == QED_CTX_WORKING_MEM) {
   2481		p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
   2482	} else if (ctx_type == QED_CTX_FL_MEM) {
   2483		if (!p_seg_info->has_fl_mem)
   2484			return -EINVAL;
   2485		p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
   2486	} else {
   2487		return -EINVAL;
   2488	}
   2489	total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
   2490	tid_size = p_mngr->task_type_size[p_seg_info->type];
   2491	num_tids_per_block = p_seg->real_size_in_page / tid_size;
   2492
   2493	if (total_lines < tid / num_tids_per_block)
   2494		return -EINVAL;
   2495
   2496	ilt_idx = tid / num_tids_per_block + p_seg->start_line -
   2497		  p_mngr->pf_start_line;
   2498	*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
   2499		       (tid % num_tids_per_block) * tid_size;
   2500
   2501	return 0;
   2502}
   2503
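        /* Page accounting helpers: a block spans
         * DIV_ROUND_UP(total_size, real_size_in_page) ILT pages; the
         * qed_get_cdut_num_* functions below sum this over the CDUT init (FL)
         * and working segments for the PF and its VFs.
         */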
   2504static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk)
   2505{
   2506	if (p_blk->real_size_in_page == 0)
   2507		return 0;
   2508
   2509	return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
   2510}
   2511
   2512u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn)
   2513{
   2514	struct qed_ilt_client_cfg *p_cli;
   2515	struct qed_ilt_cli_blk *p_blk;
   2516	u16 i, pages = 0;
   2517
   2518	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
   2519	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
   2520		p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
   2521		pages += qed_blk_calculate_pages(p_blk);
   2522	}
   2523
   2524	return pages;
   2525}
   2526
   2527u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn)
   2528{
   2529	struct qed_ilt_client_cfg *p_cli;
   2530	struct qed_ilt_cli_blk *p_blk;
   2531	u16 i, pages = 0;
   2532
   2533	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
   2534	for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
   2535		p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
   2536		pages += qed_blk_calculate_pages(p_blk);
   2537	}
   2538
   2539	return pages;
   2540}
   2541
   2542u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn)
   2543{
   2544	struct qed_ilt_client_cfg *p_cli;
   2545	struct qed_ilt_cli_blk *p_blk;
   2546	u16 i, pages = 0;
   2547
   2548	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
   2549	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
   2550		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
   2551		pages += qed_blk_calculate_pages(p_blk);
   2552	}
   2553
   2554	return pages;
   2555}
   2556
   2557u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn)
   2558{
   2559	struct qed_ilt_client_cfg *p_cli;
   2560	struct qed_ilt_cli_blk *p_blk;
   2561	u16 pages = 0, i;
   2562
   2563	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
   2564	for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
   2565		p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
   2566		pages += qed_blk_calculate_pages(p_blk);
   2567	}
   2568
   2569	return pages;
   2570}