cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cxgb4_uld.c (23143B)


/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *  Written by: Atul Gupta (atul.gupta@chelsio.com)
 *  Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

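/* Iterate over all rx queues of a ULD: first the ordinary offload rx
 * queues (nrxq), then the concentrator queues (nciq) that follow them
 * in the uldrxq array.
 */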
#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

     65/**
     66 *	uldrx_handler - response queue handler for ULD queues
     67 *	@q: the response queue that received the packet
     68 *	@rsp: the response queue descriptor holding the offload message
     69 *	@gl: the gather list of packet fragments
     70 *
     71 *	Deliver an ingress offload packet to a ULD.  All processing is done by
     72 *	the ULD, we just maintain statistics.
     73 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
				rsp, gl, &q->lro_mgr,
				&q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
				rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

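/* Allocate the rx response queues (and their free lists) for a ULD:
 * nrxq ordinary queues followed by nciq concentrator queues, spread
 * evenly across the adapter's ports.  With MSI-X, each queue gets its
 * own vector from the MSI-X bitmap; on failure everything allocated so
 * far is torn down again.
 */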
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	int i, err, msi_idx, que_idx = 0;
	struct sge *s = &adap->sge;
	unsigned int per_chan;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & CXGB4_USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
			if (msi_idx < 0) {
				err = -ENOSPC;
				goto freeout;
			}

			snprintf(adap->msix_info[msi_idx].desc,
				 sizeof(adap->msix_info[msi_idx].desc),
				 "%s-%s%d",
				 adap->port[0]->name, rxq_info->name, i);

			q->msix = &adap->msix_info[msi_idx];
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;

		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
		if (q->msix)
			cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
	}
	return err;
}

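/* Allocate all SGE rx queues for a ULD type and, for RDMA, tell the
 * firmware to route control queue completions to the per-port RDMA
 * response queues.
 */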
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret;

	ret = alloc_uld_rxqs(adap, rxq_info, lro);
	if (ret)
		return ret;

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

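/* Free the first n rx queues in q, releasing each response queue and
 * its free list if one was allocated.
 */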
static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

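/* Undo setup_sge_queues_uld(): for RDMA, first clear the control queue
 * completion routing in firmware (cmplqid == 0), then free the
 * concentrator queues and the ordinary rx queues.
 */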
static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
}

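/* Size and allocate the rx queue bookkeeping for a ULD type: decide
 * how many ordinary and concentrator rx queues to use (bounded by the
 * per-ULD limits, the number of online CPUs and the port count),
 * allocate the queue and response queue id arrays, and initialize each
 * response queue's parameters.
 */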
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & CXGB4_USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

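/* Release the rx queue bookkeeping allocated by cfg_queues_uld(). */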
static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}

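/* Request an MSI-X interrupt for every rx queue of a ULD type and pin
 * each vector's CPU affinity; on failure, unwind the vectors requested
 * so far.
 */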
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct msix_info *minfo;
	unsigned int idx;
	int err = 0;

	for_each_uldrxq(rxq_info, idx) {
		minfo = rxq_info->uldrxq[idx].msix;
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, idx);
	}
	return 0;

unwind:
	while (idx-- > 0) {
		minfo = rxq_info->uldrxq[idx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

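/* Free the IRQs, affinity masks and MSI-X bitmap entries of all rx
 * queues of a ULD type.
 */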
static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct msix_info *minfo;
	unsigned int idx;

	for_each_uldrxq(rxq_info, idx) {
		minfo = rxq_info->uldrxq[idx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
}

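/* Enable ingress processing on all rx queues of a ULD type. */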
static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

		if (!q)
			continue;

		cxgb4_enable_rx(adap, q);
	}
}

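/* Quiesce (stop NAPI processing on) all rx queues of a ULD type. */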
static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

		if (!q)
			continue;

		cxgb4_quiesce_rx(q);
	}
}

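/* Free all tx queues of a ULD: kill the restart tasklet, free the
 * hardware egress queue, any in-flight tx descriptors, the software
 * descriptor array and the backlog of queued skbs.
 */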
static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}

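/* Allocate the ULD tx queues, 1024 descriptors deep and spread evenly
 * across the adapter's ports, passing the firmware event queue's
 * context id to t4_sge_alloc_uld_txq(); on failure everything
 * allocated so far is freed.
 */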
static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}

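/* Drop a reference on the shared tx queue set of a ULD type and free
 * it when the last user goes away.
 */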
static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}

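/* Set up (or take another reference on) the tx queue set of a ULD
 * type.  Offload tx queues are shared between ULDs of the same tx
 * type; crypto ULDs size their queue count from the available crypto
 * function resources instead of the generic per-ULD limit.
 */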
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;
	if (uld_type == CXGB4_ULD_CRYPTO) {
		i = min_t(int, adap->vres.ncrypto_fc,
			  num_online_cpus());
		txq_info->ntxq = rounddown(i, adap->params.nports);
		if (txq_info->ntxq <= 0) {
			dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
			kfree(txq_info);
			return -EINVAL;
		}

	} else {
		i = min_t(int, uld_info->ntxq, num_online_cpus());
		txq_info->ntxq = roundup(i, adap->params.nports);
	}
	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

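/* Fill in the queue-related fields of the lower-layer driver info
 * handed to a ULD at attach time.
 */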
static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int tx_uld_type = TX_ULD(uld_type);
	struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
	lli->ntxq = txq_info->ntxq;
}

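/* Allocate the per-adapter ULD bookkeeping: the ULD handler array and
 * the rx/tx queue info pointer arrays.
 */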
int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

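/* Free the per-adapter ULD bookkeeping allocated by t4_uld_mem_alloc(). */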
void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & CXGB4_USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}

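/* Shut down every ULD attached to the adapter. */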
void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	if (!is_uld(adap))
		return;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}

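/* Fill in the adapter-wide fields of the lower-layer driver info
 * (hardware parameters, register addresses, firmware capabilities)
 * handed to a ULD at attach time.
 */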
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->crypto = adap->params.crypto;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
	lld->write_w_imm_support = adap->params.write_w_imm_support;
	lld->write_cmpl_support = adap->params.write_cmpl_support;
}

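/* Hand the lower-layer driver info to a ULD's add() callback and, if
 * the adapter is fully initialized, notify the ULD that the device is
 * up.
 */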
static int uld_attach(struct adapter *adap, unsigned int uld)
{
	struct cxgb4_lld_info lli;
	void *handle;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return PTR_ERR(handle);
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & CXGB4_FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);

	return 0;
}

#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
static bool cxgb4_uld_in_use(struct adapter *adap)
{
	const struct tid_info *t = &adap->tids;

	return (atomic_read(&t->conns_in_use) || t->stids_in_use);
}

/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings.
 * @adap: adapter info
 * @enable: 1 to enable / 0 to disable ktls settings.
 */
int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
{
	int ret = 0;
	u32 params =
		FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_HW) |
		FW_PARAMS_PARAM_Y_V(enable) |
		FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);

	if (enable) {
		if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) {
			/* If ULD connections (tid/stid) are already up,
			 * another ULD is active; return failure.
			 */
			if (cxgb4_uld_in_use(adap)) {
				dev_dbg(adap->pdev_dev,
					"ULD connections (tid/stid) active. Can't enable kTLS\n");
				return -EINVAL;
			}
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &params, &params);
			if (ret)
				return ret;
			refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
			pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n");
		} else {
			/* kTLS is already enabled, just take another
			 * reference.
			 */
			refcount_inc(&adap->chcr_ktls.ktls_refcount);
		}
	} else {
		/* Return failure if the refcount is already 0. */
		if (!refcount_read(&adap->chcr_ktls.ktls_refcount))
			return -EINVAL;
		/* Drop a reference; if it was the last one, disable the
		 * kTLS feature, otherwise just report success.
		 */
		if (refcount_dec_and_test(&adap->chcr_ktls.ktls_refcount)) {
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &params, &params);
			if (ret)
				return ret;
			pr_debug("kTLS is disabled. Restrictions on ULD support removed\n");
		}
	}

	return ret;
}
#endif

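/* Allocate and wire up everything a ULD needs on one adapter: rx
 * queues, MSI-X vectors, tx queues, then attach the ULD.  Any failure
 * unwinds the steps already taken and logs a warning.
 */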
static void cxgb4_uld_alloc_resources(struct adapter *adap,
				      enum cxgb4_uld type,
				      const struct cxgb4_uld_info *p)
{
	int ret = 0;

	if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
	    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
		return;
	if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
		return;
	ret = cfg_queues_uld(adap, type, p);
	if (ret)
		goto out;
	ret = setup_sge_queues_uld(adap, type, p->lro);
	if (ret)
		goto free_queues;
	if (adap->flags & CXGB4_USING_MSIX) {
		ret = request_msix_queue_irqs_uld(adap, type);
		if (ret)
			goto free_rxq;
	}
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		enable_rx_uld(adap, type);
	if (adap->uld[type].add)
		goto free_irq;
	ret = setup_sge_txq_uld(adap, type, p);
	if (ret)
		goto free_irq;
	adap->uld[type] = *p;
	ret = uld_attach(adap, type);
	if (ret)
		goto free_txq;
	return;
free_txq:
	release_sge_txq_uld(adap, type);
free_irq:
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & CXGB4_USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	dev_warn(adap->pdev_dev,
		 "ULD registration failed for uld type %d\n", type);
}

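/* Add an adapter to the adapter list and allocate resources on it for
 * every ULD registered so far.
 */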
void cxgb4_uld_enable(struct adapter *adap)
{
	struct cxgb4_uld_list *uld_entry;

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	list_for_each_entry(uld_entry, &uld_list, list_node)
		cxgb4_uld_alloc_resources(adap, uld_entry->uld_type,
					  &uld_entry->uld_info);
	mutex_unlock(&uld_mutex);
}

/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.
 */
void cxgb4_register_uld(enum cxgb4_uld type,
			const struct cxgb4_uld_info *p)
{
	struct cxgb4_uld_list *uld_entry;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return;

	uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL);
	if (!uld_entry)
		return;

	memcpy(&uld_entry->uld_info, p, sizeof(struct cxgb4_uld_info));
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		cxgb4_uld_alloc_resources(adap, type, p);

	uld_entry->uld_type = type;
	list_add_tail(&uld_entry->list_node, &uld_list);
	mutex_unlock(&uld_mutex);
}
EXPORT_SYMBOL(cxgb4_register_uld);
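
/* A minimal registration sketch from a hypothetical ULD module.  The
 * names and values below are illustrative only, not part of this file;
 * the fields are the ones this file reads from struct cxgb4_uld_info:
 *
 *	static void *my_uld_add(const struct cxgb4_lld_info *lld)
 *	{
 *		// set up the ULD with the queue ids in lld and return a
 *		// driver handle (or an ERR_PTR on failure)
 *	}
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.name = "my_uld",
 *		.nrxq = 8,
 *		.ntxq = 8,
 *		.rxq_size = 1024,
 *		.add = my_uld_add,
 *		.rx_handler = my_uld_rx_handler,
 *		.state_change = my_uld_state_change,
 *	};
 *
 *	cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 */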

/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct cxgb4_uld_list *uld_entry, *tmp;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}

	list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) {
		if (uld_entry->uld_type == type) {
			list_del(&uld_entry->list_node);
			kfree(uld_entry);
		}
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);