cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bnxt_dcb.c (18355B)


/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_dcb.h"

#ifdef CONFIG_BNXT_DCB
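/* Reverse-map a hardware CoS queue ID to the TC index currently bound to
 * it through bp->tc_to_qidx[].  Returns the TC number, or -EINVAL if the
 * queue ID is not assigned to any TC.
 */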
static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
{
	int i, j;

	for (i = 0; i < bp->max_tc; i++) {
		if (bp->q_info[i].queue_id == queue_id) {
			for (j = 0; j < bp->max_tc; j++) {
				if (bp->tc_to_qidx[j] == i)
					return j;
			}
		}
	}
	return -EINVAL;
}

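/* Program the IEEE 802.1p priority to CoS queue mapping into firmware
 * (HWRM_QUEUE_PRI2COS_CFG) for both traffic directions, derived from the
 * ETS prio_tc[] table and the driver's TC-to-queue-index assignment.
 */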
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_pri2cos_cfg_input *req;
	u8 *pri2cos;
	int rc, i;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_CFG);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
				 QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);

	pri2cos = &req->pri0_cos_queue_id;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		u8 qidx;

		req->enables |= cpu_to_le32(
			QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);

		qidx = bp->tc_to_qidx[ets->prio_tc[i]];
		pri2cos[i] = bp->q_info[qidx].queue_id;
	}
	return hwrm_req_send(bp, req);
}

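/* Query the current priority to CoS queue mapping from firmware and
 * translate each queue ID back to a TC index for the ETS prio_tc[] table.
 */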
static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_pri2cos_qcfg_output *resp;
	struct hwrm_queue_pri2cos_qcfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_QCFG);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		u8 *pri2cos = &resp->pri0_cos_queue_id;
		int i;

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			u8 queue_id = pri2cos[i];
			int tc;

			tc = bnxt_queue_to_tc(bp, queue_id);
			if (tc >= 0)
				ets->prio_tc[i] = tc;
		}
	}
	hwrm_req_drop(bp, req);
	return rc;
}

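/* Configure the per-CoS-queue transmit scheduler (HWRM_QUEUE_COS2BW_CFG):
 * strict priority for IEEE_8021QAZ_TSA_STRICT TCs, otherwise ETS with the
 * requested bandwidth weight.  The per-queue records are packed back to
 * back into the request starting at queue_id0.
 */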
static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
				      u8 max_tc)
{
	struct hwrm_queue_cos2bw_cfg_input *req;
	struct bnxt_cos2bw_cfg cos2bw;
	void *data;
	int rc, i;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG);
	if (rc)
		return rc;

	for (i = 0; i < max_tc; i++) {
		u8 qidx = bp->tc_to_qidx[i];

		req->enables |= cpu_to_le32(
			QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
			qidx);

		memset(&cos2bw, 0, sizeof(cos2bw));
		cos2bw.queue_id = bp->q_info[qidx].queue_id;
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
			cos2bw.tsa =
				QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
			cos2bw.pri_lvl = i;
		} else {
			cos2bw.tsa =
				QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
			cos2bw.bw_weight = ets->tc_tx_bw[i];
			/* older firmware requires min_bw to be set to the
			 * same weight value in percent.
			 */
			cos2bw.min_bw =
				cpu_to_le32((ets->tc_tx_bw[i] * 100) |
					    BW_VALUE_UNIT_PERCENT1_100);
		}
		data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4);
		memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
		if (qidx == 0) {
			req->queue_id0 = cos2bw.queue_id;
			req->unused_0 = 0;
		}
	}
	return hwrm_req_send(bp, req);
}

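/* Read back the per-CoS-queue scheduler configuration from firmware and
 * fill in the tc_tsa[] and tc_tx_bw[] fields of the ETS structure.
 */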
static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
	struct hwrm_queue_cos2bw_qcfg_output *resp;
	struct hwrm_queue_cos2bw_qcfg_input *req;
	struct bnxt_cos2bw_cfg cos2bw;
	void *data;
	int rc, i;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc) {
		hwrm_req_drop(bp, req);
		return rc;
	}

	data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
	for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw.cfg)) {
		int tc;

		memcpy(&cos2bw.cfg, data, sizeof(cos2bw.cfg));
		if (i == 0)
			cos2bw.queue_id = resp->queue_id0;

		tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
		if (tc < 0)
			continue;

		if (cos2bw.tsa ==
		    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
			ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT;
		} else {
			ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
			ets->tc_tx_bw[tc] = cos2bw.bw_weight;
		}
	}
	hwrm_req_drop(bp, req);
	return 0;
}

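/* Re-assign TCs to hardware queue indices so that every TC in lltc_mask
 * lands on a lossless-capable queue; the remaining TCs take the leftover
 * queues.  If the netdev is up, the NIC is closed and reopened, and any
 * cached ETS configuration is re-applied to firmware.
 */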
static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
{
	unsigned long qmap = 0;
	int max = bp->max_tc;
	int i, j, rc;

	/* Assign lossless TCs first */
	for (i = 0, j = 0; i < max; ) {
		if (lltc_mask & (1 << i)) {
			if (BNXT_LLQ(bp->q_info[j].queue_profile)) {
				bp->tc_to_qidx[i] = j;
				__set_bit(j, &qmap);
				i++;
			}
			j++;
			continue;
		}
		i++;
	}

	for (i = 0, j = 0; i < max; i++) {
		if (lltc_mask & (1 << i))
			continue;
		j = find_next_zero_bit(&qmap, max, j);
		bp->tc_to_qidx[i] = j;
		__set_bit(j, &qmap);
		j++;
	}

	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
		if (rc) {
			netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc);
			return rc;
		}
	}
	if (bp->ieee_ets) {
		int tc = netdev_get_num_tc(bp->dev);

		if (!tc)
			tc = 1;
		rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc);
		if (rc) {
			netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc);
			return rc;
		}
		rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets);
		if (rc) {
			netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc);
			return rc;
		}
	}
	return 0;
}

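/* Enable PFC on the priorities requested in pfc->pfc_en.  The affected TCs
 * must sit on lossless queues; if any of them do not, the TC-to-queue
 * mapping is remapped first.  Fails if more lossless TCs are requested
 * than the hardware supports (bp->max_lltc).
 */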
static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
	struct hwrm_queue_pfcenable_cfg_input *req;
	struct ieee_ets *my_ets = bp->ieee_ets;
	unsigned int tc_mask = 0, pri_mask = 0;
	u8 i, pri, lltc_count = 0;
	bool need_q_remap = false;
	int rc;

	if (!my_ets)
		return -EINVAL;

	for (i = 0; i < bp->max_tc; i++) {
		for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) {
			if ((pfc->pfc_en & (1 << pri)) &&
			    (my_ets->prio_tc[pri] == i)) {
				pri_mask |= 1 << pri;
				tc_mask |= 1 << i;
			}
		}
		if (tc_mask & (1 << i))
			lltc_count++;
	}
	if (lltc_count > bp->max_lltc)
		return -EINVAL;

	for (i = 0; i < bp->max_tc; i++) {
		if (tc_mask & (1 << i)) {
			u8 qidx = bp->tc_to_qidx[i];

			if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {
				need_q_remap = true;
				break;
			}
		}
	}

	if (need_q_remap)
		bnxt_queue_remap(bp, tc_mask);

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_CFG);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(pri_mask);
	return hwrm_req_send(bp, req);
}

static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
	struct hwrm_queue_pfcenable_qcfg_output *resp;
	struct hwrm_queue_pfcenable_qcfg_input *req;
	u8 pri_mask;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc) {
		hwrm_req_drop(bp, req);
		return rc;
	}

	pri_mask = le32_to_cpu(resp->flags);
	pfc->pfc_en = pri_mask;
	hwrm_req_drop(bp, req);
	return 0;
}

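/* Add or remove one entry in the firmware DCBX APP table.  The current
 * table is fetched with HWRM_FW_GET_STRUCTURED_DATA into a DMA slice,
 * modified in place, and written back with HWRM_FW_SET_STRUCTURED_DATA.
 */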
static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
				  bool add)
{
	struct hwrm_fw_set_structured_data_input *set;
	struct hwrm_fw_get_structured_data_input *get;
	struct hwrm_struct_data_dcbx_app *fw_app;
	struct hwrm_struct_hdr *data;
	dma_addr_t mapping;
	size_t data_len;
	int rc, n, i;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	rc = hwrm_req_init(bp, get, HWRM_FW_GET_STRUCTURED_DATA);
	if (rc)
		return rc;

	hwrm_req_hold(bp, get);
	hwrm_req_alloc_flags(bp, get, GFP_KERNEL | __GFP_ZERO);

	n = IEEE_8021QAZ_MAX_TCS;
	data_len = sizeof(*data) + sizeof(*fw_app) * n;
	data = hwrm_req_dma_slice(bp, get, data_len, &mapping);
	if (!data) {
		rc = -ENOMEM;
		goto set_app_exit;
	}

	get->dest_data_addr = cpu_to_le64(mapping);
	get->structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
	get->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
	get->count = 0;
	rc = hwrm_req_send(bp, get);
	if (rc)
		goto set_app_exit;

	fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1);

	if (data->struct_id != cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP)) {
		rc = -ENODEV;
		goto set_app_exit;
	}

	n = data->count;
	for (i = 0; i < n; i++, fw_app++) {
		if (fw_app->protocol_id == cpu_to_be16(app->protocol) &&
		    fw_app->protocol_selector == app->selector &&
		    fw_app->priority == app->priority) {
			if (add)
				goto set_app_exit;
			else
				break;
		}
	}
	if (add) {
		/* append */
		n++;
		fw_app->protocol_id = cpu_to_be16(app->protocol);
		fw_app->protocol_selector = app->selector;
		fw_app->priority = app->priority;
		fw_app->valid = 1;
	} else {
		size_t len = 0;

		/* not found, nothing to delete */
		if (n == i)
			goto set_app_exit;

		len = (n - 1 - i) * sizeof(*fw_app);
		if (len)
			memmove(fw_app, fw_app + 1, len);
		n--;
		memset(fw_app + n, 0, sizeof(*fw_app));
	}
	data->count = n;
	data->len = cpu_to_le16(sizeof(*fw_app) * n);
	data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);

	rc = hwrm_req_init(bp, set, HWRM_FW_SET_STRUCTURED_DATA);
	if (rc)
		goto set_app_exit;

	set->src_data_addr = cpu_to_le64(mapping);
	set->data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
	set->hdr_cnt = 1;
	rc = hwrm_req_send(bp, set);

set_app_exit:
	hwrm_req_drop(bp, get); /* dropping get request and associated slice */
	return rc;
}

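/* Query how many DSCP code points the firmware can remap.  DSCP-based APP
 * entries are only accepted when the full 6-bit range (0x3f) is supported.
 */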
static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
{
	struct hwrm_queue_dscp_qcaps_output *resp;
	struct hwrm_queue_dscp_qcaps_input *req;
	int rc;

	bp->max_dscp_value = 0;
	if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP_QCAPS);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (!rc) {
		bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
		if (bp->max_dscp_value < 0x3f)
			bp->max_dscp_value = 0;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
					bool add)
{
	struct hwrm_queue_dscp2pri_cfg_input *req;
	struct bnxt_dscp2pri_entry *dscp2pri;
	dma_addr_t mapping;
	int rc;

	if (bp->hwrm_spec_code < 0x10800)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP2PRI_CFG);
	if (rc)
		return rc;

	dscp2pri = hwrm_req_dma_slice(bp, req, sizeof(*dscp2pri), &mapping);
	if (!dscp2pri) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}

	req->src_data_addr = cpu_to_le64(mapping);
	dscp2pri->dscp = app->protocol;
	if (add)
		dscp2pri->mask = 0x3f;
	else
		dscp2pri->mask = 0;
	dscp2pri->pri = app->priority;
	req->entry_cnt = cpu_to_le16(1);
	rc = hwrm_req_send(bp, req);
	return rc;
}

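/* Sanity-check an ETS configuration from dcbnl: priority mappings must fit
 * within bp->max_tc, ETS bandwidth shares must not exceed 100%, and no TC
 * may be left with a zero share when the full 100% is already allocated.
 * On success *tc is set to the number of TCs that need to be enabled.
 */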
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
	int total_ets_bw = 0;
	bool zero = false;
	u8 max_tc = 0;
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] > bp->max_tc) {
			netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n",
				   ets->prio_tc[i]);
			return -EINVAL;
		}
		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];

		if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
			return -EINVAL;

		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			break;
		case IEEE_8021QAZ_TSA_ETS:
			total_ets_bw += ets->tc_tx_bw[i];
			zero = zero || !ets->tc_tx_bw[i];
			break;
		default:
			return -ENOTSUPP;
		}
	}
	if (total_ets_bw > 100) {
		netdev_warn(bp->dev, "rejecting ETS config exceeding available bandwidth\n");
		return -EINVAL;
	}
	if (zero && total_ets_bw == 100) {
		netdev_warn(bp->dev, "rejecting ETS config starving a TC\n");
		return -EINVAL;
	}

	if (max_tc >= bp->max_tc)
		*tc = bp->max_tc;
	else
		*tc = max_tc + 1;
	return 0;
}

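/* dcbnl ieee_getets hook: report the cached ETS configuration, querying it
 * from firmware first (and caching it) when DCBX is firmware managed.
 */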
static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_ets *my_ets = bp->ieee_ets;
	int rc;

	ets->ets_cap = bp->max_tc;

	if (!my_ets) {
		if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
			return 0;

		my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
		if (!my_ets)
			return -ENOMEM;
		rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
		if (rc)
			goto error;
		rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
		if (rc)
			goto error;

		/* cache result */
		bp->ieee_ets = my_ets;
	}

	ets->cbs = my_ets->cbs;
	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
	memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
	return 0;
error:
	kfree(my_ets);
	return rc;
}

static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_ets *my_ets = bp->ieee_ets;
	u8 max_tc = 0;
	int rc, i;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_ets_validate(bp, ets, &max_tc);
	if (!rc) {
		if (!my_ets) {
			my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
			if (!my_ets)
				return -ENOMEM;
			/* initialize PRI2TC mappings to invalid value */
			for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
				my_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS;
			bp->ieee_ets = my_ets;
		}
		rc = bnxt_setup_mq_tc(dev, max_tc);
		if (rc)
			return rc;
		rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc);
		if (rc)
			return rc;
		rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets);
		if (rc)
			return rc;
		memcpy(my_ets, ets, sizeof(*my_ets));
	}
	return rc;
}

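/* dcbnl ieee_getpfc hook: report the PFC configuration (cached, or read
 * from firmware when firmware managed) plus per-priority PFC frame
 * counters taken from the port statistics block.
 */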
static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct bnxt *bp = netdev_priv(dev);
	__le64 *stats = bp->port_stats.hw_stats;
	struct ieee_pfc *my_pfc = bp->ieee_pfc;
	long rx_off, tx_off;
	int i, rc;

	pfc->pfc_cap = bp->max_lltc;

	if (!my_pfc) {
		if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
			return 0;

		my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
		if (!my_pfc)
			return 0;
		bp->ieee_pfc = my_pfc;
		rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc);
		if (rc)
			return 0;
	}

	pfc->pfc_en = my_pfc->pfc_en;
	pfc->mbc = my_pfc->mbc;
	pfc->delay = my_pfc->delay;

	if (!stats)
		return 0;

	rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0);
	tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) {
		pfc->requests[i] = le64_to_cpu(*(stats + tx_off));
		pfc->indications[i] = le64_to_cpu(*(stats + rx_off));
	}

	return 0;
}

static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ieee_pfc *my_pfc = bp->ieee_pfc;
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST) ||
	    (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
		return -EINVAL;

	if (!my_pfc) {
		my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
		if (!my_pfc)
			return -ENOMEM;
		bp->ieee_pfc = my_pfc;
	}
	rc = bnxt_hwrm_queue_pfc_cfg(bp, pfc);
	if (!rc)
		memcpy(my_pfc, pfc, sizeof(*my_pfc));

	return rc;
}

static int bnxt_dcbnl_ieee_dscp_app_prep(struct bnxt *bp, struct dcb_app *app)
{
	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) {
		if (!bp->max_dscp_value)
			return -ENOTSUPP;
		if (app->protocol > bp->max_dscp_value)
			return -EINVAL;
	}
	return 0;
}

static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
	if (rc)
		return rc;

	rc = dcb_ieee_setapp(dev, app);
	if (rc)
		return rc;

	if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	     app->protocol == ETH_P_IBOE) ||
	    (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
	     app->protocol == ROCE_V2_UDP_DPORT))
		rc = bnxt_hwrm_set_dcbx_app(bp, app, true);

	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
		rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, true);

	return rc;
}

static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
		return -EINVAL;

	rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app);
	if (rc)
		return rc;

	rc = dcb_ieee_delapp(dev, app);
	if (rc)
		return rc;
	if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	     app->protocol == ETH_P_IBOE) ||
	    (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
	     app->protocol == ROCE_V2_UDP_DPORT))
		rc = bnxt_hwrm_set_dcbx_app(bp, app, false);

	if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
		rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, false);

	return rc;
}

static u8 bnxt_dcbnl_getdcbx(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->dcbx_cap;
}

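/* dcbnl setdcbx hook: only IEEE mode with host-side configuration is
 * accepted, and only when no firmware LLDP/DCBX agent owns the settings.
 * Returns non-zero to reject the requested mode.
 */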
static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct bnxt *bp = netdev_priv(dev);

	/* All firmware DCBX settings are set in NVRAM */
	if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
		return 1;

	if (mode & DCB_CAP_DCBX_HOST) {
		if (BNXT_VF(bp) || (bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
			return 1;

		/* only support IEEE */
		if ((mode & DCB_CAP_DCBX_VER_CEE) ||
		    !(mode & DCB_CAP_DCBX_VER_IEEE))
			return 1;
	}

	if (mode == bp->dcbx_cap)
		return 0;

	bp->dcbx_cap = mode;
	return 0;
}

static const struct dcbnl_rtnl_ops dcbnl_ops = {
	.ieee_getets	= bnxt_dcbnl_ieee_getets,
	.ieee_setets	= bnxt_dcbnl_ieee_setets,
	.ieee_getpfc	= bnxt_dcbnl_ieee_getpfc,
	.ieee_setpfc	= bnxt_dcbnl_ieee_setpfc,
	.ieee_setapp	= bnxt_dcbnl_ieee_setapp,
	.ieee_delapp	= bnxt_dcbnl_ieee_delapp,
	.getdcbx	= bnxt_dcbnl_getdcbx,
	.setdcbx	= bnxt_dcbnl_setdcbx,
};

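/* Register the dcbnl ops and pick the initial DCBX capability: host-managed
 * IEEE mode on a PF without a firmware LLDP agent, otherwise firmware
 * (LLD) managed when the firmware DCBX agent is present.
 */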
void bnxt_dcb_init(struct bnxt *bp)
{
	bp->dcbx_cap = 0;
	if (bp->hwrm_spec_code < 0x10501)
		return;

	bnxt_hwrm_queue_dscp_qcaps(bp);
	bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
		bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
	else if (bp->fw_cap & BNXT_FW_CAP_DCBX_AGENT)
		bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
	bp->dev->dcbnl_ops = &dcbnl_ops;
}

void bnxt_dcb_free(struct bnxt *bp)
{
	kfree(bp->ieee_pfc);
	kfree(bp->ieee_ets);
	bp->ieee_pfc = NULL;
	bp->ieee_ets = NULL;
}

#else

void bnxt_dcb_init(struct bnxt *bp)
{
}

void bnxt_dcb_free(struct bnxt *bp)
{
}

#endif