cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ice_eswitch.c (17654B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h"
#include "ice_tc_lib.h"

/**
 * ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC
 * @pf: pointer to PF struct
 * @vf: pointer to VF struct
 * @mac: VF's MAC address
 *
 * This function adds an advanced rule that forwards packets with the
 * VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue.
 */
int
ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_adv_lkup_elem *list;
	struct ice_hw *hw = &pf->hw;
	const u16 lkups_cnt = 1;
	int err;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list[0].type = ICE_MAC_OFOS;
	ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
	eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);

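	/* Tx-direction rule: forward frames sourced from this MAC to the
	 * control VSI Rx queue mapped to this VF, with loopback enabled so
	 * the frame reaches the port representor.
	 */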
	rule_info.sw_act.flag |= ICE_FLTR_TX;
	rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
	rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
	rule_info.rx = false;
	rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
				       ctrl_vsi->rxq_map[vf->vf_id];
	rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
	rule_info.flags_info.act_valid = true;
	rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
			       vf->repr->mac_rule);
	if (err)
		dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d",
			vf->vf_id);
	else
		vf->repr->rule_added = true;

	kfree(list);
	return err;
}

/**
 * ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC
 * @vf: pointer to VF struct
 *
 * This function replays the VF's MAC rule after reset.
 */
void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
{
	int err;

	if (!ice_is_switchdev_running(vf->pf))
		return;

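	/* Replay only makes sense once the VF has a valid hardware MAC;
	 * on success the device address is re-synced to it below.
	 */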
	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
		err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
						  vf->hw_lan_addr.addr);
		if (err) {
			dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d, error %d\n",
				vf->hw_lan_addr.addr, vf->vf_id, err);
			return;
		}
		vf->num_mac++;

		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
	}
}

/**
 * ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC
 * @vf: pointer to the VF struct
 *
 * Delete the advanced rule that was used to forward packets with the VF's MAC
 * address (src MAC) to the corresponding switchdev ctrl VSI queue.
 */
void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf)
{
	if (!ice_is_switchdev_running(vf->pf))
		return;

	if (!vf->repr->rule_added)
		return;

	ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule);
	vf->repr->rule_added = false;
}

/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function adds the HW filter configuration specific to switchdev
 * mode.
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct net_device *uplink_netdev = uplink_vsi->netdev;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_vsi_vlan_ops *vlan_ops;
	bool rule_added = false;

	vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
	if (vlan_ops->dis_stripping(ctrl_vsi))
		return -ENODEV;

	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

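	/* Drop all MAC filters synced from the stack onto the uplink; in
	 * switchdev mode the uplink receives its traffic via the default-VSI
	 * rule set up below rather than via individual MAC filters.
	 */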
	netif_addr_lock_bh(uplink_netdev);
	__dev_uc_unsync(uplink_netdev, NULL);
	__dev_mc_unsync(uplink_netdev, NULL);
	netif_addr_unlock_bh(uplink_netdev);

	if (ice_vsi_add_vlan_zero(uplink_vsi))
		goto err_def_rx;

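	/* Make the uplink the default VSI so Rx traffic that matches no
	 * other switch rule is delivered to it.
	 */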
	if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
		if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	return 0;

err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi->vsw);
err_def_rx:
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}

/**
 * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
 * @pf: pointer to PF struct
 *
 * In switchdev mode the numbers of allocated Tx and Rx rings are equal.
 *
 * This function fills the q_vector structures associated with each
 * representor and moves each ring pair to the port representor's netdev.
 * Every port representor gets one dedicated Tx/Rx ring pair, so the number
 * of ring pairs equals the number of VFs.
 */
static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
{
	struct ice_vsi *vsi = pf->switchdev.control_vsi;
	int q_id;

	ice_for_each_txq(vsi, q_id) {
		struct ice_q_vector *q_vector;
		struct ice_tx_ring *tx_ring;
		struct ice_rx_ring *rx_ring;
		struct ice_repr *repr;
		struct ice_vf *vf;

		vf = ice_get_vf_by_id(pf, q_id);
		if (WARN_ON(!vf))
			continue;

		repr = vf->repr;
		q_vector = repr->q_vector;
		tx_ring = vsi->tx_rings[q_id];
		rx_ring = vsi->rx_rings[q_id];

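		/* Hand this ring pair to the representor: one Tx and one Rx
		 * ring per port representor, serviced by its own q_vector.
		 */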
		q_vector->vsi = vsi;
		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;

		q_vector->num_ring_tx = 1;
		q_vector->tx.tx_ring = tx_ring;
		tx_ring->q_vector = q_vector;
		tx_ring->next = NULL;
		tx_ring->netdev = repr->netdev;
		/* In switchdev mode, from OS stack perspective, there is only
		 * one queue for given netdev, so it needs to be indexed as 0.
		 */
		tx_ring->q_index = 0;

		q_vector->num_ring_rx = 1;
		q_vector->rx.rx_ring = rx_ring;
		rx_ring->q_vector = q_vector;
		rx_ring->next = NULL;
		rx_ring->netdev = repr->netdev;

		ice_put_vf(vf);
	}
}

/**
 * ice_eswitch_release_reprs - clear PR VSIs configuration
 * @pf: pointer to PF struct
 * @ctrl_vsi: pointer to switchdev control VSI
 */
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		/* Skip VFs that aren't configured */
		if (!vf->repr->dst)
			continue;

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		vf->repr->dst = NULL;
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);

		netif_napi_del(&vf->repr->q_vector->napi);
	}
}

/**
 * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
 * @pf: pointer to PF struct
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

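		/* Replace the VF's MAC filter with a metadata dst used to
		 * steer representor Tx traffic; every error path below
		 * restores the MAC/broadcast filter it removed here.
		 */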
		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						   GFP_KERNEL);
		if (!vf->repr->dst) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			goto err;
		}

		if (ice_vsi_add_vlan_zero(vsi)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			vf->repr->dst = NULL;
			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
			goto err;
		}

		if (max_vsi_num < vsi->vsi_num)
			max_vsi_num = vsi->vsi_num;

		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

		netif_keep_dst(vf->repr->netdev);
	}

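	/* Second pass: publish each representor's port id and lower dev in
	 * its metadata dst and point its traffic at the control VSI.
	 */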
	ice_for_each_vf(pf, bkt, vf) {
		struct ice_repr *repr = vf->repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;
		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
	}

	return 0;

err:
	ice_eswitch_release_reprs(pf, ctrl_vsi);

	return -ENODEV;
}

/**
 * ice_eswitch_update_repr - reconfigure VF port representor
 * @vsi: VF VSI for which port representor is configured
 */
void ice_eswitch_update_repr(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_repr *repr;
	struct ice_vf *vf;
	int ret;

	if (!ice_is_switchdev_running(pf))
		return;

	vf = vsi->vf;
	repr = vf->repr;
	repr->src_vsi = vsi;
	repr->dst->u.port_info.port_id = vsi->vsi_num;

	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
	if (ret) {
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
			vsi->vf->vf_id);
	}
}

/**
 * ice_eswitch_port_start_xmit - callback for packet transmit
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np;
	struct ice_repr *repr;
	struct ice_vsi *vsi;

	np = netdev_priv(netdev);
	vsi = np->vsi;

	if (ice_is_reset_in_progress(vsi->back->state) ||
	    test_bit(ICE_VF_DIS, vsi->back->state))
		return NETDEV_TX_BUSY;

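	/* Attach the representor's metadata dst so the Tx path targets the
	 * VF's VSI, and use the VF ID to pick the control VSI Tx queue.
	 */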
	repr = ice_netdev_to_repr(netdev);
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->queue_mapping = repr->vf->vf_id;

	return ice_start_xmit(skb, netdev);
}

/**
 * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
 * @skb: pointer to send buffer
 * @off: pointer to offload struct
 */
void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
			   struct ice_tx_offload_params *off)
{
	struct metadata_dst *dst = skb_metadata_dst(skb);
	u64 cd_cmd, dst_vsi;

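	/* No metadata dst: send via the uplink. Otherwise redirect the
	 * frame to the VSI identified by the dst's port_id.
	 */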
	if (!dst) {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
		off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
	} else {
		cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
		dst_vsi = ((u64)dst->u.port_info.port_id <<
			   ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
		off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
	}
}

/**
 * ice_eswitch_release_env - clear switchdev HW filters
 * @pf: pointer to PF struct
 *
 * This function removes the HW filter configuration specific to switchdev
 * mode and restores the default legacy mode settings.
 */
static void ice_eswitch_release_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	ice_clear_dflt_vsi(uplink_vsi->vsw);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
}

/**
 * ice_eswitch_vsi_setup - configure switchdev control VSI
 * @pf: pointer to PF structure
 * @pi: pointer to port_info structure
 */
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
	return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL);
}

/**
 * ice_eswitch_napi_del - remove NAPI handle for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		netif_napi_del(&vf->repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_enable - enable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		napi_enable(&vf->repr->q_vector->napi);
}

/**
 * ice_eswitch_napi_disable - disable NAPI for all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	ice_for_each_vf(pf, bkt, vf)
		napi_disable(&vf->repr->q_vector->napi);
}

/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi;

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
	if (!pf->switchdev.uplink_vsi)
		goto err_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	ice_eswitch_napi_enable(pf);

	return 0;

err_setup_reprs:
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}

/**
 * ice_eswitch_disable_switchdev - disable switchdev resources
 * @pf: pointer to PF structure
 */
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_release_env(pf);
	ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
}

/**
 * ice_eswitch_mode_set - set new eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: eswitch mode to switch to
 * @extack: pointer to extack structure
 */
int
ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
		     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	if (pf->eswitch_mode == mode)
		return 0;

	if (ice_has_vfs(pf)) {
		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only when there are no VFs created");
		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only when there are no VFs created");
		return -EOPNOTSUPP;
	}

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
	{
		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
			 pf->hw.pf_id);
		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
		break;
	}
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
		return -EINVAL;
	}

	pf->eswitch_mode = mode;
	return 0;
}

/**
 * ice_eswitch_mode_get - get current eswitch mode
 * @devlink: pointer to devlink structure
 * @mode: output parameter for current eswitch mode
 */
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct ice_pf *pf = devlink_priv(devlink);

	*mode = pf->eswitch_mode;
	return 0;
}

/**
 * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
 * false otherwise.
 */
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
{
	return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

/**
 * ice_eswitch_release - cleanup eswitch
 * @pf: pointer to PF structure
 */
void ice_eswitch_release(struct ice_pf *pf)
{
	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return;

	ice_eswitch_disable_switchdev(pf);
	pf->switchdev.is_running = false;
}

/**
 * ice_eswitch_configure - configure eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_configure(struct ice_pf *pf)
{
	int status;

	if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
		return 0;

	status = ice_eswitch_enable_switchdev(pf);
	if (status)
		return status;

	pf->switchdev.is_running = true;
	return 0;
}

/**
 * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
 * @pf: pointer to PF structure
 */
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, bkt, vf) {
		if (vf->repr)
			ice_repr_start_tx_queues(vf->repr);
	}
}

/**
 * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
 * @pf: pointer to PF structure
 */
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	lockdep_assert_held(&pf->vfs.table_lock);

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, bkt, vf) {
		if (vf->repr)
			ice_repr_stop_tx_queues(vf->repr);
	}
}

/**
 * ice_eswitch_rebuild - rebuild eswitch
 * @pf: pointer to PF structure
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int status;

	ice_eswitch_napi_disable(pf);
	ice_eswitch_napi_del(pf);

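	/* Rebuild the switchdev environment from scratch; NAPI instances are
	 * deleted above because ice_eswitch_setup_reprs() registers them again.
	 */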
	status = ice_eswitch_setup_env(pf);
	if (status)
		return status;

	status = ice_eswitch_setup_reprs(pf);
	if (status)
		return status;

	ice_eswitch_remap_rings_to_vectors(pf);

	ice_replay_tc_fltrs(pf);

	status = ice_vsi_open(ctrl_vsi);
	if (status)
		return status;

	ice_eswitch_napi_enable(pf);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
}