qed_sriov.h
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"

#define QED_ETH_VF_NUM_MAC_FILTERS 1
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_VF_ARRAY_LENGTH (3)

#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev)		((cdev)->b_is_vf)
#define IS_PF(cdev)		(!((cdev)->b_is_vf))
#define IS_PF_SRIOV(p_hwfn)	(!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev)		(0)
#define IS_PF(cdev)		(1)
#define IS_PF_SRIOV(p_hwfn)	(0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn)	(!!((p_hwfn)->pf_iov_info))

#define QED_MAX_VF_CHAINS_PER_PF 16

#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS	\
	(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)

enum qed_iov_vport_update_flag {
	QED_IOV_VP_UPDATE_ACTIVATE,
	QED_IOV_VP_UPDATE_VLAN_STRIP,
	QED_IOV_VP_UPDATE_TX_SWITCH,
	QED_IOV_VP_UPDATE_MCAST,
	QED_IOV_VP_UPDATE_ACCEPT_PARAM,
	QED_IOV_VP_UPDATE_RSS,
	QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
	QED_IOV_VP_UPDATE_SGE_TPA,
	QED_IOV_VP_UPDATE_MAX,
};

struct qed_public_vf_info {
	/* These copies will later be reflected in the bulletin board,
	 * but this copy should be newer.
	 */
	u8 forced_mac[ETH_ALEN];
	u16 forced_vlan;
	u8 mac[ETH_ALEN];

	/* IFLA_VF_LINK_STATE_<X> */
	int link_state;

	/* Currently configured Tx rate in MB/sec. 0 if unconfigured */
	int tx_rate;

	/* Trusted VFs can configure promiscuous mode.
	 * Also store shadow promisc configuration if needed.
	 */
	bool is_trusted_configured;
	bool is_trusted_request;
	u8 rx_accept_mode;
	u8 tx_accept_mode;
	bool accept_any_vlan;
};

struct qed_iov_vf_init_params {
	u16 rel_vf_id;

	/* Number of requested queues; currently a different number of
	 * Rx/Tx queues is not supported.
	 */
	u16 num_queues;

	/* Allow the client to choose which qzones to use for Rx/Tx,
	 * and which queue_base to use for Tx queues on a per-queue basis.
	 * Notice values should be relative to the PF resources.
	 */
	u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF];
	u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF];
};
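
/* A minimal usage sketch (illustrative only; the consuming PF-side init
 * routine lives in qed_sriov.c and is not declared in this header): a
 * caller fills qed_iov_vf_init_params before starting a VF, with queue
 * indices given relative to the PF's own resources:
 *
 *	struct qed_iov_vf_init_params params;
 *	u16 i;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.rel_vf_id = 0;
 *	params.num_queues = 4;
 *	for (i = 0; i < params.num_queues; i++) {
 *		params.req_rx_queue[i] = i;
 *		params.req_tx_queue[i] = i;
 *	}
 */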

/* This struct is part of qed_dev and contains data relevant to all hwfns;
 * Initialized only if the SR-IOV capability is exposed in PCIe config space.
 */
struct qed_hw_sriov_info {
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total_vfs;		/* total VFs associated with the PF */
	u16 num_vfs;		/* number of VFs that have been started */
	u16 initial_vfs;	/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u16 vf_device_id;	/* VF device ID */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */

	u32 first_vf_in_pf;
};

/* This mailbox is maintained per VF in its PF and contains all the
 * information required for sending / receiving a message.
 */
struct qed_iov_vf_mbx {
	union vfpf_tlvs *req_virt;
	dma_addr_t req_phys;
	union pfvf_tlvs *reply_virt;
	dma_addr_t reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t pending_req;

	/* Message from VF awaits handling */
	bool b_pending_msg;

	u8 *offset;

	/* saved VF request header */
	struct vfpf_first_tlv first_tlv;
};

#define QED_IOV_LEGACY_QID_RX (0)
#define QED_IOV_LEGACY_QID_TX (1)
#define QED_IOV_QID_INVALID (0xFE)

struct qed_vf_queue_cid {
	bool b_is_tx;
	struct qed_queue_cid *p_cid;
};

/* Describes a qzone associated with the VF */
struct qed_vf_queue {
	u16 fw_rx_qid;
	u16 fw_tx_qid;

	struct qed_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};

enum vf_state {
	VF_FREE = 0,		/* VF ready to be acquired; holds no resources */
	VF_ACQUIRED,		/* VF, acquired, but not initialized */
	VF_ENABLED,		/* VF, Enabled */
	VF_RESET,		/* VF, FLR'd, pending cleanup */
	VF_STOPPED		/* VF, Stopped */
};

struct qed_vf_vlan_shadow {
	bool used;
	u16 vid;
};

struct qed_vf_shadow_config {
	/* Shadow copy of all guest vlans */
	struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];

	/* Shadow copy of all configured MACs; Empty if forcing MACs */
	u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
	u8 inner_vlan_removal;
};

/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
	struct qed_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	bool b_malicious;
	u8 to_disable;

	struct qed_bulletin bulletin;
	dma_addr_t vf_bulletin;

	/* PF saves a copy of the last VF acquire message */
	struct vfpf_acquire_tlv acquire;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 mtu;

	u8 vport_id;
	u8 relative_vf_id;
	u8 abs_vf_id;
#define QED_VF_ABS_ID(p_hwfn, p_vf)	(QED_PATH_ID(p_hwfn) ?		      \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)

	u8 vport_instance;
	u8 num_rxqs;
	u8 num_txqs;

	u16 rx_coal;
	u16 tx_coal;

	u8 num_sbs;

	u8 num_mac_filters;
	u8 num_vlan_filters;

	struct qed_vf_queue vf_queues[QED_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
	u8 num_active_rxqs;
	struct qed_public_vf_info p_vf_info;
	bool spoof_chk;
	bool req_spoofchk_val;

	/* Stores the configuration requested by VF */
	struct qed_vf_shadow_config shadow_config;

	/* A bitfield using bulletin's valid-map bits, used to indicate
	 * which of the bulletin board features have been configured.
	 */
	u64 configured_features;
#define QED_IOV_CONFIGURED_FEATURES_MASK	((1 << MAC_ADDR_FORCED) | \
						 (1 << VLAN_ADDR_FORCED))
};
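
/* Illustrative sketch (an assumption about usage, not a declared API):
 * since configured_features reuses the bulletin's valid-map bit positions,
 * PF code can test whether a forced MAC was already applied to HW with:
 *
 *	bool mac_is_forced;
 *
 *	mac_is_forced = !!(p_vf->configured_features &
 *			   (1 << MAC_ADDR_FORCED));
 */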

/* This structure is part of qed_hwfn and used only for PFs that have the
 * SR-IOV capability enabled.
 */
struct qed_pf_iov {
	struct qed_vf_info vfs_array[MAX_NUM_VFS];
	u64 pending_flr[QED_VF_ARRAY_LENGTH];

	/* Mailbox message buffers are allocated contiguously and then
	 * split between the VFs.
	 */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};

enum qed_iov_wq_flag {
	QED_IOV_WQ_MSG_FLAG,
	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
	QED_IOV_WQ_STOP_WQ_FLAG,
	QED_IOV_WQ_FLR_FLAG,
	QED_IOV_WQ_TRUST_FLAG,
	QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
};

extern const struct qed_iov_hv_ops qed_iov_ops_pass;

#ifdef CONFIG_QED_SRIOV
/**
 * qed_iov_is_valid_vfid(): Check whether the given @rel_vf_id is valid
 *                          w.r.t. @b_enabled_only: if @b_enabled_only is
 *                          true, only an enabled VF ID is valid; otherwise
 *                          any VF ID below max_vfs is valid.
 *
 * @p_hwfn: HW device data.
 * @rel_vf_id: Relative VF ID.
 * @b_enabled_only: Consider only enabled VFs.
 * @b_non_malicious: True iff the VF must additionally not be marked
 *                   malicious.
 *
 * Return: True for a valid VF ID.
 */
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id,
			   bool b_enabled_only, bool b_non_malicious);

/**
 * qed_iov_get_next_active_vf(): Given a VF index, return the index of the
 *                               next active VF (including that index).
 *
 * @p_hwfn: HW device data.
 * @rel_vf_id: VF ID.
 *
 * Return: MAX_NUM_VFS if there are no further active VFs, otherwise the
 *         index.
 */
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);

void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
				    int vfid, u16 vxlan_port, u16 geneve_port);

/**
 * qed_iov_hw_info(): Read SR-IOV related information and allocate
 *                    resources; reads from configuration space, shmem, etc.
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);

/**
 * qed_add_tlv(): Place a given TLV on the TLV buffer at the next offset.
 *
 * @p_hwfn: HW device data.
 * @offset: Current offset into the buffer; advanced past the new TLV.
 * @type: TLV type.
 * @length: TLV length.
 *
 * Return: Pointer to the newly placed TLV.
 */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);

/**
 * qed_dp_tlv_list(): List the types and lengths of the TLVs on the buffer.
 *
 * @p_hwfn: HW device data.
 * @tlvs_list: TLV list buffer.
 *
 * Return: Void.
 */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);

/**
 * qed_sriov_vfpf_malicious(): Handle malicious VF/PF.
 *
 * @p_hwfn: HW device data.
 * @p_data: Pointer to the firmware error data.
 *
 * Return: Void.
 */
void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
			      struct fw_err_data *p_data);

/**
 * qed_sriov_eqe_event(): Callback for SRIOV events.
 *
 * @p_hwfn: HW device data.
 * @opcode: Opcode.
 * @echo: Echo.
 * @data: Event ring data.
 * @fw_return_code: FW return code.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
			union event_ring_data *data, u8 fw_return_code);
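
/* Usage sketch for the TLV helpers (illustrative; the CHANNEL_TLV_* values
 * and struct channel_list_end_tlv are assumed from the vfpf channel
 * definitions in qed_vf.h). A reply is built by chaining TLVs through the
 * mailbox's offset cursor and then terminating the list:
 *
 *	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
 *	struct pfvf_def_resp_tlv *resp;
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_VPORT_START,
 *			   sizeof(*resp));
 *	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */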

/**
 * qed_iov_alloc(): Allocate SR-IOV related resources.
 *
 * @p_hwfn: HW device data.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int qed_iov_alloc(struct qed_hwfn *p_hwfn);

/**
 * qed_iov_setup(): Setup SR-IOV related resources.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_iov_setup(struct qed_hwfn *p_hwfn);

/**
 * qed_iov_free(): Free SR-IOV related resources.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_iov_free(struct qed_hwfn *p_hwfn);

/**
 * qed_iov_free_hw_info(): Free SR-IOV related memory that was
 *                         allocated during hw_prepare.
 *
 * @cdev: Qed dev pointer.
 *
 * Return: Void.
 */
void qed_iov_free_hw_info(struct qed_dev *cdev);

/**
 * qed_iov_mark_vf_flr(): Mark the structs of VFs that have been FLR-ed.
 *
 * @p_hwfn: HW device data.
 * @disabled_vfs: Bitmask of all VFs on the path that were FLR-ed.
 *
 * Return: True iff one of the PF's VFs got FLR-ed, false otherwise.
 */
bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);

/**
 * qed_iov_search_list_tlvs(): Search extended TLVs in a request/reply
 *                             buffer.
 *
 * @p_hwfn: HW device data.
 * @p_tlvs_list: Pointer to the TLV list.
 * @req_type: Type of TLV.
 *
 * Return: Pointer to the TLV if found, otherwise NULL.
 */
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type);

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);

void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline bool
qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
		      int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
{
	return false;
}

static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
					     u16 rel_vf_id)
{
	return MAX_NUM_VFS;
}

static inline void
qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
			       u16 vxlan_port, u16 geneve_port)
{
}

static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}

static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
				       u32 *disabled_vfs)
{
	return false;
}

static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}

static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
	return 0;
}

static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
				    enum qed_iov_wq_flag flag)
{
}

static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}

static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	return 0;
}

static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}

static inline void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
					    struct fw_err_data *p_data)
{
}

static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode,
				      __le16 echo, union event_ring_data *data,
				      u8 fw_return_code)
{
	return 0;
}
#endif
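
/* Iteration helper defined below: walks all active VFs on the hwfn,
 * relying on qed_iov_get_next_active_vf() returning MAX_NUM_VFS once the
 * active VFs are exhausted. A minimal usage sketch (illustrative, assuming
 * pf_iov_info has been allocated, i.e. IS_PF_SRIOV_ALLOC() holds):
 *
 *	int i;
 *
 *	qed_for_each_vf(p_hwfn, i) {
 *		struct qed_vf_info *vf =
 *			&p_hwfn->pf_iov_info->vfs_array[i];
 *		...
 *	}
 */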

#define qed_for_each_vf(_p_hwfn, _i)				  \
	for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0);	  \
	     _i < MAX_NUM_VFS;					  \
	     _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))

#endif