cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qed_l2.h (11463B)


/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_L2_H
#define _QED_L2_H
#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_eth_if.h>
#include "qed.h"
#include "qed_hw.h"
#include "qed_sp.h"
struct qed_rss_params {
	u8 update_rss_config;
	u8 rss_enable;
	u8 rss_eng_id;
	u8 update_rss_capabilities;
	u8 update_rss_ind_table;
	u8 update_rss_key;
	u8 rss_caps;
	u8 rss_table_size_log;

	/* Indirection table consists of Rx queue handles */
	void *rss_ind_table[QED_RSS_IND_TABLE_SIZE];
	u32 rss_key[QED_RSS_KEY_SIZE];
};
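
/* A minimal sketch of populating struct qed_rss_params before handing it
 * to the vport-update ramrod via struct qed_sp_vport_update_params below.
 * Illustrative only: `rxq_handles' and `n_rxqs' are hypothetical locals,
 * and rss_table_size_log == 7 assumes QED_RSS_IND_TABLE_SIZE is 128:
 *
 *	struct qed_rss_params rss = { 0 };
 *	int i;
 *
 *	rss.update_rss_config = 1;
 *	rss.rss_enable = 1;
 *	rss.update_rss_ind_table = 1;
 *	rss.rss_table_size_log = 7;
 *	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
 *		rss.rss_ind_table[i] = rxq_handles[i % n_rxqs];
 */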

struct qed_sge_tpa_params {
	u8 max_buffers_per_cqe;

	u8 update_tpa_en_flg;
	u8 tpa_ipv4_en_flg;
	u8 tpa_ipv6_en_flg;
	u8 tpa_ipv4_tunn_en_flg;
	u8 tpa_ipv6_tunn_en_flg;

	u8 update_tpa_param_flg;
	u8 tpa_pkt_split_flg;
	u8 tpa_hdr_data_split_flg;
	u8 tpa_gro_consistent_flg;
	u8 tpa_max_aggs_num;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;
};

enum qed_filter_opcode {
	QED_FILTER_ADD,
	QED_FILTER_REMOVE,
	QED_FILTER_MOVE,
	QED_FILTER_REPLACE,	/* Delete all MACs and add a new one instead */
	QED_FILTER_FLUSH,	/* Removes all filters */
};

enum qed_filter_ucast_type {
	QED_FILTER_MAC,
	QED_FILTER_VLAN,
	QED_FILTER_MAC_VLAN,
	QED_FILTER_INNER_MAC,
	QED_FILTER_INNER_VLAN,
	QED_FILTER_INNER_PAIR,
	QED_FILTER_INNER_MAC_VNI_PAIR,
	QED_FILTER_MAC_VNI_PAIR,
	QED_FILTER_VNI,
};

struct qed_filter_ucast {
	enum qed_filter_opcode opcode;
	enum qed_filter_ucast_type type;
	u8 is_rx_filter;
	u8 is_tx_filter;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	unsigned char mac[ETH_ALEN];
	u8 assert_on_error;
	u16 vlan;
	u32 vni;
};

struct qed_filter_mcast {
	/* MOVE is not supported for multicast */
	enum qed_filter_opcode opcode;
	u8 vport_to_add_to;
	u8 vport_to_remove_from;
	u8 num_mc_addrs;
#define QED_MAX_MC_ADDRS        64
	unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};

/**
 * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue.
 *
 * @p_hwfn: HW device data.
 * @p_rxq: Handle of the queue to close.
 * @eq_completion_only: If True, completion will be on EQe; if False,
 *                      completion will be on EQe if the p_hwfn opaque
 *                      differs from the RXQ opaque, otherwise on CQe.
 * @cqe_completion: If True, completion will be received on CQe.
 *
 * Return: Int.
 */
int
qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
		      void *p_rxq,
		      bool eq_completion_only, bool cqe_completion);

/**
 * qed_eth_tx_queue_stop(): Closes a Tx queue.
 *
 * @p_hwfn: HW device data.
 * @p_txq: Handle of the Tx queue to close.
 *
 * Return: Int.
 */
int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);

enum qed_tpa_mode {
	QED_TPA_MODE_NONE,
	QED_TPA_MODE_UNUSED,
	QED_TPA_MODE_GRO,
	QED_TPA_MODE_MAX
};

struct qed_sp_vport_start_params {
	enum qed_tpa_mode tpa_mode;
	bool remove_inner_vlan;
	bool tx_switching;
	bool handle_ptp_pkts;
	bool only_untagged;
	bool drop_ttl0;
	u8 max_buffers_per_cqe;
	u32 concrete_fid;
	u16 opaque_fid;
	u8 vport_id;
	u16 mtu;
	bool check_mac;
	bool check_ethtype;
};

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params);

struct qed_filter_accept_flags {
	u8	update_rx_mode_config;
	u8	update_tx_mode_config;
	u8	rx_accept_filter;
	u8	tx_accept_filter;
#define QED_ACCEPT_NONE         0x01
#define QED_ACCEPT_UCAST_MATCHED        0x02
#define QED_ACCEPT_UCAST_UNMATCHED      0x04
#define QED_ACCEPT_MCAST_MATCHED        0x08
#define QED_ACCEPT_MCAST_UNMATCHED      0x10
#define QED_ACCEPT_BCAST                0x20
#define QED_ACCEPT_ANY_VNI              0x40
};
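
/* The rx/tx accept filters are bitmasks of the QED_ACCEPT_* values above.
 * A minimal sketch of an Rx configuration accepting matched unicast,
 * matched multicast and broadcast (illustrative only; whether a given
 * combination is valid for a device is not guaranteed by this header):
 *
 *	struct qed_filter_accept_flags flags = { 0 };
 *
 *	flags.update_rx_mode_config = 1;
 *	flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
 *				 QED_ACCEPT_MCAST_MATCHED |
 *				 QED_ACCEPT_BCAST;
 */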

struct qed_arfs_config_params {
	bool tcp;
	bool udp;
	bool ipv4;
	bool ipv6;
	enum qed_filter_config_mode mode;
};

struct qed_sp_vport_update_params {
	u16				opaque_fid;
	u8				vport_id;
	u8				update_vport_active_rx_flg;
	u8				vport_active_rx_flg;
	u8				update_vport_active_tx_flg;
	u8				vport_active_tx_flg;
	u8				update_inner_vlan_removal_flg;
	u8				inner_vlan_removal_flg;
	u8				silent_vlan_removal_flg;
	u8				update_default_vlan_enable_flg;
	u8				default_vlan_enable_flg;
	u8				update_default_vlan_flg;
	u16				default_vlan;
	u8				update_tx_switching_flg;
	u8				tx_switching_flg;
	u8				update_approx_mcast_flg;
	u8				update_anti_spoofing_en_flg;
	u8				anti_spoofing_en;
	u8				update_accept_any_vlan_flg;
	u8				accept_any_vlan;
	u32				bins[8];
	struct qed_rss_params		*rss_params;
	struct qed_filter_accept_flags	accept_flags;
	struct qed_sge_tpa_params	*sge_tpa_params;
	u8				update_ctl_frame_check;
	u8				mac_chk_en;
	u8				ethtype_chk_en;
};

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data);

/**
 * qed_sp_vport_stop(): This ramrod closes a VPort after all its
 *                      Rx and Tx queues are terminated.
 *                      An assert is generated if any queues are left open.
 *
 * @p_hwfn: HW device data.
 * @opaque_fid: Opaque FID.
 * @vport_id: VPort ID.
 *
 * Return: Int.
 */
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data);
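
/* A minimal sketch of adding a unicast MAC filter on a vport via
 * qed_sp_eth_filter_ucast(). Illustrative only: `hwfn', `fid', `vport'
 * and `addr' are hypothetical locals, and ether_addr_copy() comes from
 * <linux/etherdevice.h>:
 *
 *	struct qed_filter_ucast cmd = { 0 };
 *	int rc;
 *
 *	cmd.opcode = QED_FILTER_ADD;
 *	cmd.type = QED_FILTER_MAC;
 *	cmd.is_rx_filter = 1;
 *	cmd.is_tx_filter = 1;
 *	cmd.vport_to_add_to = vport;
 *	ether_addr_copy(cmd.mac, addr);
 *
 *	rc = qed_sp_eth_filter_ucast(hwfn, fid, &cmd,
 *				     QED_SPQ_MODE_EBLOCK, NULL);
 */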

/**
 * qed_sp_eth_rx_queues_update(): This ramrod updates an Rx queue.
 *                                It is used for setting the active state
 *                                of the queue and updating the TPA and
 *                                SGE parameters.
 * @p_hwfn: HW device data.
 * @pp_rxq_handlers: An array of queue handlers to be updated.
 * @num_rxqs: Number of queues to update.
 * @complete_cqe_flg: Post completion to the CQE Ring if set.
 * @complete_event_flg: Post completion to the Event Ring if set.
 * @comp_mode: Completion mode.
 * @p_comp_data: Pointer to completion data.
 *
 * Return: Int.
 *
 * Note: At the moment only used by non-Linux VFs.
 */
int
qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
			    void **pp_rxq_handlers,
			    u8 num_rxqs,
			    u8 complete_cqe_flg,
			    u8 complete_event_flg,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data);

void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);

void qed_reset_vport_stats(struct qed_dev *cdev);

/**
 * qed_arfs_mode_configure(): Enable or disable RFS mode.
 *                            To enable RFS mode, at least one of tcp or
 *                            udp and at least one of ipv4 or ipv6 must
 *                            be true.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT entry.
 * @p_cfg_params: aRFS mode configuration parameters.
 *
 * Return: Void.
 */
void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     struct qed_arfs_config_params *p_cfg_params);

/**
 * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add
 *                                    or remove an aRFS HW filter.
 *
 * @p_hwfn: HW device data.
 * @p_cb: Used for QED_SPQ_MODE_CB, where the client would initialize
 *        it with a cookie and a callback function address; if not
 *        using this mode, the client must pass NULL.
 * @p_params: Pointer to params.
 *
 * Return: Int.
 */
int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
				struct qed_spq_comp_cb *p_cb,
				struct qed_ntuple_filter_params *p_params);

#define MAX_QUEUES_PER_QZONE    (sizeof(unsigned long) * 8)
#define QED_QUEUE_CID_SELF	(0xff)

/* Almost identical to the qed_queue_start_common_params,
 * but here we maintain the SB index in IGU CAM.
 */
struct qed_queue_cid_params {
	u8 vport_id;
	u16 queue_id;
	u8 stats_id;
};

/* Additional parameters required for initialization of the queue_cid
 * and relevant only for a PF initializing one for its VFs.
 */
struct qed_queue_cid_vf_params {
	/* Should match the VF's relative index */
	u8 vfid;

	/* 0-based queue index. Should reflect the relative qzone the
	 * VF thinks is associated with it [in its range].
	 */
	u8 vf_qid;

	/* Indicates a VF is legacy, making it differ in several things:
	 *  - Producers would be placed in a different place.
	 *  - Makes assumptions regarding the CIDs.
	 */
	u8 vf_legacy;

	u8 qid_usage_idx;
};

struct qed_queue_cid {
	/* For stats-id, the `rel' is actually absolute as well */
	struct qed_queue_cid_params rel;
	struct qed_queue_cid_params abs;

	/* These have no 'relative' meaning */
	u16 sb_igu_id;
	u8 sb_idx;

	u32 cid;
	u16 opaque_fid;

	bool b_is_rx;

	/* VF queues are mapped differently, so we need to know the
	 * relative queue associated with them [0-based].
	 * Notice this is relevant on the *PF* queue-cid of its VF's queues,
	 * and not on the VF itself.
	 */
	u8 vfid;
	u8 vf_qid;

	/* We need an additional index to differentiate between queues opened
	 * for the same queue-zone, as VFs would have to communicate the info
	 * to the PF [otherwise the PF has no way to differentiate].
	 */
	u8 qid_usage_idx;

	u8 vf_legacy;
#define QED_QCID_LEGACY_VF_RX_PROD	(BIT(0))
#define QED_QCID_LEGACY_VF_CID		(BIT(1))

	struct qed_hwfn *p_owner;
};
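
/* vf_legacy is a bitmask of the QED_QCID_LEGACY_VF_* values above. A
 * minimal sketch of testing it (illustrative only; `cid' is a hypothetical
 * pointer to a populated struct qed_queue_cid):
 *
 *	if (cid->vf_legacy & QED_QCID_LEGACY_VF_RX_PROD) {
 *		// Legacy VF: the Rx producer lives in its legacy location.
 *	}
 */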

int qed_l2_alloc(struct qed_hwfn *p_hwfn);
void qed_l2_setup(struct qed_hwfn *p_hwfn);
void qed_l2_free(struct qed_hwfn *p_hwfn);

void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
			       struct qed_queue_cid *p_cid);

struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		     u16 opaque_fid,
		     struct qed_queue_start_common_params *p_params,
		     bool b_is_rx,
		     struct qed_queue_cid_vf_params *p_vf_params);

/**
 * qed_eth_rxq_start_ramrod(): Starts an Rx queue, where the queue_cid is
 *                             already prepared.
 *
 * @p_hwfn: HW device data.
 * @p_cid: Pointer to the queue CID.
 * @bd_max_bytes: Max bytes.
 * @bd_chain_phys_addr: Chain physical address.
 * @cqe_pbl_addr: PBL address.
 * @cqe_pbl_size: PBL size.
 *
 * Return: Int.
 */
int
qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
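
/* One plausible flow, matching the "already prepared" wording above:
 * obtain a queue CID, then issue the start ramrod. Illustrative only;
 * `hwfn', `fid', `params', `bd_bytes', `bd_phys', `pbl_phys' and
 * `pbl_size' are hypothetical locals:
 *
 *	struct qed_queue_cid *cid;
 *	int rc;
 *
 *	cid = qed_eth_queue_to_cid(hwfn, fid, &params, true, NULL);
 *	if (!cid)
 *		return -ENOMEM;
 *
 *	rc = qed_eth_rxq_start_ramrod(hwfn, cid, bd_bytes,
 *				      bd_phys, pbl_phys, pbl_size);
 *	if (rc)
 *		qed_eth_queue_cid_release(hwfn, cid);
 */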

/**
 * qed_eth_txq_start_ramrod(): Starts a Tx queue, where the queue_cid is
 *                             already prepared.
 *
 * @p_hwfn: HW device data.
 * @p_cid: Pointer to the queue CID.
 * @pbl_addr: PBL address.
 * @pbl_size: PBL size.
 * @pq_id: Parameters for choosing the PQ for this Tx queue.
 *
 * Return: Int.
 */
int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id);

u8 qed_mcast_bin_from_mac(u8 *mac);
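
/* qed_mcast_bin_from_mac() hashes a multicast MAC to a bin index; its u8
 * return value selects one of up to 256 bins, matching the 256 bits of the
 * bins[8] array in struct qed_sp_vport_update_params. A minimal sketch of
 * marking the bins for a set of addresses (illustrative only; `mcast' is a
 * hypothetical populated struct qed_filter_mcast):
 *
 *	u32 bins[8] = { 0 };
 *	int i;
 *
 *	for (i = 0; i < mcast.num_mc_addrs; i++) {
 *		u8 bin = qed_mcast_bin_from_mac(mcast.mac[i]);
 *
 *		bins[bin / 32] |= 1 << (bin % 32);
 *	}
 */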

int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 u16 coalesce, struct qed_queue_cid *p_cid);

int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 u16 coalesce, struct qed_queue_cid *p_cid);

int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_queue_cid *p_cid, u16 *p_hw_coal);

int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_queue_cid *p_cid, u16 *p_hw_coal);
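
/* A minimal sketch of reading back and adjusting the Rx coalescing value
 * for a queue (illustrative only; `hwfn', `ptt' and `cid' are hypothetical
 * locals, and the value is in hardware-defined units):
 *
 *	u16 coal;
 *	int rc;
 *
 *	rc = qed_get_rxq_coalesce(hwfn, ptt, cid, &coal);
 *	if (!rc)
 *		rc = qed_set_rxq_coalesce(hwfn, ptt, coal * 2, cid);
 */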

#endif