cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

driver.h (34455B)


/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/auxiliary_bus.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>

#define MLX5_ADEV_NAME "mlx5_core"

#define MLX5_IRQ_EQ_CTRL (U8_MAX)

enum {
	MLX5_BOARD_ID_LEN = 64,
};

enum {
	MLX5_CMD_WQ_MAX_NAME	= 32,
};

enum {
	CMD_OWNER_SW		= 0x0,
	CMD_OWNER_HW		= 0x1,
	CMD_STATUS_SUCCESS	= 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_SMI		= 0,
	MLX5_SQP_GSI		= 1,
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SNIFFER	= 3,
	MLX5_SQP_SYNC_UMR	= 4,
};

enum {
	MLX5_MAX_PORTS	= 4,
};

enum {
	MLX5_ATOMIC_MODE_OFFSET = 16,
	MLX5_ATOMIC_MODE_IB_COMP = 1,
	MLX5_ATOMIC_MODE_CX = 2,
	MLX5_ATOMIC_MODE_8B = 3,
	MLX5_ATOMIC_MODE_16B = 4,
	MLX5_ATOMIC_MODE_32B = 5,
	MLX5_ATOMIC_MODE_64B = 6,
	MLX5_ATOMIC_MODE_128B = 7,
	MLX5_ATOMIC_MODE_256B = 8,
};

enum {
	MLX5_REG_QPTS            = 0x4002,
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_QPDPM           = 0x4013,
	MLX5_REG_QCAM            = 0x4019,
	MLX5_REG_DCBX_PARAM      = 0x4020,
	MLX5_REG_DCBX_APP        = 0x4021,
	MLX5_REG_FPGA_CAP	 = 0x4022,
	MLX5_REG_FPGA_CTRL	 = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_CORE_DUMP	 = 0x402e,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC            = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PPTB            = 0x500b,
	MLX5_REG_PBMC            = 0x500c,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PCMR		 = 0x5041,
	MLX5_REG_PDDR		 = 0x5031,
	MLX5_REG_PMLP		 = 0x5002,
	MLX5_REG_PPLM		 = 0x5023,
	MLX5_REG_PCAM		 = 0x507f,
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA		 = 0x9014,
	MLX5_REG_MFRL		 = 0x9028,
	MLX5_REG_MLCR		 = 0x902b,
	MLX5_REG_MRTC		 = 0x902d,
	MLX5_REG_MTRC_CAP	 = 0x9040,
	MLX5_REG_MTRC_CONF	 = 0x9041,
	MLX5_REG_MTRC_STDB	 = 0x9042,
	MLX5_REG_MTRC_CTRL	 = 0x9043,
	MLX5_REG_MPEIN		 = 0x9050,
	MLX5_REG_MPCNT		 = 0x9051,
	MLX5_REG_MTPPS		 = 0x9053,
	MLX5_REG_MTPPSE		 = 0x9054,
	MLX5_REG_MTUTC		 = 0x9055,
	MLX5_REG_MPEGC		 = 0x9056,
	MLX5_REG_MCQS		 = 0x9060,
	MLX5_REG_MCQI		 = 0x9061,
	MLX5_REG_MCC		 = 0x9062,
	MLX5_REG_MCDA		 = 0x9063,
	MLX5_REG_MCAM		 = 0x907f,
	MLX5_REG_MIRC		 = 0x9162,
	MLX5_REG_SBCAM		 = 0xB01F,
	MLX5_REG_RESOURCE_DUMP   = 0xC000,
	MLX5_REG_DTOR            = 0xC00E,
};

enum mlx5_qpts_trust_state {
	MLX5_QPTS_TRUST_PCP  = 1,
	MLX5_QPTS_TRUST_DSCP = 2,
};

enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST  = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO  = 0x3,
};

enum {
	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
	MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
	MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
};

enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};

enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_UP		= 1,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};

enum mlx5_coredev_type {
	MLX5_COREDEV_PF,
	MLX5_COREDEV_VF,
	MLX5_COREDEV_SF,
};

struct mlx5_field_desc {
	int			i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev   *dev;
	void		       *object;
	enum dbg_rsc_type	type;
	struct dentry	       *root;
	struct mlx5_field_desc	fields[];
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
	MLX5_DEV_EVENT_PORT_AFFINITY = 129,
};

enum mlx5_port_status {
	MLX5_PORT_UP        = 1,
	MLX5_PORT_DOWN      = 2,
};

enum mlx5_cmdif_state {
	MLX5_CMDIF_STATE_UNINITIALIZED,
	MLX5_CMDIF_STATE_UP,
	MLX5_CMDIF_STATE_DOWN,
};

struct mlx5_cmd_first {
	__be32		data[4];
};

struct mlx5_cmd_msg {
	struct list_head		list;
	struct cmd_msg_cache	       *parent;
	u32				len;
	struct mlx5_cmd_first		first;
	struct mlx5_cmd_mailbox	       *next;
};

struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;
	void		       *in_msg;
	void		       *out_msg;
	u8			status;
	u16			inlen;
	u16			outlen;
};

struct cmd_msg_cache {
	/* protect block chain allocations
	 */
	spinlock_t		lock;
	struct list_head	head;
	unsigned int		max_inbox_size;
	unsigned int		num_ent;
};

enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};

struct mlx5_cmd_stats {
	u64		sum;
	u64		n;
	/* number of times command failed */
	u64		failed;
	/* number of times command failed on bad status returned by FW */
	u64		failed_mbox_status;
	/* last command failed returned errno */
	u32		last_failed_errno;
	/* last bad status returned by FW */
	u8		last_failed_mbox_status;
	/* last command failed syndrome returned by FW */
	u32		last_failed_syndrome;
	struct dentry  *root;
	/* protect command average calculations */
	spinlock_t	lock;
};

struct mlx5_cmd {
	struct mlx5_nb    nb;

	enum mlx5_cmdif_state	state;
	void	       *cmd_alloc_buf;
	dma_addr_t	alloc_dma;
	int		alloc_size;
	void	       *cmd_buf;
	dma_addr_t	dma;
	u16		cmdif_rev;
	u8		log_sz;
	u8		log_stride;
	int		max_reg_cmds;
	int		events;
	u32 __iomem    *vector;

	/* protect command queue allocations
	 */
	spinlock_t	alloc_lock;

	/* protect token allocations
	 */
	spinlock_t	token_lock;
	u8		token;
	unsigned long	bitmask;
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	int	mode;
	u16     allowed_opcode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int checksum_disabled;
	struct mlx5_cmd_stats *stats;
};

struct mlx5_cmd_mailbox {
	void	       *buf;
	dma_addr_t	dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list	*frags;
	int			npages;
	int			size;
	u8			page_shift;
};

struct mlx5_frag_buf_ctrl {
	struct mlx5_buf_list   *frags;
	u32			sz_m1;
	u16			frag_sz_m1;
	u16			strides_offset;
	u8			log_sz;
	u8			log_stride;
	u8			log_frag_strides;
};

struct mlx5_core_psv {
	u32	psv_idx;
	struct psv_layout {
		u32	pd;
		u16	syndrome;
		u16	reserved;
		u16	bg;
		u16	app_tag;
		u32	ref_tag;
	} psv;
};

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err       err_item;
	bool			sig_status_checked;
	bool			sig_err_exists;
	u32			sigerr_count;
};

#define MLX5_24BIT_MASK		((1 << 24) - 1)

enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ	= 3,
	MLX5_RES_XSRQ	= 4,
	MLX5_RES_XRQ	= 5,
	MLX5_RES_DCT	= MLX5_EVENT_QUEUE_TYPE_DCT,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	refcount_t		refcount;
	struct completion	free;
};

struct mlx5_uars_page {
	void __iomem	       *map;
	bool			wc;
	u32			index;
	struct list_head	list;
	unsigned int		bfregs;
	unsigned long	       *reg_bitmap; /* for non fast path bf regs */
	unsigned long	       *fp_bitmap;
	unsigned int		reg_avail;
	unsigned int		fp_avail;
	struct kref		ref_count;
	struct mlx5_core_dev   *mdev;
};

struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct mutex		lock;
	struct list_head	list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head	reg_head;
	struct mlx5_bfreg_head	wc_head;
};

struct mlx5_sq_bfreg {
	void __iomem	       *map;
	struct mlx5_uars_page  *up;
	bool			wc;
	u32			index;
	unsigned int		offset;
};

struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	u32				prev;
	int				miss_counter;
	u8				synd;
	u32				fatal_error;
	u32				crdump_size;
	/* wq spinlock to synchronize draining */
	spinlock_t			wq_lock;
	struct workqueue_struct	       *wq;
	unsigned long			flags;
	struct work_struct		fatal_report_work;
	struct work_struct		report_work;
	struct devlink_health_reporter *fw_reporter;
	struct devlink_health_reporter *fw_fatal_reporter;
	struct delayed_work		update_fw_log_ts_work;
};

struct mlx5_qp_table {
	struct notifier_block   nb;

	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

enum {
	MLX5_PF_NOTIFY_DISABLE_VF,
	MLX5_PF_NOTIFY_ENABLE_VF,
};

struct mlx5_vf_context {
	int	enabled;
	u64	port_guid;
	u64	node_guid;
	/* Valid bits are used to validate administrative guid only.
	 * Enabled after ndo_set_vf_guid
	 */
	u8	port_guid_valid:1;
	u8	node_guid_valid:1;
	enum port_state_policy	policy;
	struct blocking_notifier_head notifier;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context	*vfs_ctx;
	int			num_vfs;
	u16			max_vfs;
};

struct mlx5_fc_pool {
	struct mlx5_core_dev *dev;
	struct mutex pool_lock; /* protects pool lists */
	struct list_head fully_used;
	struct list_head partially_used;
	struct list_head unused;
	int available_fcs;
	int used_fcs;
	int threshold;
};

struct mlx5_fc_stats {
	spinlock_t counters_idr_lock; /* protects counters_idr */
	struct idr counters_idr;
	struct list_head counters;
	struct llist_head addlist;
	struct llist_head dellist;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
	u32 *bulk_query_out;
	int bulk_query_len;
	size_t num_counters;
	bool bulk_query_alloc_failed;
	unsigned long next_bulk_query_alloc;
	struct mlx5_fc_pool fc_pool;
};

struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_devcom;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
struct mlx5_vhca_state_notifier;
struct mlx5_sf_dev_table;
struct mlx5_sf_hw_table;
struct mlx5_sf_table;

struct mlx5_rate_limit {
	u32			rate;
	u32			max_burst_sz;
	u16			typical_pkt_sz;
};

struct mlx5_rl_entry {
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
	u64 refcount;
	u16 index;
	u16 uid;
	u8 dedicated : 1;
};

struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex            rl_lock;
	u16                     max_size;
	u32                     max_rate;
	u32                     min_rate;
	struct mlx5_rl_entry   *rl_entry;
	u64 refcount;
};

struct mlx5_core_roce {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_handle *allow_rule;
};

enum {
	MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
	MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
	/* Set during device detach to block any further device
	 * creation/deletion on driver rescan. Unset during device attach.
	 */
	MLX5_PRIV_FLAGS_DETACH = 1 << 2,
};

struct mlx5_adev {
	struct auxiliary_device adev;
	struct mlx5_core_dev *mdev;
	int idx;
};

struct mlx5_debugfs_entries {
	struct dentry *dbg_root;
	struct dentry *qp_debugfs;
	struct dentry *eq_debugfs;
	struct dentry *cq_debugfs;
	struct dentry *cmdif_debugfs;
	struct dentry *pages_debugfs;
	struct dentry *lag_debugfs;
};

struct mlx5_ft_pool;
struct mlx5_priv {
	/* IRQ table is valid only for real PCI devices (PF or VF) */
	struct mlx5_irq_table   *irq_table;
	struct mlx5_eq_table	*eq_table;

	/* pages stuff */
	struct mlx5_nb          pg_nb;
	struct workqueue_struct *pg_wq;
	struct xarray           page_root_xa;
	u32			fw_pages;
	atomic_t		reg_pages;
	struct list_head	free_list;
	u32			vfs_pages;
	u32			host_pf_pages;
	u32			fw_pages_alloc_failed;
	u32			give_pages_dropped;
	u32			reclaim_pages_discard;

	struct mlx5_core_health health;
	struct list_head	traps;

	struct mlx5_debugfs_entries dbg;

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex            alloc_mutex;
	int                     numa_node;

	struct mutex            pgdir_mutex;
	struct list_head        pgdir_list;
	/* end: alloc stuff */

	struct list_head        ctx_list;
	spinlock_t              ctx_lock;
	struct mlx5_adev       **adev;
	int			adev_idx;
	struct mlx5_events      *events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs        *mpfs;
	struct mlx5_eswitch     *eswitch;
	struct mlx5_core_sriov	sriov;
	struct mlx5_lag		*lag;
	u32			flags;
	struct mlx5_devcom	*devcom;
	struct mlx5_fw_reset	*fw_reset;
	struct mlx5_core_roce	roce;
	struct mlx5_fc_stats		fc_stats;
	struct mlx5_rl_table            rl_table;
	struct mlx5_ft_pool		*ft_pool;

	struct mlx5_bfreg_data		bfregs;
	struct mlx5_uars_page	       *uar;
#ifdef CONFIG_MLX5_SF
	struct mlx5_vhca_state_notifier *vhca_state_notifier;
	struct mlx5_sf_dev_table *sf_dev_table;
	struct mlx5_core_dev *parent_mdev;
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
	struct mlx5_sf_hw_table *sf_hw_table;
	struct mlx5_sf_table *sf_table;
#endif
};

enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP = 1,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP = BIT(0),
	MLX5_BREAK_FW_WAIT = BIT(1),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};

struct mlx5_td {
	/* protects changes to the tirs list during tirs refresh */
	struct mutex     list_lock;
	struct list_head tirs_list;
	u32              tdn;
};

struct mlx5e_resources {
	struct mlx5e_hw_objs {
		u32                        pdn;
		struct mlx5_td             td;
		u32			   mkey;
		struct mlx5_sq_bfreg       bfreg;
	} hw_objs;
	struct devlink_port dl_port;
	struct net_device *uplink_netdev;
};

enum mlx5_sw_icm_type {
	MLX5_SW_ICM_TYPE_STEERING,
	MLX5_SW_ICM_TYPE_HEADER_MODIFY,
};

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {
	unsigned int start;
	unsigned int count;
	struct ida ida;
};

#define MAX_PIN_NUM	8
struct mlx5_pps {
	u8                         pin_caps[MAX_PIN_NUM];
	struct work_struct         out_work;
	u64                        start[MAX_PIN_NUM];
	u8                         enabled;
};

struct mlx5_timer {
	struct cyclecounter        cycles;
	struct timecounter         tc;
	u32                        nominal_c_mult;
	unsigned long              overflow_period;
	struct delayed_work        overflow_work;
};

struct mlx5_clock {
	struct mlx5_nb             pps_nb;
	seqlock_t                  lock;
	struct hwtstamp_config     hwtstamp_config;
	struct ptp_clock          *ptp;
	struct ptp_clock_info      ptp_info;
	struct mlx5_pps            pps_info;
	struct mlx5_timer          timer;
};

struct mlx5_dm;
struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;

#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
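
/* Illustrative arithmetic (not part of the upstream header): assuming a
 * device that reports log_sw_icm_alloc_granularity == 12 in its
 * device-memory caps (a hypothetical value), the macros above give:
 *
 *	MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) == 12
 *	MLX5_SW_ICM_BLOCK_SIZE(dev)     == 1 << 12 == 4096 bytes
 */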

enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};

enum {
	MR_CACHE_LAST_STD_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MR_CACHE_ENTRIES
};

struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

struct mlx5_hca_cap {
	u32 cur[MLX5_UN_SZ_DW(hca_cap_union)];
	u32 max[MLX5_UN_SZ_DW(hca_cap_union)];
};

struct mlx5_core_dev {
	struct device *device;
	enum mlx5_coredev_type coredev_type;
	struct pci_dev	       *pdev;
	/* sync pci state */
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct {
		struct mlx5_hca_cap *hca[MLX5_CAP_NUM];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
		u8  embedded_cpu;
	} caps;
	struct mlx5_timeouts	*timeouts;
	u64			sys_image_guid;
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	phys_addr_t             bar_addr;
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;
	struct mlx5_priv	priv;
	struct mlx5_profile	profile;
	u32			issi;
	struct mlx5e_resources  mlx5e_res;
	struct mlx5_dm          *dm;
	struct mlx5_vxlan       *vxlan;
	struct mlx5_geneve      *geneve;
	struct {
		struct mlx5_rsvd_gids	reserved_gids;
		u32			roce_en;
	} roce;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
	struct mlx5_clock        clock;
	struct mlx5_ib_clock_info  *clock_info;
	struct mlx5_fw_tracer   *tracer;
	struct mlx5_rsc_dump    *rsc_dump;
	u32                      vsc_addr;
	struct mlx5_hv_vhca	*hv_vhca;
};

struct mlx5_db {
	__be32			*db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};

struct mlx5_cmd_work_ent {
	unsigned long		state;
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	void		       *uout;
	int			uout_size;
	mlx5_cmd_cbk_t		callback;
	struct delayed_work	cb_timeout_work;
	void		       *context;
	int			idx;
	struct completion	handling;
	struct completion	done;
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
	int			ret;
	int			page_queue;
	u8			status;
	u8			token;
	u64			ts1;
	u64			ts2;
	u16			op;
	bool			polling;
	/* Track the max comp handlers */
	refcount_t              refcnt;
};

struct mlx5_pas {
	u64	pa;
	u8	log_sz;
};

enum phy_port_state {
	MLX5_AAA_111
};

struct mlx5_hca_vport_context {
	u32			field_select;
	bool			sm_virt_aware;
	bool			has_smi;
	bool			has_raw;
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	u64			sys_image_guid;
	u64			port_guid;
	u64			node_guid;
	u32			cap_mask1;
	u32			cap_mask1_perm;
	u16			cap_mask2;
	u16			cap_mask2_perm;
	u16			lid;
	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u8			lmc;
	u8			subnet_timeout;
	u16			sm_lid;
	u8			sm_sl;
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
	bool			grh_required;
};

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}

static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
	return ((u32)1 << log_sz) << log_stride;
}
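
/* Illustrative arithmetic (not part of the upstream header): for a
 * hypothetical work queue with log_sz == 10 and log_stride == 6,
 * wq_get_byte_sz() returns (1 << 10) << 6, i.e. 1024 entries of
 * 64 bytes each == 65536 bytes.
 */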

static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
					u8 log_stride, u8 log_sz,
					u16 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
{
	fbc->frags      = frags;
	fbc->log_stride = log_stride;
	fbc->log_sz     = log_sz;
	fbc->sz_m1	= (1 << fbc->log_sz) - 1;
	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
	fbc->frag_sz_m1	= (1 << fbc->log_frag_strides) - 1;
	fbc->strides_offset = strides_offset;
}

static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
				 u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
{
	mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
}

static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
					  u32 ix)
{
	unsigned int frag;

	ix  += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;

	return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}
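
/* Illustrative arithmetic (not part of the upstream header): assume
 * PAGE_SHIFT == 12 and a control block set up via mlx5_init_fbc() with
 * log_stride == 6 (64-byte strides) and strides_offset == 0. Then
 * log_frag_strides == 12 - 6 == 6 (64 strides per fragment) and
 * frag_sz_m1 == 63, so mlx5_frag_buf_get_wqe(fbc, 70) resolves to:
 *
 *	frag   = 70 >> 6        == 1
 *	offset = (70 & 63) << 6 == 384 bytes into frags[1].buf
 */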

static inline u32
mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
{
	u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;

	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}

enum {
	CMD_ALLOWED_OPCODE_ALL,
};

void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);

struct mlx5_async_ctx {
	struct mlx5_core_dev *dev;
	atomic_t num_inflight;
	struct wait_queue_head wait;
};

struct mlx5_async_work;

typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);

struct mlx5_async_work {
	struct mlx5_async_ctx *ctx;
	mlx5_async_cbk_t user_callback;
	u16 opcode; /* cmd opcode */
	void *out; /* pointer to the cmd output buffer */
};

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx);
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work);
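
/* Usage sketch (illustrative, not from this header; names prefixed my_
 * are hypothetical) of the async command flow declared above:
 *
 *	static void my_done(int status, struct mlx5_async_work *work)
 *	{
 *		// inspect status and work->out
 *	}
 *
 *	struct mlx5_async_ctx ctx;
 *	struct mlx5_async_work work;
 *
 *	mlx5_cmd_init_async_ctx(dev, &ctx);
 *	err = mlx5_cmd_exec_cb(&ctx, in, sizeof(in), out, sizeof(out),
 *			       my_done, &work);
 *	...
 *	mlx5_cmd_cleanup_async_ctx(&ctx);	// waits for commands in flight
 */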
void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out);
int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size);
int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);

#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                             \
	({                                                                     \
		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out,    \
			      MLX5_ST_SZ_BYTES(ifc_cmd##_out));                \
	})

#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                                     \
	({                                                                     \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};                   \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);                   \
	})
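
/* Usage sketch (illustrative; it mirrors the common mlx5_core calling
 * pattern rather than code from this file). The ifc_cmd token is pasted
 * into the mlx5_ifc _in/_out layout names, so buffer sizes are derived
 * from the structure definitions:
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec_in(dev, enable_hca, in);
 */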

int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
			  int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out,
			 int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);

void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
		    void *data_out, int size_out, u16 reg_id, int arg,
		    int write, bool verbose);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);

int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);

static inline int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
}
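
/* Usage sketch (illustrative, not from this header): a doorbell record
 * is allocated against the device's NUMA node and released with
 * mlx5_db_free() declared below:
 *
 *	struct mlx5_db db;
 *
 *	err = mlx5_db_alloc(dev, &db);
 *	if (err)
 *		return err;
 *	// db.db is the CPU address, db.dma the DMA address
 *	mlx5_db_free(dev, &db);
 */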

void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);

int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
			 bool dedicated_entry, u16 *index);
void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1);
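
/* Usage sketch (illustrative, not from this header; the rate value is
 * hypothetical and valid bounds come from the device QoS caps). Entries
 * are refcounted in the rate table, so add/remove calls must balance:
 *
 *	struct mlx5_rate_limit rl = { .rate = 100000 };
 *	u16 index;
 *
 *	err = mlx5_rl_add_rate(dev, &index, &rl);
 *	...
 *	mlx5_rl_remove_rate(dev, &rl);
 */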
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
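
/* Illustrative arithmetic (not part of the upstream header): the
 * helpers above split a 32-bit mkey into a 24-bit index and an 8-bit
 * variant. For a hypothetical mkey of 0x00abcd12:
 *
 *	mlx5_mkey_to_idx(0x00abcd12)  == 0x00abcd
 *	mlx5_mkey_variant(0x00abcd12) == 0x12
 *	mlx5_base_mkey(0x00abcd12)    == 0x00abcd00
 */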

/* Async-atomic event notifier used by mlx5 core to forward FW
 * events received from the event queue to mlx5 consumers.
 * Optimises event queue dispatching.
 */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
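
/* Usage sketch (illustrative, not from this header; names prefixed my_
 * are hypothetical) for the notifier pair declared above:
 *
 *	static int my_handler(struct notifier_block *nb,
 *			      unsigned long event, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_handler };
 *
 *	mlx5_notifier_register(dev, &my_nb);
 *	...
 *	mlx5_notifier_unregister(dev, &my_nb);
 */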

/* Async-atomic event notifier used for forwarding
 * events from the event queue to the mlx5 events dispatcher,
 * eswitch, clock and others.
 */
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);

/* Blocking event notifier used to forward SW events, used for slow path */
int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
				      void *data);

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
			   struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			 u64 length, u32 log_alignment, u16 uid,
			 phys_addr_t *addr, u32 *obj_id);
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			   u64 length, u16 uid, phys_addr_t addr, u32 obj_id);

struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);

int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
					  int vf_id,
					  struct notifier_block *nb);
void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
					     int vf_id,
					     struct notifier_block *nb);
#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params);

enum {
	MLX5_PCI_DEV_IS_VF		= 1 << 0,
};

static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_PF;
}

static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_VF;
}

static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu;
}

static inline bool
mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
}

static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
}

static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
{
	return dev->priv.sriov.max_vfs;
}

static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}

static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}

static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
	       MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
}

static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}

static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_mp_slave(dev) ||
	       mlx5_core_is_mp_master(dev);
}

static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_mp_enabled(dev))
		return 1;

	return MLX5_CAP_GEN(dev, native_port_num);
}

static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev)
{
	int idx = MLX5_CAP_GEN(dev, native_port_num);

	if (idx >= 1 && idx <= MLX5_MAX_PORTS)
		return idx - 1;
	else
		return PCI_FUNC(dev->pdev->devfn);
}

enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(devlink,
						 DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
						 &val);
	return err ? MLX5_CAP_GEN(dev, roce) : val.vbool;
}

#endif /* MLX5_DRIVER_H */