cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

en_stats.h (14337B)


/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__

#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
	(*(u64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
	be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
	(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))

#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKRQ_STAT(type, fld) "rx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)

#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq%d_"#fld, offsetof(type, fld)

#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)

struct counter_desc {
	char		format[ETH_GSTRING_LEN];
	size_t		offset; /* Byte offset */
};

enum {
	MLX5E_NDO_UPDATE_STATS = BIT(0x1),
};

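/*
 * Illustrative sketch (not part of the original header): the macros above
 * are meant to be used together. A counter_desc table records an ethtool
 * string and a byte offset per field, and the MLX5E_READ_CTR* macros fetch
 * each value back out of a stats struct by that offset. A hypothetical
 * table named sw_desc would be consumed roughly like so:
 *
 *	static const struct counter_desc sw_desc[] = {
 *		{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
 *		{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
 *	};
 *
 *	for (i = 0; i < ARRAY_SIZE(sw_desc); i++)
 *		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
 *						   sw_desc, i);
 *
 * The _BE variants are for counters that firmware dumps in big-endian
 * layouts (the __be64 arrays further down in this file).
 */
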
struct mlx5e_priv;
struct mlx5e_stats_grp {
	u16 update_stats_mask;
	int (*get_num_stats)(struct mlx5e_priv *priv);
	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
	void (*update_stats)(struct mlx5e_priv *priv);
};

typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;

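/*
 * Sketch of how the group table is assumed to be consumed (mirroring the
 * mlx5e_stats_* entry points declared below): the driver keeps an array of
 * mlx5e_stats_grp_t and folds each op over it, threading the ethtool index
 * through fill_strings/fill_stats:
 *
 *	int i, idx = 0;
 *
 *	for (i = 0; i < mlx5e_nic_stats_grps_num(priv); i++)
 *		idx = mlx5e_nic_stats_grps[i]->fill_stats(priv, data, idx);
 *
 * update_stats_mask gates which groups have update_stats run for a given
 * caller, e.g. MLX5E_NDO_UPDATE_STATS for .ndo_get_stats64 refreshes.
 */
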
#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name

#define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \
	int MLX5E_STATS_GRP_OP(grp, num_stats)(struct mlx5e_priv *priv)

#define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \
	void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)

#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
	int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)

#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
	int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)

#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp

#define MLX5E_DECLARE_STATS_GRP(grp) \
	const struct mlx5e_stats_grp MLX5E_STATS_GRP(grp)

#define MLX5E_DEFINE_STATS_GRP(grp, mask) \
MLX5E_DECLARE_STATS_GRP(grp) = { \
	.get_num_stats = MLX5E_STATS_GRP_OP(grp, num_stats), \
	.fill_stats    = MLX5E_STATS_GRP_OP(grp, fill_stats), \
	.fill_strings  = MLX5E_STATS_GRP_OP(grp, fill_strings), \
	.update_stats  = MLX5E_STATS_GRP_OP(grp, update_stats), \
	.update_stats_mask = mask, \
}

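/*
 * Illustrative use of the boilerplate above, for a hypothetical group "foo":
 * a .c file defines the four ops through the DECLARE_STATS_GRP_OP_* macros
 * (which token-paste to mlx5e_stats_grp_foo_num_stats and friends) and then
 * instantiates the group object. NUM_FOO_COUNTERS is an assumed name:
 *
 *	static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(foo)
 *	{
 *		return NUM_FOO_COUNTERS;
 *	}
 *
 *	... fill_strings, fill_stats and update_stats likewise ...
 *
 *	MLX5E_DEFINE_STATS_GRP(foo, MLX5E_NDO_UPDATE_STATS);
 */
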
unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
void mlx5e_stats_update(struct mlx5e_priv *priv);
void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats);
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
			 struct ethtool_fec_stats *fec_stats);

void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_phy_stats *phy_stats);
void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats);
void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
			      struct ethtool_eth_ctrl_stats *ctrl_stats);
void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
			  struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges);

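/*
 * Assumed ethtool wiring behind the entry points above (a sketch, not from
 * this header): get_sset_count(ETH_SS_STATS) returns mlx5e_stats_total_num(),
 * get_strings() calls mlx5e_stats_fill_strings(), and get_ethtool_stats()
 * refreshes counters via mlx5e_stats_update() before copying them out with
 * mlx5e_stats_fill(priv, data, 0). The remaining *_get() helpers back the
 * standard ethtool stats groups (pause, FEC, PHY/MAC/ctrl, RMON).
 */
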
/* Concrete NIC Stats */

struct mlx5e_sw_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_tso_packets;
	u64 tx_tso_bytes;
	u64 tx_tso_inner_packets;
	u64 tx_tso_inner_bytes;
	u64 tx_added_vlan_packets;
	u64 tx_nop;
	u64 tx_mpwqe_blks;
	u64 tx_mpwqe_pkts;
	u64 rx_lro_packets;
	u64 rx_lro_bytes;
	u64 rx_gro_packets;
	u64 rx_gro_bytes;
	u64 rx_gro_skbs;
	u64 rx_gro_match_packets;
	u64 rx_gro_large_hds;
	u64 rx_mcast_packets;
	u64 rx_ecn_mark;
	u64 rx_removed_vlan_packets;
	u64 rx_csum_unnecessary;
	u64 rx_csum_none;
	u64 rx_csum_complete;
	u64 rx_csum_complete_tail;
	u64 rx_csum_complete_tail_slow;
	u64 rx_csum_unnecessary_inner;
	u64 rx_xdp_drop;
	u64 rx_xdp_redirect;
	u64 rx_xdp_tx_xmit;
	u64 rx_xdp_tx_mpwqe;
	u64 rx_xdp_tx_inlnw;
	u64 rx_xdp_tx_nops;
	u64 rx_xdp_tx_full;
	u64 rx_xdp_tx_err;
	u64 rx_xdp_tx_cqe;
	u64 tx_csum_none;
	u64 tx_csum_partial;
	u64 tx_csum_partial_inner;
	u64 tx_queue_stopped;
	u64 tx_queue_dropped;
	u64 tx_xmit_more;
	u64 tx_recover;
	u64 tx_cqes;
	u64 tx_queue_wake;
	u64 tx_cqe_err;
	u64 tx_xdp_xmit;
	u64 tx_xdp_mpwqe;
	u64 tx_xdp_inlnw;
	u64 tx_xdp_nops;
	u64 tx_xdp_full;
	u64 tx_xdp_err;
	u64 tx_xdp_cqes;
	u64 rx_wqe_err;
	u64 rx_mpwqe_filler_cqes;
	u64 rx_mpwqe_filler_strides;
	u64 rx_oversize_pkts_sw_drop;
	u64 rx_buff_alloc_err;
	u64 rx_cqe_compress_blks;
	u64 rx_cqe_compress_pkts;
	u64 rx_cache_reuse;
	u64 rx_cache_full;
	u64 rx_cache_empty;
	u64 rx_cache_busy;
	u64 rx_cache_waive;
	u64 rx_congst_umr;
	u64 rx_arfs_err;
	u64 rx_recover;
	u64 ch_events;
	u64 ch_poll;
	u64 ch_arm;
	u64 ch_aff_change;
	u64 ch_force_irq;
	u64 ch_eq_rearm;
#ifdef CONFIG_PAGE_POOL_STATS
	u64 rx_pp_alloc_fast;
	u64 rx_pp_alloc_slow;
	u64 rx_pp_alloc_slow_high_order;
	u64 rx_pp_alloc_empty;
	u64 rx_pp_alloc_refill;
	u64 rx_pp_alloc_waive;
	u64 rx_pp_recycle_cached;
	u64 rx_pp_recycle_cache_full;
	u64 rx_pp_recycle_ring;
	u64 rx_pp_recycle_ring_full;
	u64 rx_pp_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	u64 tx_tls_encrypted_packets;
	u64 tx_tls_encrypted_bytes;
	u64 tx_tls_ooo;
	u64 tx_tls_dump_packets;
	u64 tx_tls_dump_bytes;
	u64 tx_tls_resync_bytes;
	u64 tx_tls_skip_no_sync_data;
	u64 tx_tls_drop_no_sync_data;
	u64 tx_tls_drop_bypass_req;

	u64 rx_tls_decrypted_packets;
	u64 rx_tls_decrypted_bytes;
	u64 rx_tls_resync_req_pkt;
	u64 rx_tls_resync_req_start;
	u64 rx_tls_resync_req_end;
	u64 rx_tls_resync_req_skip;
	u64 rx_tls_resync_res_ok;
	u64 rx_tls_resync_res_retry;
	u64 rx_tls_resync_res_skip;
	u64 rx_tls_err;
#endif

	u64 rx_xsk_packets;
	u64 rx_xsk_bytes;
	u64 rx_xsk_csum_complete;
	u64 rx_xsk_csum_unnecessary;
	u64 rx_xsk_csum_unnecessary_inner;
	u64 rx_xsk_csum_none;
	u64 rx_xsk_ecn_mark;
	u64 rx_xsk_removed_vlan_packets;
	u64 rx_xsk_xdp_drop;
	u64 rx_xsk_xdp_redirect;
	u64 rx_xsk_wqe_err;
	u64 rx_xsk_mpwqe_filler_cqes;
	u64 rx_xsk_mpwqe_filler_strides;
	u64 rx_xsk_oversize_pkts_sw_drop;
	u64 rx_xsk_buff_alloc_err;
	u64 rx_xsk_cqe_compress_blks;
	u64 rx_xsk_cqe_compress_pkts;
	u64 rx_xsk_congst_umr;
	u64 rx_xsk_arfs_err;
	u64 tx_xsk_xmit;
	u64 tx_xsk_mpwqe;
	u64 tx_xsk_inlnw;
	u64 tx_xsk_full;
	u64 tx_xsk_err;
	u64 tx_xsk_cqes;
};

struct mlx5e_qcounter_stats {
	u32 rx_out_of_buffer;
	u32 rx_if_down_packets;
};

struct mlx5e_vnic_env_stats {
	__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};

#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
						vstats->query_vport_out, c)

struct mlx5e_vport_stats {
	__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};

#define PPORT_802_3_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
		   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
		   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
		   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
		   counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
	MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
		   counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO				8
#define PPORT_ETH_EXT_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
		   counter_set.eth_extended_cntrs_grp_data_layout.c##_high)

struct mlx5e_pport_stats {
	__be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
};

#define PCIE_PERF_GET(pcie_stats, c) \
	MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		 counter_set.pcie_perf_cntrs_grp_data_layout.c)

#define PCIE_PERF_GET64(pcie_stats, c) \
	MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		   counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)

struct mlx5e_pcie_stats {
	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};

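/*
 * Note on the *_GET64 accessors above (a sketch based on the mlx5 ifc
 * naming convention): 64-bit PPCNT/MPCNT counters are described in the
 * firmware interface as <name>_high and <name>_low 32-bit field pairs, so
 * token-pasting ##_high and reading 64 bits from that offset with
 * MLX5_GET64 yields the whole counter, e.g.
 *
 *	u64 ok = PPORT_802_3_GET(&priv->stats.pport,
 *				 a_frames_transmitted_ok);
 *
 * (field name as assumed from eth_802_3_cntrs_grp_data_layout in
 * mlx5_ifc.h).
 */
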
struct mlx5e_rq_stats {
	u64 packets;
	u64 bytes;
	u64 csum_complete;
	u64 csum_complete_tail;
	u64 csum_complete_tail_slow;
	u64 csum_unnecessary;
	u64 csum_unnecessary_inner;
	u64 csum_none;
	u64 lro_packets;
	u64 lro_bytes;
	u64 gro_packets;
	u64 gro_bytes;
	u64 gro_skbs;
	u64 gro_match_packets;
	u64 gro_large_hds;
	u64 mcast_packets;
	u64 ecn_mark;
	u64 removed_vlan_packets;
	u64 xdp_drop;
	u64 xdp_redirect;
	u64 wqe_err;
	u64 mpwqe_filler_cqes;
	u64 mpwqe_filler_strides;
	u64 oversize_pkts_sw_drop;
	u64 buff_alloc_err;
	u64 cqe_compress_blks;
	u64 cqe_compress_pkts;
	u64 cache_reuse;
	u64 cache_full;
	u64 cache_empty;
	u64 cache_busy;
	u64 cache_waive;
	u64 congst_umr;
	u64 arfs_err;
	u64 recover;
#ifdef CONFIG_PAGE_POOL_STATS
	u64 pp_alloc_fast;
	u64 pp_alloc_slow;
	u64 pp_alloc_slow_high_order;
	u64 pp_alloc_empty;
	u64 pp_alloc_refill;
	u64 pp_alloc_waive;
	u64 pp_recycle_cached;
	u64 pp_recycle_cache_full;
	u64 pp_recycle_ring;
	u64 pp_recycle_ring_full;
	u64 pp_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	u64 tls_decrypted_packets;
	u64 tls_decrypted_bytes;
	u64 tls_resync_req_pkt;
	u64 tls_resync_req_start;
	u64 tls_resync_req_end;
	u64 tls_resync_req_skip;
	u64 tls_resync_res_ok;
	u64 tls_resync_res_retry;
	u64 tls_resync_res_skip;
	u64 tls_err;
#endif
};

struct mlx5e_sq_stats {
	/* commonly accessed in data path */
	u64 packets;
	u64 bytes;
	u64 xmit_more;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 csum_partial;
	u64 csum_partial_inner;
	u64 added_vlan_packets;
	u64 nop;
	u64 mpwqe_blks;
	u64 mpwqe_pkts;
#ifdef CONFIG_MLX5_EN_TLS
	u64 tls_encrypted_packets;
	u64 tls_encrypted_bytes;
	u64 tls_ooo;
	u64 tls_dump_packets;
	u64 tls_dump_bytes;
	u64 tls_resync_bytes;
	u64 tls_skip_no_sync_data;
	u64 tls_drop_no_sync_data;
	u64 tls_drop_bypass_req;
#endif
	/* less likely accessed in data path */
	u64 csum_none;
	u64 stopped;
	u64 dropped;
	u64 recover;
	/* dirtied @completion */
	u64 cqes ____cacheline_aligned_in_smp;
	u64 wake;
	u64 cqe_err;
};

struct mlx5e_xdpsq_stats {
	u64 xmit;
	u64 mpwqe;
	u64 inlnw;
	u64 nops;
	u64 full;
	u64 err;
	/* dirtied @completion */
	u64 cqes ____cacheline_aligned_in_smp;
};

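/*
 * In mlx5e_sq_stats and mlx5e_xdpsq_stats above, the counters dirtied at
 * completion time start a fresh cacheline (____cacheline_aligned_in_smp)
 * so the xmit path and the completion path do not false-share a line.
 */
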
struct mlx5e_ch_stats {
	u64 events;
	u64 poll;
	u64 arm;
	u64 aff_change;
	u64 force_irq;
	u64 eq_rearm;
};

struct mlx5e_ptp_cq_stats {
	u64 cqe;
	u64 err_cqe;
	u64 abort;
	u64 abort_abs_diff_ns;
};

struct mlx5e_stats {
	struct mlx5e_sw_stats sw;
	struct mlx5e_qcounter_stats qcnt;
	struct mlx5e_vnic_env_stats vnic;
	struct mlx5e_vport_stats vport;
	struct mlx5e_pport_stats pport;
	struct rtnl_link_stats64 vf_vport;
	struct mlx5e_pcie_stats pcie;
};

extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);

extern MLX5E_DECLARE_STATS_GRP(sw);
extern MLX5E_DECLARE_STATS_GRP(qcnt);
extern MLX5E_DECLARE_STATS_GRP(vnic_env);
extern MLX5E_DECLARE_STATS_GRP(vport);
extern MLX5E_DECLARE_STATS_GRP(802_3);
extern MLX5E_DECLARE_STATS_GRP(2863);
extern MLX5E_DECLARE_STATS_GRP(2819);
extern MLX5E_DECLARE_STATS_GRP(phy);
extern MLX5E_DECLARE_STATS_GRP(eth_ext);
extern MLX5E_DECLARE_STATS_GRP(pcie);
extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);

#endif /* __MLX5_EN_STATS_H__ */