/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/ktls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
#include "en/port.h"

#ifdef CONFIG_PAGE_POOL_STATS
#include <net/page_pool.h>
#endif

static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
	return !priv->profile->stats_grps_num ? 0 :
		priv->profile->stats_grps_num(priv);
}

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	unsigned int total = 0;
	int i;

	for (i = 0; i < num_stats_grps; i++)
		total += stats_grps[i]->get_num_stats(priv);

	return total;
}

void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats &&
		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_stats(priv, data, idx);
}

void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i, idx = 0;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_strings(priv, data, idx);
}
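/*
 * Each entry of priv->profile->stats_grps is a small vtable, and the
 * loops above are its only consumers.  A rough sketch of its shape as
 * used here (the authoritative definition lives in en_stats.h):
 *
 *	struct mlx5e_stats_grp {
 *		u16 update_stats_mask;
 *		int (*get_num_stats)(struct mlx5e_priv *priv);
 *		int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
 *		int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
 *		void (*update_stats)(struct mlx5e_priv *priv);
 *	};
 *
 * ethtool sizes its buffers with mlx5e_stats_total_num() and then walks
 * the same groups for strings and values, so every group must report a
 * consistent count from get_num_stats(), fill_strings() and fill_stats().
 */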
/* Concrete NIC Stats */

static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_PAGE_POOL_STATS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
#endif
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
	return idx;
}

static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
{
	s->tx_xdp_xmit += xdpsq_red_stats->xmit;
	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
	s->tx_xdp_nops += xdpsq_red_stats->nops;
	s->tx_xdp_full += xdpsq_red_stats->full;
	s->tx_xdp_err += xdpsq_red_stats->err;
	s->tx_xdp_cqes += xdpsq_red_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xdpsq_stats)
{
	s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
	s->rx_xdp_tx_nops += xdpsq_stats->nops;
	s->rx_xdp_tx_full += xdpsq_stats->full;
	s->rx_xdp_tx_err += xdpsq_stats->err;
	s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
}
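/*
 * MLX5E_DECLARE_STAT() pairs an ethtool string with a byte offset into
 * the owning stats struct.  A rough sketch of what it expands to (the
 * real macros are in en_stats.h):
 *
 *	#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
 *
 * so the fill_stats op above reduces to an offset dereference,
 *
 *	*(u64 *)((char *)&priv->stats.sw + sw_stats_desc[i].offset)
 *
 * which is essentially what MLX5E_READ_CTR64_CPU() does.
 */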
static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xsksq_stats)
{
	s->tx_xsk_xmit += xsksq_stats->xmit;
	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
	s->tx_xsk_inlnw += xsksq_stats->inlnw;
	s->tx_xsk_full += xsksq_stats->full;
	s->tx_xsk_err += xsksq_stats->err;
	s->tx_xsk_cqes += xsksq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
						  struct mlx5e_rq_stats *xskrq_stats)
{
	s->rx_xsk_packets += xskrq_stats->packets;
	s->rx_xsk_bytes += xskrq_stats->bytes;
	s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
	s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
	s->rx_xsk_csum_none += xskrq_stats->csum_none;
	s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
	s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
	s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
	s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
	s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
	s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
	s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
	s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
	s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
	s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
	s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
	s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
	s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
}

static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_rq_stats *rq_stats)
{
	s->rx_packets += rq_stats->packets;
	s->rx_bytes += rq_stats->bytes;
	s->rx_lro_packets += rq_stats->lro_packets;
	s->rx_lro_bytes += rq_stats->lro_bytes;
	s->rx_gro_packets += rq_stats->gro_packets;
	s->rx_gro_bytes += rq_stats->gro_bytes;
	s->rx_gro_skbs += rq_stats->gro_skbs;
	s->rx_gro_match_packets += rq_stats->gro_match_packets;
	s->rx_gro_large_hds += rq_stats->gro_large_hds;
	s->rx_ecn_mark += rq_stats->ecn_mark;
	s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
	s->rx_csum_none += rq_stats->csum_none;
	s->rx_csum_complete += rq_stats->csum_complete;
	s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
	s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
	s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
	s->rx_xdp_drop += rq_stats->xdp_drop;
	s->rx_xdp_redirect += rq_stats->xdp_redirect;
	s->rx_wqe_err += rq_stats->wqe_err;
	s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
	s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
	s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
	s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
	s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
	s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
	s->rx_cache_reuse += rq_stats->cache_reuse;
	s->rx_cache_full += rq_stats->cache_full;
	s->rx_cache_empty += rq_stats->cache_empty;
	s->rx_cache_busy += rq_stats->cache_busy;
	s->rx_cache_waive += rq_stats->cache_waive;
	s->rx_congst_umr += rq_stats->congst_umr;
	s->rx_arfs_err += rq_stats->arfs_err;
	s->rx_recover += rq_stats->recover;
#ifdef CONFIG_PAGE_POOL_STATS
	s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
	s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow;
	s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty;
	s->rx_pp_alloc_refill += rq_stats->pp_alloc_refill;
	s->rx_pp_alloc_waive += rq_stats->pp_alloc_waive;
	s->rx_pp_alloc_slow_high_order += rq_stats->pp_alloc_slow_high_order;
	s->rx_pp_recycle_cached += rq_stats->pp_recycle_cached;
	s->rx_pp_recycle_cache_full += rq_stats->pp_recycle_cache_full;
	s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
	s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
	s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
	s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
	s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
	s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
	s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
	s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
	s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
	s->rx_tls_resync_res_retry += rq_stats->tls_resync_res_retry;
	s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
	s->rx_tls_err += rq_stats->tls_err;
#endif
}

static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_ch_stats *ch_stats)
{
	s->ch_events += ch_stats->events;
	s->ch_poll += ch_stats->poll;
	s->ch_arm += ch_stats->arm;
	s->ch_aff_change += ch_stats->aff_change;
	s->ch_force_irq += ch_stats->force_irq;
	s->ch_eq_rearm += ch_stats->eq_rearm;
}

static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
					       struct mlx5e_sq_stats *sq_stats)
{
	s->tx_packets += sq_stats->packets;
	s->tx_bytes += sq_stats->bytes;
	s->tx_tso_packets += sq_stats->tso_packets;
	s->tx_tso_bytes += sq_stats->tso_bytes;
	s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
	s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
	s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
	s->tx_nop += sq_stats->nop;
	s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
	s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
	s->tx_queue_stopped += sq_stats->stopped;
	s->tx_queue_wake += sq_stats->wake;
	s->tx_queue_dropped += sq_stats->dropped;
	s->tx_cqe_err += sq_stats->cqe_err;
	s->tx_recover += sq_stats->recover;
	s->tx_xmit_more += sq_stats->xmit_more;
	s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
	s->tx_csum_none += sq_stats->csum_none;
	s->tx_csum_partial += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
	s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
	s->tx_tls_ooo += sq_stats->tls_ooo;
	s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
	s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
	s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
	s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
#endif
	s->tx_cqes += sq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	int i;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return;

	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);

	if (priv->tx_ptp_opened) {
		for (i = 0; i < priv->max_opened_tc; i++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	if (priv->rx_ptp_opened) {
		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	stats = READ_ONCE(priv->htb.qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

#ifdef CONFIG_PAGE_POOL_STATS
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
	struct page_pool *pool = c->rq.page_pool;
	struct page_pool_stats stats = { 0 };

	if (!page_pool_get_stats(pool, &stats))
		return;

	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;

	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
#else
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
}
#endif

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->channels.num; i++) /* for active channels only */
		mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);

	for (i = 0; i < priv->stats_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			priv->channel_stats[i];

		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
		/* xdp redirect */
		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
		/* AF_XDP zero-copy */
		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}
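/*
 * Two subtleties in the software-counter update path above:
 *
 * - The barrier() calls apparently keep the compiler from combining the
 *   per-queue accumulation calls; see the GCC bugzilla link repeated at
 *   each call site.
 * - In mlx5e_stats_grp_sw_update_stats_qos(), the smp_load_acquire() of
 *   max_qos_sqs pairs with a writer that publishes each stats pointer
 *   before releasing the new count, conceptually:
 *
 *	WRITE_ONCE(priv->htb.qos_sq_stats[i], new_stats);
 *	smp_store_release(&priv->htb.max_qos_sqs, i + 1);
 *
 *   so a reader that observes max_qos_sqs == n is guaranteed to see n
 *   valid pointers.  This is a sketch of the pairing described in the
 *   comment there, not a copy of mlx5e_open_qos_sq().
 */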
static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
	int num_stats = 0;

	if (priv->q_counter)
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       drop_rq_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   q_stats_desc, i);
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	int ret;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	if (priv->q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
							  out, out_of_buffer);
	}

	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}
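/*
 * Note that both reads in the update op above extract the same PRM
 * field, out_of_buffer; the meaning depends on which counter set is
 * queried.  priv->q_counter is attached to the regular RQs (reported
 * as rx_out_of_buffer), while priv->drop_rq_q_counter is attached to
 * the drop RQ that receives traffic while the channels are closed,
 * hence its overflow count is reported as rx_if_down_packets.
 */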
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
	  VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
	  VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_steer_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_dev_oob_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_steer_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_dev_oob_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};
#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}

#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
						  pport_802_3_stats_desc, i);
	return idx;
}
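/*
 * PPCNT counter sets lay each 64-bit counter out as a pair of 32-bit
 * <name>_high/<name>_low fields, which is why the PPORT_*_OFF() macros
 * resolve to the byte offset of the _high half.  The reader then treats
 * the pair as one big-endian value; MLX5E_READ_CTR64_BE() is conceptually
 *
 *	be64_to_cpu(*(__be64 *)((char *)counter_set + desc[i].offset))
 *
 * the same trick MLX5E_READ_CTR64_BE_F() below spells out explicitly.
 */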
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define MLX5E_READ_CTR64_BE_F(ptr, set, c) \
	be64_to_cpu(*(__be64 *)((char *)ptr + \
		MLX5_BYTE_OFF(ppcnt_reg, \
			      counter_set.set.c##_high)))

static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
				u32 *ppcnt_ieee_802_3)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
				    sz, MLX5_REG_PPCNT, 0, 0);
}

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_received);
}

void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_phy_stats *phy_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	phy_stats->SymbolErrorDuringCarrier =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_symbol_error_during_carrier);
}

void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

#define RD(name) \
	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, \
			      eth_802_3_cntrs_grp_data_layout, \
			      name)

	mac_stats->FramesTransmittedOK = RD(a_frames_transmitted_ok);
	mac_stats->FramesReceivedOK = RD(a_frames_received_ok);
	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
	mac_stats->OctetsTransmittedOK = RD(a_octets_transmitted_ok);
	mac_stats->OctetsReceivedOK = RD(a_octets_received_ok);
	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
	mac_stats->InRangeLengthErrors = RD(a_in_range_length_errors);
	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
	mac_stats->FrameTooLongErrors = RD(a_frame_too_long_errors);
#undef RD
}
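/*
 * Unlike the driver-private strings elsewhere in this file, the
 * mlx5e_stats_*_get() helpers in this block back the standard ethtool
 * statistics interface.  With a reasonably recent ethtool they can be
 * queried as, e.g. (device name purely illustrative):
 *
 *	ethtool --include-statistics -a eth0        # pause frame counters
 *	ethtool -S eth0 --groups eth-mac eth-ctrl   # IEEE 802.3 MAC/ctrl
 */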
void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	ctrl_stats->MACControlFramesTransmitted =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_transmitted);
	ctrl_stats->MACControlFramesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_received);
	ctrl_stats->UnsupportedOpcodesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_unsupported_opcodes_received);
}

#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};
#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, 2047 },
	{ 2048, 4095 },
	{ 4096, 8191 },
	{ 8192, 10239 },
	{}
};

void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
			  struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges)
{
	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

#define RD(name) \
	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters, \
			      eth_2819_cntrs_grp_data_layout, \
			      name)

	rmon->undersize_pkts = RD(ether_stats_undersize_pkts);
	rmon->fragments = RD(ether_stats_fragments);
	rmon->jabbers = RD(ether_stats_jabbers);

	rmon->hist[0] = RD(ether_stats_pkts64octets);
	rmon->hist[1] = RD(ether_stats_pkts65to127octets);
	rmon->hist[2] = RD(ether_stats_pkts128to255octets);
	rmon->hist[3] = RD(ether_stats_pkts256to511octets);
	rmon->hist[4] = RD(ether_stats_pkts512to1023octets);
	rmon->hist[5] = RD(ether_stats_pkts1024to1518octets);
	rmon->hist[6] = RD(ether_stats_pkts1519to2047octets);
	rmon->hist[7] = RD(ether_stats_pkts2048to4095octets);
	rmon->hist[8] = RD(ether_stats_pkts4096to8191octets);
	rmon->hist[9] = RD(ether_stats_pkts8192to10239octets);
#undef RD

	*ranges = mlx5e_rmon_ranges;
}
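/*
 * The hist[] slots filled above are interpreted against the returned
 * *ranges table: ethtool reports hist[i] with the bounds of
 * mlx5e_rmon_ranges[i] (the trailing empty entry terminates the table),
 * so the two arrays must stay in lockstep if either is ever changed.
 */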
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	/* "1" for link_down_events special counter */
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_phy_statistical_stats_desc[i].format);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_phy_statistical_err_lanes_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
static int fec_num_lanes(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	int err;

	MLX5_SET(pmlp_reg, in, local_port, 1);
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_PMLP, 0, 0);
	if (err)
		return 0;

	return MLX5_GET(pmlp_reg, out, width);
}

static int fec_active_mode(struct mlx5_core_dev *mdev)
{
	unsigned long fec_active_long;
	u32 fec_active;

	if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
		return MLX5E_FEC_NOFEC;

	fec_active_long = fec_active;
	return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
}

#define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
	fec_stats->corrected_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_corrected_blocks_lane##idx); \
	fec_stats->uncorrectable_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_uncorrectable_blocks_lane##idx); \
})

static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
			     u32 *ppcnt, u8 lanes)
{
	if (lanes > 3) { /* 4 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(3);
		MLX5E_STATS_SET_FEC_BLOCK(2);
	}
	if (lanes > 1) /* 2 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(1);
	if (lanes > 0) /* 1 lane */
		MLX5E_STATS_SET_FEC_BLOCK(0);
}

static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
{
	fec_stats->corrected_blocks.total =
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
				      rs_fec_corrected_blocks);
	fec_stats->uncorrectable_blocks.total =
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
				      rs_fec_uncorrectable_blocks);
}

static void fec_set_block_stats(struct mlx5e_priv *priv,
				struct ethtool_fec_stats *fec_stats)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int mode = fec_active_mode(mdev);

	if (mode == MLX5E_FEC_NOFEC)
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
		return;

	switch (mode) {
	case MLX5E_FEC_RS_528_514:
	case MLX5E_FEC_RS_544_514:
	case MLX5E_FEC_LLRS_272_257_1:
		fec_set_rs_stats(fec_stats, out);
		return;
	case MLX5E_FEC_FIRECODE:
		fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
	}
}

static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
					 struct ethtool_fec_stats *fec_stats)
{
	u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

	fec_stats->corrected_bits.total =
		MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
				      phys_layer_statistical_cntrs,
				      phy_corrected_bits);
}
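/*
 * fec_active_mode() relies on mlx5e_get_fec_mode() returning the active
 * FEC as a one-hot bitmask whose bit positions match the MLX5E_FEC_*
 * enum, so find_first_bit() converts the mask back into an enum value
 * (e.g. a mask of BIT(MLX5E_FEC_FIRECODE) yields MLX5E_FEC_FIRECODE).
 * Firecode block counters are kept per physical lane, hence the lane
 * fan-out in fec_set_fc_stats(), while the RS/LLRS modes only expose
 * totals via fec_set_rs_stats().
 */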
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
			 struct ethtool_fec_stats *fec_stats)
{
	if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
		return;

	fec_set_corrected_bits_total(priv, fec_stats);
	fec_set_block_stats(priv, fec_stats);
}

#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_eth_ext_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
						    pport_eth_ext_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS ARRAY_SIZE(pport_per_tc_prio_stats_desc)

#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}
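/*
 * The per-TC/prio descriptors above carry a "%d" placeholder rather
 * than a fixed string; the fill_strings op below sprintf()s one copy
 * per priority, so a single descriptor such as "rx_prio%d_buf_discard"
 * expands to NUM_PPORT_PRIO ethtool strings (rx_prio0_buf_discard,
 * rx_prio1_buf_discard, and so on).
 */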

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_prio_stats_desc[i].format, prio);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_congest_prio_stats_desc[i].format, prio);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
						    pport_per_tc_prio_stats_desc, i);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
						    pport_per_tc_congest_prio_stats_desc, i);
	}

	return idx;
}

static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
	       mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}

#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
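
/*
 * Naming sketch (hypothetical values): the per-prio templates below carry
 * a %d that fill_strings expands once per priority, e.g. with prio == 3
 *
 *	sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx_prio%d_bytes", 3);
 *
 * yields "rx_prio3_bytes". Strings and values stay aligned because
 * fill_strings and fill_stats walk prios and descriptors in the same
 * nested order.
 */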

static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))

static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 pfc_en_tx;
	u8 pfc_en_rx;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

	return err ? 0 : pfc_en_tx | pfc_en_rx;
}
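
/*
 * Bitmap sketch: the value returned above is a per-priority bitmap (bit N
 * set when PFC is enabled on priority N in either direction), so callers
 * can both count and walk it. Assuming, say, bits 0 and 3 are set:
 *
 *	hweight8(pfc_combined) == 2
 *	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO)
 *		visits prio == 0, then prio == 3
 */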

static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 rx_pause;
	u32 tx_pause;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

	return err ? false : rx_pause | tx_pause;
}

static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
		NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						  pport_pfc_stall_stats_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats() +
	       mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}
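
/*
 * Readback note (presumed from the PPCNT layout): global pause and the
 * pause-storm stall events are reported in the priority-0 page, which is
 * why the two tails of mlx5e_grp_per_prio_pfc_fill_stats() read
 * per_prio_counters[0] rather than a per-prio slot; the string side makes
 * the same choice implicitly by printing "global".
 */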

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_ktls_get_count(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
	return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
	return idx + mlx5e_ktls_get_stats(priv, data + idx);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
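
/*
 * Endianness note: MLX5E_READ_CTR64_CPU above is the native-endian sibling
 * of MLX5E_READ_CTR64_BE. PME event counts are accumulated by the driver
 * in host order, while PPCNT/MPCNT pages arrive big-endian from firmware;
 * the descriptor-offset mechanics are otherwise identical.
 */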

static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
#ifdef CONFIG_PAGE_POOL_STATS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
#endif
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
};
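
/*
 * Descriptor sketch (based on the en_stats.h helpers, shown only for
 * orientation): MLX5E_DECLARE_RX_STAT(type, fld) is expected to expand to
 * a "rx%d_<fld>" format string plus offsetof(type, fld), i.e. roughly
 *
 *	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }
 *	~  { "rx%d_packets", offsetof(struct mlx5e_rq_stats, packets) }
 *
 * so the per-channel fill_strings pass can stamp the ring index into %d.
 */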

static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
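
/*
 * Two XDP send-queue flavors share struct mlx5e_xdpsq_stats:
 * rq_xdpsq_stats_desc covers the per-RQ queue used for XDP_TX, while
 * xdpsq_stats_desc covers the queue that services XDP_REDIRECT via
 * ndo_xdp_xmit; only the name prefix produced by the corresponding
 * MLX5E_DECLARE_*_STAT macro differs.
 */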

static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};

static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_sq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc ptp_ch_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_cq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};
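
/*
 * XSK note: AF_XDP queues reuse the regular ring stat structures; the
 * MLX5E_DECLARE_XSKRQ_STAT/MLX5E_DECLARE_XSKSQ_STAT helpers are expected
 * to emit "rx%d_xsk_*" / "tx%d_xsk_*" names, so the subset of fields
 * listed here is what distinguishes the XSK view from the full rq/sq
 * descriptor tables above.
 */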

static const struct counter_desc ptp_rq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
};
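
/*
 * qos_sq_stats_desc below mirrors sq_stats_desc field for field; only the
 * name prefix differs (a per-QoS-queue index instead of the channel/TC
 * txq index), which keeps HTB queue counters directly comparable with the
 * regular txq counters.
 */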

static const struct counter_desc qos_sq_stats_desc[] = {
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS		ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS		ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS		ARRAY_SIZE(ptp_cq_stats_desc)
#define NUM_PTP_RQ_STATS		ARRAY_SIZE(ptp_rq_stats_desc)
#define NUM_QOS_SQ_STATS		ARRAY_SIZE(qos_sq_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	int i, qid;

	for (qid = 0; qid < max_qos_sqs; qid++)
		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				qos_sq_stats_desc[i].format, qid);

	return idx;
}
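
/*
 * Ordering sketch (pattern only; names as used by mlx5e_open_qos_sq): the
 * writer publishes a new stats slot and only then bumps the count,
 *
 *	WRITE_ONCE(priv->htb.qos_sq_stats[qid], new_stats);
 *	smp_store_release(&priv->htb.max_qos_sqs, qid + 1);
 *
 * so the smp_load_acquire() readers in this group may dereference every
 * slot below the count they observed without further locking.
 */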

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i, qid;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	stats = READ_ONCE(priv->htb.qos_sq_stats);

	for (qid = 0; qid < max_qos_sqs; qid++) {
		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);

		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
	int num = NUM_PTP_CH_STATS;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return 0;

	if (priv->tx_ptp_opened)
		num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
	if (priv->rx_ptp_opened)
		num += NUM_PTP_RQ_STATS;

	return num;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		sprintf(data + (idx++) * ETH_GSTRING_LEN,
			"%s", ptp_ch_stats_desc[i].format);

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					ptp_sq_stats_desc[i].format, tc);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					ptp_cq_stats_desc[i].format, tc);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
	}
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
					     ptp_ch_stats_desc, i);

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
							     ptp_sq_stats_desc, i);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
							     ptp_cq_stats_desc, i);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
						     ptp_rq_stats_desc, i);
	}
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
	int max_nch = priv->stats_nch;

	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
	       (NUM_RQ_XDPSQ_STATS * max_nch) +
	       (NUM_XDPSQ_STATS * max_nch) +
	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}
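
/*
 * Sizing note: priv->xsk.ever_used doubles as a 0-or-1 multiplier, e.g.
 * NUM_XSKRQ_STATS * is_xsk, so the XSK rows are counted, named and filled
 * under exactly the same condition. Once an XSK socket has ever been
 * opened the rows stay visible (as zeros for idle rings), keeping the
 * ethtool string set stable across XSK teardown.
 */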

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->stats_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xskrq_stats_desc[j].format, i);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_xdpsq_stats_desc[j].format, i);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					i + tc * max_nch);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xsksq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xdpsq_stats_desc[j].format, i);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->stats_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
						     rq_stats_desc, j);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
						     xskrq_stats_desc, j);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
						     rq_xdpsq_stats_desc, j);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
							     sq_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
						     xsksq_stats_desc, j);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
						     xdpsq_stats_desc, j);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }

MLX5E_DEFINE_STATS_GRP(sw, 0);
MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(2863, 0);
MLX5E_DEFINE_STATS_GRP(2819, 0);
MLX5E_DEFINE_STATS_GRP(phy, 0);
MLX5E_DEFINE_STATS_GRP(pcie, 0);
MLX5E_DEFINE_STATS_GRP(per_prio, 0);
MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);

/* The stats groups order is opposite to the update_stats() order calls */
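
/*
 * Extension sketch (hypothetical "foo" group, not part of the driver): a
 * new group only needs the four ops and a slot in the table below,
 *
 *	static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(foo) { return 0; }
 *	static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(foo) { return idx; }
 *	static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(foo) { return idx; }
 *	static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(foo) { return; }
 *	MLX5E_DEFINE_STATS_GRP(foo, 0);
 *
 * bearing in mind that update_stats() walks the table back to front, so a
 * group that depends on another group's refresh must sit after it here.
 */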

mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
#endif
	&MLX5E_STATS_GRP(tls),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
	&MLX5E_STATS_GRP(ptp),
	&MLX5E_STATS_GRP(qos),
};

unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_nic_stats_grps);
}