cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

en_port.c (15817B)


      1/*
      2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
      3 *
      4 * This software is available to you under a choice of one of two
      5 * licenses.  You may choose to be licensed under the terms of the GNU
      6 * General Public License (GPL) Version 2, available from the file
      7 * COPYING in the main directory of this source tree, or the
      8 * OpenIB.org BSD license below:
      9 *
     10 *     Redistribution and use in source and binary forms, with or
     11 *     without modification, are permitted provided that the following
     12 *     conditions are met:
     13 *
     14 *      - Redistributions of source code must retain the above
     15 *        copyright notice, this list of conditions and the following
     16 *        disclaimer.
     17 *
     18 *      - Redistributions in binary form must reproduce the above
     19 *        copyright notice, this list of conditions and the following
     20 *        disclaimer in the documentation and/or other materials
     21 *        provided with the distribution.
     22 *
     23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
     24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
     26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
     27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
     28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
     29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     30 * SOFTWARE.
     31 *
     32 */
     33
     34
     35#include <linux/if_vlan.h>
     36
     37#include <linux/mlx4/device.h>
     38#include <linux/mlx4/cmd.h>
     39
     40#include "en_port.h"
     41#include "mlx4_en.h"
     42
     43
     44int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
     45{
     46	struct mlx4_cmd_mailbox *mailbox;
     47	struct mlx4_set_vlan_fltr_mbox *filter;
     48	int i;
     49	int j;
     50	int index = 0;
     51	u32 entry;
     52	int err = 0;
     53
     54	mailbox = mlx4_alloc_cmd_mailbox(dev);
     55	if (IS_ERR(mailbox))
     56		return PTR_ERR(mailbox);
     57
     58	filter = mailbox->buf;
     59	for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
     60		entry = 0;
     61		for (j = 0; j < 32; j++)
     62			if (test_bit(index++, priv->active_vlans))
     63				entry |= 1 << j;
     64		filter->entry[i] = cpu_to_be32(entry);
     65	}
     66	err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
     67		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
     68	mlx4_free_cmd_mailbox(dev, mailbox);
     69	return err;
     70}
     71
     72int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
     73{
     74	struct mlx4_en_query_port_context *qport_context;
     75	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
     76	struct mlx4_en_port_state *state = &priv->port_state;
     77	struct mlx4_cmd_mailbox *mailbox;
     78	int err;
     79
     80	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
     81	if (IS_ERR(mailbox))
     82		return PTR_ERR(mailbox);
     83	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
     84			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
     85			   MLX4_CMD_WRAPPED);
     86	if (err)
     87		goto out;
     88	qport_context = mailbox->buf;
     89
     90	/* This command is always accessed from Ethtool context
     91	 * already synchronized, no need in locking */
     92	state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
     93	switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) {
     94	case MLX4_EN_100M_SPEED:
     95		state->link_speed = SPEED_100;
     96		break;
     97	case MLX4_EN_1G_SPEED:
     98		state->link_speed = SPEED_1000;
     99		break;
    100	case MLX4_EN_10G_SPEED_XAUI:
    101	case MLX4_EN_10G_SPEED_XFI:
    102		state->link_speed = SPEED_10000;
    103		break;
    104	case MLX4_EN_20G_SPEED:
    105		state->link_speed = SPEED_20000;
    106		break;
    107	case MLX4_EN_40G_SPEED:
    108		state->link_speed = SPEED_40000;
    109		break;
    110	case MLX4_EN_56G_SPEED:
    111		state->link_speed = SPEED_56000;
    112		break;
    113	default:
    114		state->link_speed = -1;
    115		break;
    116	}
    117
    118	state->transceiver = qport_context->transceiver;
    119
    120	state->flags = 0; /* Reset and recalculate the port flags */
    121	state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ?
    122		MLX4_EN_PORT_ANC : 0;
    123	state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ?
    124		MLX4_EN_PORT_ANE : 0;
    125
    126out:
    127	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
    128	return err;
    129}
    130
    131/* Each counter set is located in struct mlx4_en_stat_out_mbox
    132 * with a const offset between its prio components.
    133 * This function runs over a counter set and sum all of it's prio components.
    134 */
    135static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
    136{
    137	__be64 *curr = start;
    138	unsigned long ret = 0;
    139	int i;
    140	int offset = next - start;
    141
    142	for (i = 0; i < num; i++) {
    143		ret += be64_to_cpu(*curr);
    144		curr += offset;
    145	}
    146
    147	return ret;
    148}
    149
    150void mlx4_en_fold_software_stats(struct net_device *dev)
    151{
    152	struct mlx4_en_priv *priv = netdev_priv(dev);
    153	struct mlx4_en_dev *mdev = priv->mdev;
    154	unsigned long packets, bytes;
    155	int i;
    156
    157	if (!priv->port_up || mlx4_is_master(mdev->dev))
    158		return;
    159
    160	packets = 0;
    161	bytes = 0;
    162	for (i = 0; i < priv->rx_ring_num; i++) {
    163		const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];
    164
    165		packets += READ_ONCE(ring->packets);
    166		bytes   += READ_ONCE(ring->bytes);
    167	}
    168	dev->stats.rx_packets = packets;
    169	dev->stats.rx_bytes = bytes;
    170
    171	packets = 0;
    172	bytes = 0;
    173	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
    174		const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];
    175
    176		packets += READ_ONCE(ring->packets);
    177		bytes   += READ_ONCE(ring->bytes);
    178	}
    179	dev->stats.tx_packets = packets;
    180	dev->stats.tx_bytes = bytes;
    181}
    182
/* mlx4_en_DUMP_ETH_STATS - refresh all ethernet statistics for @port.
 * @mdev:  mlx4 ethernet device state
 * @port:  port number (index into mdev->pndev)
 * @reset: non-zero asks FW to clear the counters after reading
 *         (encoded into bits 15:8 of the command input modifier)
 *
 * Dumps the HW counters via the DUMP_ETH_STATS command (plus its
 * flow-control variant when MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN is set),
 * folds in the per-ring software counters, and updates dev->stats,
 * priv->port_stats, priv->xdp_stats, priv->pkstats, priv->pf_stats
 * and the pause/flow-control statistics under priv->stats_lock.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
	struct mlx4_counter tmp_counter_stats;
	struct mlx4_en_stat_out_mbox *mlx4_en_stats;
	struct mlx4_en_stat_out_flow_control_mbox *flowstats;
	struct net_device *dev = mdev->pndev[port];
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct mlx4_cmd_mailbox *mailbox, *mailbox_priority;
	u64 in_mod = reset << 8 | port;
	int err;
	int i, counter_index;
	unsigned long sw_tx_dropped = 0;
	unsigned long sw_rx_dropped = 0;

	/* Two mailboxes: one for the main dump, one for the
	 * per-priority flow-control counters. */
	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox_priority)) {
		mlx4_free_cmd_mailbox(mdev->dev, mailbox);
		return PTR_ERR(mailbox_priority);
	}

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	mlx4_en_stats = mailbox->buf;

	/* Zeroing first guarantees counter_mode == 0 (and all-zero
	 * counters) should the query fail; the pf_stats update further
	 * down is gated on counter_mode == 0. */
	memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
	counter_index = mlx4_get_default_counter_index(mdev->dev, port);
	err = mlx4_get_counter_stats(mdev->dev, counter_index,
				     &tmp_counter_stats, reset);
	/* NOTE(review): err is not checked here -- a failure still
	 * falls through to the HW stats processing and is only visible
	 * in the final return value (and may be overwritten by the
	 * flow-control query below). Confirm this is intentional. */

	/* 0xffs indicates invalid value */
	memset(mailbox_priority->buf, 0xff,
	       sizeof(*flowstats) * MLX4_NUM_PRIORITIES);

	/* Flow-control counters are only dumped when the device
	 * advertises support; otherwise the 0xff fill above stands. */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
		memset(mailbox_priority->buf, 0,
		       sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
		err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma,
				   in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
				   0, MLX4_CMD_DUMP_ETH_STATS,
				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
		if (err)
			goto out;
	}

	flowstats = mailbox_priority->buf;

	/* stats_lock serializes against concurrent readers of the
	 * stats structures updated below. */
	spin_lock_bh(&priv->stats_lock);

	mlx4_en_fold_software_stats(dev);

	/* Software RX counters: reset the accumulated values, then
	 * re-sum over all RX rings. */
	priv->port_stats.rx_chksum_good = 0;
	priv->port_stats.rx_chksum_none = 0;
	priv->port_stats.rx_chksum_complete = 0;
	priv->port_stats.rx_alloc_pages = 0;
	priv->xdp_stats.rx_xdp_drop    = 0;
	priv->xdp_stats.rx_xdp_redirect = 0;
	priv->xdp_stats.rx_xdp_redirect_fail = 0;
	priv->xdp_stats.rx_xdp_tx      = 0;
	priv->xdp_stats.rx_xdp_tx_full = 0;
	for (i = 0; i < priv->rx_ring_num; i++) {
		const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];

		sw_rx_dropped			+= READ_ONCE(ring->dropped);
		priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);
		priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);
		priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
		priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);
		priv->xdp_stats.rx_xdp_drop	+= READ_ONCE(ring->xdp_drop);
		priv->xdp_stats.rx_xdp_redirect += READ_ONCE(ring->xdp_redirect);
		priv->xdp_stats.rx_xdp_redirect_fail += READ_ONCE(ring->xdp_redirect_fail);
		priv->xdp_stats.rx_xdp_tx	+= READ_ONCE(ring->xdp_tx);
		priv->xdp_stats.rx_xdp_tx_full	+= READ_ONCE(ring->xdp_tx_full);
	}
	/* Software TX counters: same reset-and-accumulate pattern over
	 * the TX rings. */
	priv->port_stats.tx_chksum_offload = 0;
	priv->port_stats.queue_stopped = 0;
	priv->port_stats.wake_queue = 0;
	priv->port_stats.tso_packets = 0;
	priv->port_stats.xmit_more = 0;

	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];

		sw_tx_dropped			   += READ_ONCE(ring->tx_dropped);
		priv->port_stats.tx_chksum_offload += READ_ONCE(ring->tx_csum);
		priv->port_stats.queue_stopped     += READ_ONCE(ring->queue_stopped);
		priv->port_stats.wake_queue        += READ_ONCE(ring->wake_queue);
		priv->port_stats.tso_packets       += READ_ONCE(ring->tso_packets);
		priv->port_stats.xmit_more         += READ_ONCE(ring->xmit_more);
	}

	/* Physical-port (PHY) totals are only computed on non-slave
	 * functions; the master additionally reports them as the
	 * netdev counters (a master's software counters are skipped in
	 * mlx4_en_fold_software_stats). */
	if (!mlx4_is_slave(mdev->dev)) {
		struct mlx4_en_phy_stats *p_stats = &priv->phy_stats;

		p_stats->rx_packets_phy =
			en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
				       &mlx4_en_stats->RTOT_prio_1,
				       NUM_PRIORITIES);
		p_stats->tx_packets_phy =
			en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
				       &mlx4_en_stats->TTOT_prio_1,
				       NUM_PRIORITIES);
		p_stats->rx_bytes_phy =
			en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
				       &mlx4_en_stats->ROCT_prio_1,
				       NUM_PRIORITIES);
		p_stats->tx_bytes_phy =
			en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
				       &mlx4_en_stats->TOCT_prio_1,
				       NUM_PRIORITIES);
		if (mlx4_is_master(mdev->dev)) {
			stats->rx_packets = p_stats->rx_packets_phy;
			stats->tx_packets = p_stats->tx_packets_phy;
			stats->rx_bytes = p_stats->rx_bytes_phy;
			stats->tx_bytes = p_stats->tx_bytes_phy;
		}
	}

	/* net device stats */
	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
			   be32_to_cpu(mlx4_en_stats->RJBBR) +
			   be32_to_cpu(mlx4_en_stats->RCRC) +
			   be32_to_cpu(mlx4_en_stats->RRUNT) +
			   be64_to_cpu(mlx4_en_stats->RInRangeLengthErr) +
			   be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr) +
			   be32_to_cpu(mlx4_en_stats->RSHORT) +
			   en_stats_adder(&mlx4_en_stats->RGIANT_prio_0,
					  &mlx4_en_stats->RGIANT_prio_1,
					  NUM_PRIORITIES);
	stats->tx_errors = en_stats_adder(&mlx4_en_stats->TGIANT_prio_0,
					  &mlx4_en_stats->TGIANT_prio_1,
					  NUM_PRIORITIES);
	stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
					  &mlx4_en_stats->MCAST_prio_1,
					  NUM_PRIORITIES);
	/* Drop counters combine HW drops with the software per-ring
	 * drop counters accumulated above. */
	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
			    sw_rx_dropped;
	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP) +
			    sw_tx_dropped;

	/* RX stats */
	priv->pkstats.rx_multicast_packets = stats->multicast;
	priv->pkstats.rx_broadcast_packets =
			en_stats_adder(&mlx4_en_stats->RBCAST_prio_0,
				       &mlx4_en_stats->RBCAST_prio_1,
				       NUM_PRIORITIES);
	priv->pkstats.rx_jabbers = be32_to_cpu(mlx4_en_stats->RJBBR);
	priv->pkstats.rx_in_range_length_error =
		be64_to_cpu(mlx4_en_stats->RInRangeLengthErr);
	priv->pkstats.rx_out_range_length_error =
		be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr);

	/* Tx stats */
	priv->pkstats.tx_multicast_packets =
		en_stats_adder(&mlx4_en_stats->TMCAST_prio_0,
			       &mlx4_en_stats->TMCAST_prio_1,
			       NUM_PRIORITIES);
	priv->pkstats.tx_broadcast_packets =
		en_stats_adder(&mlx4_en_stats->TBCAST_prio_0,
			       &mlx4_en_stats->TBCAST_prio_1,
			       NUM_PRIORITIES);

	/* Per-priority packet ([i][0]) and octet ([i][1]) counters;
	 * index 8 is the no-VLAN bucket. */
	priv->pkstats.rx_prio[0][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
	priv->pkstats.rx_prio[0][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_0);
	priv->pkstats.rx_prio[1][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
	priv->pkstats.rx_prio[1][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_1);
	priv->pkstats.rx_prio[2][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
	priv->pkstats.rx_prio[2][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_2);
	priv->pkstats.rx_prio[3][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
	priv->pkstats.rx_prio[3][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_3);
	priv->pkstats.rx_prio[4][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
	priv->pkstats.rx_prio[4][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_4);
	priv->pkstats.rx_prio[5][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
	priv->pkstats.rx_prio[5][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_5);
	priv->pkstats.rx_prio[6][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
	priv->pkstats.rx_prio[6][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_6);
	priv->pkstats.rx_prio[7][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
	priv->pkstats.rx_prio[7][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_7);
	priv->pkstats.rx_prio[8][0] = be64_to_cpu(mlx4_en_stats->RTOT_novlan);
	priv->pkstats.rx_prio[8][1] = be64_to_cpu(mlx4_en_stats->ROCT_novlan);
	priv->pkstats.tx_prio[0][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
	priv->pkstats.tx_prio[0][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_0);
	priv->pkstats.tx_prio[1][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
	priv->pkstats.tx_prio[1][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_1);
	priv->pkstats.tx_prio[2][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
	priv->pkstats.tx_prio[2][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_2);
	priv->pkstats.tx_prio[3][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
	priv->pkstats.tx_prio[3][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_3);
	priv->pkstats.tx_prio[4][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
	priv->pkstats.tx_prio[4][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_4);
	priv->pkstats.tx_prio[5][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
	priv->pkstats.tx_prio[5][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_5);
	priv->pkstats.tx_prio[6][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
	priv->pkstats.tx_prio[6][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_6);
	priv->pkstats.tx_prio[7][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
	priv->pkstats.tx_prio[7][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_7);
	priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
	priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);

	/* counter_mode == 0 also holds when the counter query above
	 * failed (tmp_counter_stats was zeroed), in which case the
	 * pf_stats are simply reset to zero here. */
	if (tmp_counter_stats.counter_mode == 0) {
		priv->pf_stats.rx_bytes   = be64_to_cpu(tmp_counter_stats.rx_bytes);
		priv->pf_stats.tx_bytes   = be64_to_cpu(tmp_counter_stats.tx_bytes);
		priv->pf_stats.rx_packets = be64_to_cpu(tmp_counter_stats.rx_frames);
		priv->pf_stats.tx_packets = be64_to_cpu(tmp_counter_stats.tx_frames);
	}

	/* Per-priority pause / flow-control counters. */
	for (i = 0; i < MLX4_NUM_PRIORITIES; i++)	{
		priv->rx_priority_flowstats[i].rx_pause =
			be64_to_cpu(flowstats[i].rx_pause);
		priv->rx_priority_flowstats[i].rx_pause_duration =
			be64_to_cpu(flowstats[i].rx_pause_duration);
		priv->rx_priority_flowstats[i].rx_pause_transition =
			be64_to_cpu(flowstats[i].rx_pause_transition);
		priv->tx_priority_flowstats[i].tx_pause =
			be64_to_cpu(flowstats[i].tx_pause);
		priv->tx_priority_flowstats[i].tx_pause_duration =
			be64_to_cpu(flowstats[i].tx_pause_duration);
		priv->tx_priority_flowstats[i].tx_pause_transition =
			be64_to_cpu(flowstats[i].tx_pause_transition);
	}

	/* if pfc is not in use, all priorities counters have the same value */
	priv->rx_flowstats.rx_pause =
		be64_to_cpu(flowstats[0].rx_pause);
	priv->rx_flowstats.rx_pause_duration =
		be64_to_cpu(flowstats[0].rx_pause_duration);
	priv->rx_flowstats.rx_pause_transition =
		be64_to_cpu(flowstats[0].rx_pause_transition);
	priv->tx_flowstats.tx_pause =
		be64_to_cpu(flowstats[0].tx_pause);
	priv->tx_flowstats.tx_pause_duration =
		be64_to_cpu(flowstats[0].tx_pause_duration);
	priv->tx_flowstats.tx_pause_transition =
		be64_to_cpu(flowstats[0].tx_pause_transition);

	spin_unlock_bh(&priv->stats_lock);

out:
	/* Both mailboxes are freed on every exit path past allocation. */
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority);
	return err;
}
    436