cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

cmd.c (63820B)


      1/*
      2 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
      3 *
      4 * This software is available to you under a choice of one of two
      5 * licenses.  You may choose to be licensed under the terms of the GNU
      6 * General Public License (GPL) Version 2, available from the file
      7 * COPYING in the main directory of this source tree, or the
      8 * OpenIB.org BSD license below:
      9 *
     10 *     Redistribution and use in source and binary forms, with or
     11 *     without modification, are permitted provided that the following
     12 *     conditions are met:
     13 *
     14 *      - Redistributions of source code must retain the above
     15 *        copyright notice, this list of conditions and the following
     16 *        disclaimer.
     17 *
     18 *      - Redistributions in binary form must reproduce the above
     19 *        copyright notice, this list of conditions and the following
     20 *        disclaimer in the documentation and/or other materials
     21 *        provided with the distribution.
     22 *
     23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
     24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
     26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
     27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
     28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
     29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     30 * SOFTWARE.
     31 */
     32
     33#include <linux/highmem.h>
     34#include <linux/errno.h>
     35#include <linux/pci.h>
     36#include <linux/dma-mapping.h>
     37#include <linux/slab.h>
     38#include <linux/delay.h>
     39#include <linux/random.h>
     40#include <linux/io-mapping.h>
     41#include <linux/mlx5/driver.h>
     42#include <linux/mlx5/eq.h>
     43#include <linux/debugfs.h>
     44
     45#include "mlx5_core.h"
     46#include "lib/eq.h"
     47#include "lib/tout.h"
     48
/* Command-interface revision this driver speaks; checked against FW. */
enum {
	CMD_IF_REV = 5,
};
     52
/* Command completion modes: busy-poll the owner bit, or wait for EQ events. */
enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};
     57
/*
 * Delivery status codes: describe how the command descriptor/mailboxes were
 * transported to FW, independent of the command's own return status.
 */
enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};
     71
/*
 * Allocate and initialize a command work entry binding together the input
 * and output messages, the optional async completion callback, and its
 * context.  GFP_ATOMIC is used when a callback is supplied (async commands
 * may be issued from atomic context), GFP_KERNEL otherwise.
 *
 * Returns the new entry or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct mlx5_cmd_work_ent *
cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
	      struct mlx5_cmd_msg *out, void *uout, int uout_size,
	      mlx5_cmd_cbk_t cbk, void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->idx	= -EINVAL;	/* no HW command slot assigned yet */
	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue = page_queue;
	refcount_set(&ent->refcnt, 1);	/* caller holds the initial reference */

	return ent;
}
     97
/* Release an entry previously allocated by cmd_alloc_ent(). */
static void cmd_free_ent(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
    102
    103static u8 alloc_token(struct mlx5_cmd *cmd)
    104{
    105	u8 token;
    106
    107	spin_lock(&cmd->token_lock);
    108	cmd->token++;
    109	if (cmd->token == 0)
    110		cmd->token++;
    111	token = cmd->token;
    112	spin_unlock(&cmd->token_lock);
    113
    114	return token;
    115}
    116
    117static int cmd_alloc_index(struct mlx5_cmd *cmd)
    118{
    119	unsigned long flags;
    120	int ret;
    121
    122	spin_lock_irqsave(&cmd->alloc_lock, flags);
    123	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
    124	if (ret < cmd->max_reg_cmds)
    125		clear_bit(ret, &cmd->bitmask);
    126	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
    127
    128	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
    129}
    130
/* Return a HW command slot to the free pool; caller must hold alloc_lock. */
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
	lockdep_assert_held(&cmd->alloc_lock);
	set_bit(idx, &cmd->bitmask);
}
    136
/* Take an additional reference on a command entry. */
static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
{
	refcount_inc(&ent->refcnt);
}
    141
/*
 * Drop a reference on a command entry.  On the final put, release the HW
 * command slot (if one was assigned) and the corresponding semaphore, then
 * free the entry.
 *
 * NOTE(review): the whole put runs under alloc_lock — this appears intended
 * to prevent a racing lookup/get of the entry by index while the slot is
 * being recycled; confirm against the completion-handler path.
 */
static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd *cmd = ent->cmd;
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	if (!refcount_dec_and_test(&ent->refcnt))
		goto out;

	if (ent->idx >= 0) {
		/* idx >= 0 means a HW slot (and a semaphore slot) is held */
		cmd_free_index(cmd, ent->idx);
		up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
	}

	cmd_free_ent(ent);
out:
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
    160
/* Address of command layout slot @idx within the command buffer (slots are 2^log_stride bytes apart). */
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}
    165
    166static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
    167{
    168	int size = msg->len;
    169	int blen = size - min_t(int, sizeof(msg->first.data), size);
    170
    171	return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
    172}
    173
    174static u8 xor8_buf(void *buf, size_t offset, int len)
    175{
    176	u8 *ptr = buf;
    177	u8 sum = 0;
    178	int i;
    179	int end = len + offset;
    180
    181	for (i = offset; i < end; i++)
    182		sum ^= ptr[i];
    183
    184	return sum;
    185}
    186
    187static int verify_block_sig(struct mlx5_cmd_prot_block *block)
    188{
    189	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
    190	int xor_len = sizeof(*block) - sizeof(block->data) - 1;
    191
    192	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
    193		return -EHWPOISON;
    194
    195	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
    196		return -EHWPOISON;
    197
    198	return 0;
    199}
    200
/*
 * Compute and store both checksums of a mailbox block.  ctrl_sig must be
 * written first: the full-block signature (sig) covers every byte except
 * itself, including ctrl_sig.  Both are XOR complements so a verifying
 * XOR-fold yields 0xff.
 */
static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}
    209
    210static void calc_chain_sig(struct mlx5_cmd_msg *msg)
    211{
    212	struct mlx5_cmd_mailbox *next = msg->next;
    213	int n = mlx5_calc_cmd_blocks(msg);
    214	int i = 0;
    215
    216	for (i = 0; i < n && next; i++)  {
    217		calc_block_sig(next->buf);
    218		next = next->next;
    219	}
    220}
    221
/*
 * Sign a command entry: always checksum the command layout itself; when
 * @csum is set, also sign every mailbox block of the input and output
 * message chains.
 */
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, 0,  sizeof(*ent->lay));
	if (csum) {
		calc_chain_sig(ent->in);
		calc_chain_sig(ent->out);
	}
}
    230
/*
 * Busy-poll the command layout's ownership byte until FW hands the entry
 * back (CMD_OWNER_HW cleared) or the configured command timeout plus a
 * one-second grace period expires.  Sets ent->ret to 0 on completion or
 * -ETIMEDOUT on expiry.  Yields the CPU between polls via cond_resched().
 */
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd);
	u64 cmd_to_ms = mlx5_tout_ms(dev, CMD);
	unsigned long poll_end;
	u8 own;

	poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000);

	do {
		/* READ_ONCE: the byte is written by HW/FW concurrently */
		own = READ_ONCE(ent->lay->status_own);
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		cond_resched();
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}
    251
    252static int verify_signature(struct mlx5_cmd_work_ent *ent)
    253{
    254	struct mlx5_cmd_mailbox *next = ent->out->next;
    255	int n = mlx5_calc_cmd_blocks(ent->out);
    256	int err;
    257	u8 sig;
    258	int i = 0;
    259
    260	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
    261	if (sig != 0xff)
    262		return -EHWPOISON;
    263
    264	for (i = 0; i < n && next; i++) {
    265		err = verify_block_sig(next->buf);
    266		if (err)
    267			return -EHWPOISON;
    268
    269		next = next->next;
    270	}
    271
    272	return 0;
    273}
    274
/*
 * Hex-dump @size bytes of @buf to the debug log, 16 bytes (four big-endian
 * words) per line, labelling each line with the command index and running
 * @offset.  When @data_only is clear, a trailing blank line separates dumps.
 *
 * NOTE(review): each iteration prints a full 16-byte row, so when @size is
 * not a multiple of 16 the last row reads past @size — callers appear to
 * pass block-sized buffers so this stays within the allocation; confirm.
 */
static void dump_buf(void *buf, int size, int data_only, int offset, int idx)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
			 be32_to_cpu(p[0]), be32_to_cpu(p[1]),
			 be32_to_cpu(p[2]), be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}
    290
/*
 * Emulate a command's return value while the device is in an internal-error
 * state (PCI error / reset flow), so teardown can proceed smoothly.
 *
 * Teardown-style commands (destroy/dealloc/delete and a few state-modifying
 * ops that must not fail during cleanup) are reported as successful; all
 * other known commands are reported as aborted by the driver with a driver
 * syndrome and -ENOLINK.  Unknown opcodes return -EINVAL.
 */
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	/* Cleanup/teardown commands: pretend they succeeded */
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_XRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
	case MLX5_CMD_OP_DEALLOC_MEMIC:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
	case MLX5_CMD_OP_DEALLOC_SF:
	case MLX5_CMD_OP_DESTROY_UCTX:
	case MLX5_CMD_OP_DESTROY_UMEM:
	case MLX5_CMD_OP_MODIFY_RQT:
		return MLX5_CMD_STAT_OK;

	/* All other known commands: report driver-aborted */
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_SET_MONITOR_COUNTER:
	case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_QUERY_RQT:

	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_CREATE_QP:
	case MLX5_CMD_OP_FPGA_MODIFY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_UCTX:
	case MLX5_CMD_OP_CREATE_UMEM:
	case MLX5_CMD_OP_ALLOC_MEMIC:
	case MLX5_CMD_OP_MODIFY_XRQ:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_QUERY_VHCA_STATE:
	case MLX5_CMD_OP_MODIFY_VHCA_STATE:
	case MLX5_CMD_OP_ALLOC_SF:
	case MLX5_CMD_OP_SUSPEND_VHCA:
	case MLX5_CMD_OP_RESUME_VHCA:
	case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
	case MLX5_CMD_OP_SAVE_VHCA_STATE:
	case MLX5_CMD_OP_LOAD_VHCA_STATE:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -ENOLINK;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}
    493
/* Human-readable name for a command opcode, for logging and debugfs. */
const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
	MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_LAG);
	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
	MLX5_COMMAND_STR_CASE(QUERY_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
	MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
	MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
	MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
	MLX5_COMMAND_STR_CASE(CREATE_XRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRQ);
	MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
	MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
	MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
	MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
	MLX5_COMMAND_STR_CASE(CREATE_UCTX);
	MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
	MLX5_COMMAND_STR_CASE(CREATE_UMEM);
	MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
	MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
	MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
	MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
	MLX5_COMMAND_STR_CASE(ALLOC_SF);
	MLX5_COMMAND_STR_CASE(DEALLOC_SF);
	MLX5_COMMAND_STR_CASE(SUSPEND_VHCA);
	MLX5_COMMAND_STR_CASE(RESUME_VHCA);
	MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE);
	MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
	MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
	default: return "unknown command opcode";
	}
}
    690
/* Human-readable description of a FW command return status, for logging. */
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}
    730
    731static int cmd_status_to_err(u8 status)
    732{
    733	switch (status) {
    734	case MLX5_CMD_STAT_OK:				return 0;
    735	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
    736	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
    737	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
    738	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
    739	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
    740	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
    741	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
    742	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
    743	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
    744	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
    745	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
    746	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
    747	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
    748	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
    749	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
    750	default:					return -EIO;
    751	}
    752}
    753
/* Generic command outbox header bit-layout, accessed via MLX5_GET/MLX5_SET. */
struct mlx5_ifc_mbox_out_bits {
	u8         status[0x8];
	u8         reserved_at_8[0x18];

	u8         syndrome[0x20];

	u8         reserved_at_40[0x40];
};
    762
/* Generic command inbox header bit-layout, accessed via MLX5_GET/MLX5_SET. */
struct mlx5_ifc_mbox_in_bits {
	u8         opcode[0x10];
	u8         uid[0x10];

	u8         reserved_at_20[0x10];
	u8         op_mod[0x10];

	u8         reserved_at_40[0x40];
};
    772
/*
 * Log (rate-limited, error level) a failed command's opcode, op_mod,
 * FW status, syndrome, and the errno it maps to, extracted from the
 * generic outbox header.
 */
void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
	u8 status = MLX5_GET(mbox_out, out, status);

	mlx5_core_err_rl(dev,
			 "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
			 mlx5_command_str(opcode), opcode, op_mod,
			 cmd_status_str(status), status, syndrome, cmd_status_to_err(status));
}
EXPORT_SYMBOL(mlx5_cmd_out_err);
    784
/*
 * Log a failed command.  Kernel-issued commands (uid == 0) are logged at
 * error level via mlx5_cmd_out_err(); user-context commands (uid != 0) are
 * only logged at debug level, since their failures are the user's concern.
 * DESTROY_MKEY is also demoted to debug even for uid 0 — presumably to
 * avoid log noise on a known-benign failure path; confirm against callers.
 */
static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
	u16 opcode, op_mod;
	u32 syndrome;
	u8  status;
	u16 uid;
	int err;

	syndrome = MLX5_GET(mbox_out, out, syndrome);
	status = MLX5_GET(mbox_out, out, status);

	opcode = MLX5_GET(mbox_in, in, opcode);
	op_mod = MLX5_GET(mbox_in, in, op_mod);
	uid    = MLX5_GET(mbox_in, in, uid);

	err = cmd_status_to_err(status);

	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
		mlx5_cmd_out_err(dev, opcode, op_mod, out);
	else
		mlx5_core_dbg(dev,
			"%s(0x%x) op_mod(0x%x) uid(%d) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
			mlx5_command_str(opcode), opcode, op_mod, uid,
			cmd_status_str(status), status, syndrome, err);
}
    810
/*
 * Translate a command's delivery/transport result plus its outbox status
 * into a single errno for the caller.
 *
 * -ENXIO (command aborted by PCI error or reset-triggered completion) is
 * converted into an emulated FW status written back into @out, so teardown
 * paths see a coherent outbox.  Other delivery errors are returned as-is;
 * -EREMOTEIO (delivered, FW reported failure) falls through to decode the
 * outbox status field.  Failures are logged via cmd_status_print().
 */
int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
{
	/* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */
	if (err == -ENXIO) {
		u16 opcode = MLX5_GET(mbox_in, in, opcode);
		u32 syndrome;
		u8 status;

		/* PCI Error, emulate command return status, for smooth reset */
		err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, syndrome);
		if (!err)
			return 0;
	}

	/* driver or FW delivery error */
	if (err != -EREMOTEIO && err)
		return err;

	/* check outbox status */
	err = cmd_status_to_err(MLX5_GET(mbox_out, out, status));
	if (err)
		cmd_status_print(dev, in, out);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_check);
    839
/* Dump a command's mailbox layout to the kernel log for debugging.
 * When the MLX5_CMD_DATA debug mask is set, only the message payload is
 * dumped; otherwise the raw layout/protection blocks are shown.
 *
 * @input: non-zero to dump the input message, zero for the output.
 */
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int data_only;
	u32 offset = 0;
	int dump_len;
	int i;

	mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx);
	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "cmd[%d]: dump command data %s(0x%x) %s\n",
				   ent->idx, mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n",
			      ent->idx, mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	/* the first bytes of the message are inlined in the command layout */
	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx);
		offset += sizeof(*ent->lay);
	}

	/* walk the chained mailboxes holding the rest of the message */
	for (i = 0; i < n && next; i++)  {
		if (data_only) {
			/* the final block may hold less than a full data block */
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset, ent->idx);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx);
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset,
				 ent->idx);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");

	mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
}
    897
    898static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
    899{
    900	return MLX5_GET(mbox_in, in->first.data, opcode);
    901}
    902
    903static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
    904
/* Delayed-work handler fired when an async (callback) command did not
 * complete within the command timeout.  First attempts EQ recovery,
 * which may deliver a lost completion EQE; if the entry is still
 * pending, forces a timeout completion.  The command slot is leaked in
 * that case since FW may still own it.
 */
static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	mlx5_cmd_eq_recover(dev);

	/* Maybe got handled by eq recover ? */
	if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
			       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
		goto out; /* phew, already handled */
	}

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
		       ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);

out:
	cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */
}
    932
    933static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
    934static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
    935			      struct mlx5_cmd_msg *msg);
    936
    937static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
    938{
    939	if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
    940		return true;
    941
    942	return cmd->allowed_opcode == opcode;
    943}
    944
    945bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
    946{
    947	return pci_channel_offline(dev->pdev) ||
    948	       dev->cmd.state != MLX5_CMDIF_STATE_UP ||
    949	       dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR;
    950}
    951
/* Workqueue handler that submits one command to the firmware.
 *
 * Acquires a command-queue slot (page requests use a dedicated reserved
 * slot), builds the HW command layout from ent->in/ent->out, rings the
 * doorbell, and in polling mode also waits for and processes the
 * completion.  In event mode the completion is delivered via the EQ to
 * mlx5_cmd_comp_handler().
 */
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	bool poll_cmd = ent->polling;
	struct mlx5_cmd_layout *lay;
	struct mlx5_core_dev *dev;
	unsigned long cb_timeout;
	struct semaphore *sem;
	unsigned long flags;
	int alloc_ret;
	int cmd_mode;

	dev = container_of(cmd, struct mlx5_core_dev, cmd);
	cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));

	complete(&ent->handling); /* lets wait_func() know the work has started */
	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		alloc_ret = cmd_alloc_index(cmd);
		if (alloc_ret < 0) {
			/* no free slot despite holding the semaphore - abort */
			mlx5_core_err_rl(dev, "failed to allocate command entry\n");
			if (ent->callback) {
				ent->callback(-EAGAIN, ent->context);
				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);
				cmd_ent_put(ent);
			} else {
				ent->ret = -EAGAIN;
				complete(&ent->done);
			}
			up(sem);
			return;
		}
		ent->idx = alloc_ret;
	} else {
		/* page requests always use the reserved last slot */
		ent->idx = cmd->max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	/* build the HW descriptor for this slot */
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	cmd_mode = cmd->mode;

	/* extra ref covers the timeout work; dropped when it runs or is canceled */
	if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
		cmd_ent_get(ent);
	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);

	/* Skip sending command to fw if internal error */
	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
		ent->ret = -ENXIO;
		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
		return;
	}

	cmd_ent_get(ent); /* for the _real_ FW event on completion */
	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	/* if not in polling don't use ent after this point */
	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
	}
}
   1039
   1040static int deliv_status_to_err(u8 status)
   1041{
   1042	switch (status) {
   1043	case MLX5_CMD_DELIVERY_STAT_OK:
   1044	case MLX5_DRIVER_STATUS_ABORTED:
   1045		return 0;
   1046	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
   1047	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
   1048		return -EBADR;
   1049	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
   1050	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
   1051	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
   1052		return -EFAULT; /* Bad address */
   1053	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
   1054	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
   1055	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
   1056	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
   1057		return -ENOMSG;
   1058	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
   1059		return -EIO;
   1060	default:
   1061		return -EINVAL;
   1062	}
   1063}
   1064
   1065static const char *deliv_status_to_str(u8 status)
   1066{
   1067	switch (status) {
   1068	case MLX5_CMD_DELIVERY_STAT_OK:
   1069		return "no errors";
   1070	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
   1071		return "signature error";
   1072	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
   1073		return "token error";
   1074	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
   1075		return "bad block number";
   1076	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
   1077		return "output pointer not aligned to block size";
   1078	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
   1079		return "input pointer not aligned to block size";
   1080	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
   1081		return "firmware internal error";
   1082	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
   1083		return "command input length error";
   1084	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
   1085		return "command output length error";
   1086	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
   1087		return "reserved fields not cleared";
   1088	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
   1089		return "bad command descriptor type";
   1090	default:
   1091		return "unknown status code";
   1092	}
   1093}
   1094
enum {
	/* grace period (msec) given to EQ recovery to deliver a missed
	 * command completion before declaring a hard timeout
	 */
	MLX5_CMD_TIMEOUT_RECOVER_MSEC   = 5 * 1000,
};
   1098
   1099static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
   1100					  struct mlx5_cmd_work_ent *ent)
   1101{
   1102	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC);
   1103
   1104	mlx5_cmd_eq_recover(dev);
   1105
   1106	/* Re-wait on the ent->done after executing the recovery flow. If the
   1107	 * recovery flow (or any other recovery flow running simultaneously)
   1108	 * has recovered an EQE, it should cause the entry to be completed by
   1109	 * the command interface.
   1110	 */
   1111	if (wait_for_completion_timeout(&ent->done, timeout)) {
   1112		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
   1113			       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
   1114		return;
   1115	}
   1116
   1117	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
   1118		       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
   1119
   1120	ent->ret = -ETIMEDOUT;
   1121	mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
   1122}
   1123
/* Wait for a blocking (non-callback) command to finish.
 *
 * Outcomes:
 *  - the queued work never ran and could be canceled: -ECANCELED
 *  - polling mode: wait (unbounded) for the polled completion
 *  - event mode: wait with timeout, then attempt EQ recovery
 *
 * Returns ent->ret as set by the completion path.
 */
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	/* if the handler never started, cancel_work_sync() succeeds and
	 * the command is aborted without touching hardware state
	 */
	if (!wait_for_completion_timeout(&ent->handling, timeout) &&
	    cancel_work_sync(&ent->work)) {
		ent->ret = -ECANCELED;
		goto out_err;
	}
	if (cmd->mode == CMD_MODE_POLLING || ent->polling)
		wait_for_completion(&ent->done);
	else if (!wait_for_completion_timeout(&ent->done, timeout))
		wait_func_handle_exec_timeout(dev, ent);

out_err:
	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	} else if (err == -ECANCELED) {
		mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}
   1157
/*  Notes:
 *    1. Callback functions may not sleep
 *    2. page queue commands do not support asynchronous completion
 *
 * return value in case (!callback):
 *	ret < 0 : Command execution couldn't be submitted by driver
 *	ret > 0 : Command execution couldn't be performed by firmware
 *	ret == 0: Command was executed by FW, Caller must check FW outbox status.
 *
 * return value in case (callback):
 *	ret < 0 : Command execution couldn't be submitted by driver
 *	ret == 0: Command will be submitted to FW for execution
 *		  and the callback will be called for further status updates
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue,
			   u8 token, bool force_polling)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	u8 status = 0;
	int err = 0;
	s64 ds;
	u16 op;

	/* page queue commands must be synchronous (see note 2 above) */
	if (callback && page_queue)
		return -EINVAL;

	ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
			    callback, context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	/* put for this ent is when consumed, depending on the use case
	 * 1) (!callback) blocking flow: by caller after wait_func completes
	 * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled
	 */

	ent->token = token;
	ent->polling = force_polling;

	init_completion(&ent->handling);
	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		/* page requests bypass the workqueue to avoid deadlocks
		 * with page-allocation commands in flight
		 */
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -EALREADY;
		goto out_free;
	}

	if (callback)
		return 0; /* mlx5_cmd_comp_handler() will put(ent) */

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT || err == -ECANCELED)
		goto out_free;

	/* record FW execution time statistics per opcode */
	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < MLX5_CMD_OP_MAX) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), ds);

out_free:
	status = ent->status;
	cmd_ent_put(ent);
	return err ? : status;
}
   1241
   1242static ssize_t dbg_write(struct file *filp, const char __user *buf,
   1243			 size_t count, loff_t *pos)
   1244{
   1245	struct mlx5_core_dev *dev = filp->private_data;
   1246	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
   1247	char lbuf[3];
   1248	int err;
   1249
   1250	if (!dbg->in_msg || !dbg->out_msg)
   1251		return -ENOMEM;
   1252
   1253	if (count < sizeof(lbuf) - 1)
   1254		return -EINVAL;
   1255
   1256	if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
   1257		return -EFAULT;
   1258
   1259	lbuf[sizeof(lbuf) - 1] = 0;
   1260
   1261	if (strcmp(lbuf, "go"))
   1262		return -EINVAL;
   1263
   1264	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);
   1265
   1266	return err ? err : count;
   1267}
   1268
/* file operations for the debugfs "run" trigger file */
static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};
   1274
   1275static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
   1276			    u8 token)
   1277{
   1278	struct mlx5_cmd_prot_block *block;
   1279	struct mlx5_cmd_mailbox *next;
   1280	int copy;
   1281
   1282	if (!to || !from)
   1283		return -ENOMEM;
   1284
   1285	copy = min_t(int, size, sizeof(to->first.data));
   1286	memcpy(to->first.data, from, copy);
   1287	size -= copy;
   1288	from += copy;
   1289
   1290	next = to->next;
   1291	while (size) {
   1292		if (!next) {
   1293			/* this is a BUG */
   1294			return -ENOMEM;
   1295		}
   1296
   1297		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
   1298		block = next->buf;
   1299		memcpy(block->data, from, copy);
   1300		from += copy;
   1301		size -= copy;
   1302		block->token = token;
   1303		next = next->next;
   1304	}
   1305
   1306	return 0;
   1307}
   1308
   1309static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
   1310{
   1311	struct mlx5_cmd_prot_block *block;
   1312	struct mlx5_cmd_mailbox *next;
   1313	int copy;
   1314
   1315	if (!to || !from)
   1316		return -ENOMEM;
   1317
   1318	copy = min_t(int, size, sizeof(from->first.data));
   1319	memcpy(to, from->first.data, copy);
   1320	size -= copy;
   1321	to += copy;
   1322
   1323	next = from->next;
   1324	while (size) {
   1325		if (!next) {
   1326			/* this is a BUG */
   1327			return -ENOMEM;
   1328		}
   1329
   1330		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
   1331		block = next->buf;
   1332
   1333		memcpy(to, block->data, copy);
   1334		to += copy;
   1335		size -= copy;
   1336		next = next->next;
   1337	}
   1338
   1339	return 0;
   1340}
   1341
   1342static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
   1343					      gfp_t flags)
   1344{
   1345	struct mlx5_cmd_mailbox *mailbox;
   1346
   1347	mailbox = kmalloc(sizeof(*mailbox), flags);
   1348	if (!mailbox)
   1349		return ERR_PTR(-ENOMEM);
   1350
   1351	mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
   1352				       &mailbox->dma);
   1353	if (!mailbox->buf) {
   1354		mlx5_core_dbg(dev, "failed allocation\n");
   1355		kfree(mailbox);
   1356		return ERR_PTR(-ENOMEM);
   1357	}
   1358	mailbox->next = NULL;
   1359
   1360	return mailbox;
   1361}
   1362
/* Release a mailbox allocated by alloc_cmd_box(): return its DMA buffer
 * to the dma pool, then free the descriptor itself.
 */
static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
   1369
/* Allocate a command message able to hold @size bytes: the first bytes
 * live inline in the message header, the remainder in a singly-linked
 * chain of DMA mailboxes.  Mailboxes are prepended, so block numbers
 * are assigned descending (n-1 .. 0) and the final list head is block 0
 * with each block's HW 'next' pointer chaining to the next-numbered
 * block.  Returns the message or an ERR_PTR on allocation failure.
 */
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size,
					       u8 token)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	msg->len = size;
	n = mlx5_calc_cmd_blocks(msg);

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		/* HW chain pointer: DMA address of the previously built block */
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		block->token = token;
		head = tmp;
	}
	msg->next = head;
	return msg;

err_alloc:
	/* unwind the partially built chain */
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}
   1416
   1417static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
   1418			      struct mlx5_cmd_msg *msg)
   1419{
   1420	struct mlx5_cmd_mailbox *head = msg->next;
   1421	struct mlx5_cmd_mailbox *next;
   1422
   1423	while (head) {
   1424		next = head->next;
   1425		free_cmd_box(dev, head);
   1426		head = next;
   1427	}
   1428	kfree(msg);
   1429}
   1430
   1431static ssize_t data_write(struct file *filp, const char __user *buf,
   1432			  size_t count, loff_t *pos)
   1433{
   1434	struct mlx5_core_dev *dev = filp->private_data;
   1435	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
   1436	void *ptr;
   1437
   1438	if (*pos != 0)
   1439		return -EINVAL;
   1440
   1441	kfree(dbg->in_msg);
   1442	dbg->in_msg = NULL;
   1443	dbg->inlen = 0;
   1444	ptr = memdup_user(buf, count);
   1445	if (IS_ERR(ptr))
   1446		return PTR_ERR(ptr);
   1447	dbg->in_msg = ptr;
   1448	dbg->inlen = count;
   1449
   1450	*pos = count;
   1451
   1452	return count;
   1453}
   1454
   1455static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
   1456			 loff_t *pos)
   1457{
   1458	struct mlx5_core_dev *dev = filp->private_data;
   1459	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
   1460
   1461	if (!dbg->out_msg)
   1462		return -ENOMEM;
   1463
   1464	return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
   1465				       dbg->outlen);
   1466}
   1467
/* file operations for the debugfs "in"/"out" data files */
static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};
   1474
   1475static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
   1476			   loff_t *pos)
   1477{
   1478	struct mlx5_core_dev *dev = filp->private_data;
   1479	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
   1480	char outlen[8];
   1481	int err;
   1482
   1483	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
   1484	if (err < 0)
   1485		return err;
   1486
   1487	return simple_read_from_buffer(buf, count, pos, outlen, err);
   1488}
   1489
   1490static ssize_t outlen_write(struct file *filp, const char __user *buf,
   1491			    size_t count, loff_t *pos)
   1492{
   1493	struct mlx5_core_dev *dev = filp->private_data;
   1494	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
   1495	char outlen_str[8] = {0};
   1496	int outlen;
   1497	void *ptr;
   1498	int err;
   1499
   1500	if (*pos != 0 || count > 6)
   1501		return -EINVAL;
   1502
   1503	kfree(dbg->out_msg);
   1504	dbg->out_msg = NULL;
   1505	dbg->outlen = 0;
   1506
   1507	if (copy_from_user(outlen_str, buf, count))
   1508		return -EFAULT;
   1509
   1510	err = sscanf(outlen_str, "%d", &outlen);
   1511	if (err < 0)
   1512		return err;
   1513
   1514	ptr = kzalloc(outlen, GFP_KERNEL);
   1515	if (!ptr)
   1516		return -ENOMEM;
   1517
   1518	dbg->out_msg = ptr;
   1519	dbg->outlen = outlen;
   1520
   1521	*pos = count;
   1522
   1523	return count;
   1524}
   1525
/* file operations for the debugfs "out_len" file */
static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};
   1532
   1533static void set_wqname(struct mlx5_core_dev *dev)
   1534{
   1535	struct mlx5_cmd *cmd = &dev->cmd;
   1536
   1537	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
   1538		 dev_name(dev->device));
   1539}
   1540
   1541static void clean_debug_files(struct mlx5_core_dev *dev)
   1542{
   1543	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
   1544
   1545	if (!mlx5_debugfs_root)
   1546		return;
   1547
   1548	mlx5_cmdif_debugfs_cleanup(dev);
   1549	debugfs_remove_recursive(dbg->dbg_root);
   1550}
   1551
/* Populate the "cmd" debugfs directory: files for staging a command
 * ("in", "out", "out_len"), triggering it ("run"), and reading back its
 * status, plus the cmdif statistics tree.
 */
static void create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev));

	debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
	debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
	debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
	debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
	debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);

	mlx5_cmdif_debugfs_init(dev);
}
   1566
   1567void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
   1568{
   1569	struct mlx5_cmd *cmd = &dev->cmd;
   1570	int i;
   1571
   1572	for (i = 0; i < cmd->max_reg_cmds; i++)
   1573		down(&cmd->sem);
   1574	down(&cmd->pages_sem);
   1575
   1576	cmd->allowed_opcode = opcode;
   1577
   1578	up(&cmd->pages_sem);
   1579	for (i = 0; i < cmd->max_reg_cmds; i++)
   1580		up(&cmd->sem);
   1581}
   1582
   1583static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
   1584{
   1585	struct mlx5_cmd *cmd = &dev->cmd;
   1586	int i;
   1587
   1588	for (i = 0; i < cmd->max_reg_cmds; i++)
   1589		down(&cmd->sem);
   1590	down(&cmd->pages_sem);
   1591
   1592	cmd->mode = mode;
   1593
   1594	up(&cmd->pages_sem);
   1595	for (i = 0; i < cmd->max_reg_cmds; i++)
   1596		up(&cmd->sem);
   1597}
   1598
   1599static int cmd_comp_notifier(struct notifier_block *nb,
   1600			     unsigned long type, void *data)
   1601{
   1602	struct mlx5_core_dev *dev;
   1603	struct mlx5_cmd *cmd;
   1604	struct mlx5_eqe *eqe;
   1605
   1606	cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
   1607	dev = container_of(cmd, struct mlx5_core_dev, cmd);
   1608	eqe = data;
   1609
   1610	mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
   1611
   1612	return NOTIFY_OK;
   1613}
/* Switch the command interface to event-driven completions.  The EQ
 * notifier is registered before the mode change.
 */
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
	mlx5_eq_notifier_register(dev, &dev->cmd.nb);
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}
   1620
/* Switch the command interface back to polling completions.  The mode
 * is changed before the EQ notifier is unregistered (mirror image of
 * mlx5_cmd_use_events()).
 */
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
	mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
}
   1626
   1627static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
   1628{
   1629	unsigned long flags;
   1630
   1631	if (msg->parent) {
   1632		spin_lock_irqsave(&msg->parent->lock, flags);
   1633		list_add_tail(&msg->list, &msg->parent->head);
   1634		spin_unlock_irqrestore(&msg->parent->lock, flags);
   1635	} else {
   1636		mlx5_free_cmd_msg(dev, msg);
   1637	}
   1638}
   1639
/* Process command completions for every slot set in @vec.
 *
 * Called from the EQ path on real FW completions (@forced == false) and
 * from error/timeout paths to force completions (@forced == true).
 * For each completed entry: copies the inline output, records status,
 * and either invokes the async callback or wakes the blocking waiter.
 */
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			ent = cmd->ent_arr[i];

			/* if we already completed the command, ignore it */
			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
						&ent->state)) {
				/* only real completion can free the cmd slot */
				if (!forced) {
					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
						      ent->idx);
					cmd_ent_put(ent);
				}
				continue;
			}

			if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
				cmd_ent_put(ent); /* timeout work was canceled */

			if (!forced || /* Real FW completion */
			    pci_channel_offline(dev->pdev) || /* FW is inaccessible */
			    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
				cmd_ent_put(ent);

			/* copy the inline outbox out of the HW layout */
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);

			if (vec & MLX5_TRIGGERED_CMD_COMP)
				ent->ret = -ENXIO;

			if (!ent->ret) { /* Command completed by FW */
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);

				ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}

			if (ent->callback) {
				/* async flow: account stats, copy the user
				 * outbox, release resources, then notify
				 */
				ds = ent->ts2 - ent->ts1;
				if (ent->op < MLX5_CMD_OP_MAX) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret ? : ent->status;
				if (err > 0) /* Failed in FW, command didn't execute */
					err = deliv_status_to_err(err);

				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				/* final consumer is done, release ent */
				cmd_ent_put(ent);
				callback(err, context);
			} else {
				/* release wait_func() so mlx5_cmd_invoke()
				 * can make the final ent_put()
				 */
				complete(&ent->done);
			}
		}
	}
}
   1732
/* Force completion of every command currently occupying a slot
 * (allocated bits cleared in cmd->bitmask).  Used on error/teardown
 * paths so blocked callers and pending callbacks are released even
 * though FW will never answer.
 */
static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	unsigned long bitmask;
	unsigned long flags;
	u64 vector;
	int i;

	/* wait for pending handlers to complete */
	mlx5_eq_synchronize_cmd_irq(dev);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	/* allocated slots are the zero bits of the allocation bitmask */
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	if (!vector)
		goto no_trig;

	bitmask = vector;
	/* we must increment the allocated entries refcount before triggering the completions
	 * to guarantee pending commands will not get freed in the meanwhile.
	 * For that reason, it also has to be done inside the alloc_lock.
	 */
	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
		cmd_ent_get(cmd->ent_arr[i]);
	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
	mlx5_cmd_comp_handler(dev, vector, true);
	/* drop the refs taken above, now that the handler is done */
	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
		cmd_ent_put(cmd->ent_arr[i]);
	return;

no_trig:
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}
   1767
/* Drain the command interface: repeatedly trigger forced completions
 * until every submission credit (regular + pages) can be taken, then
 * release them all so the interface is empty and usable again.
 */
void mlx5_cmd_flush(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	/* a failing down_trylock() means a command still holds the credit */
	for (i = 0; i < cmd->max_reg_cmds; i++)
		while (down_trylock(&cmd->sem))
			mlx5_cmd_trigger_completions(dev);

	while (down_trylock(&cmd->pages_sem))
		mlx5_cmd_trigger_completions(dev);

	/* Unlock cmdif */
	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
   1785
   1786static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
   1787				      gfp_t gfp)
   1788{
   1789	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
   1790	struct cmd_msg_cache *ch = NULL;
   1791	struct mlx5_cmd *cmd = &dev->cmd;
   1792	int i;
   1793
   1794	if (in_size <= 16)
   1795		goto cache_miss;
   1796
   1797	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
   1798		ch = &cmd->cache[i];
   1799		if (in_size > ch->max_inbox_size)
   1800			continue;
   1801		spin_lock_irq(&ch->lock);
   1802		if (list_empty(&ch->head)) {
   1803			spin_unlock_irq(&ch->lock);
   1804			continue;
   1805		}
   1806		msg = list_entry(ch->head.next, typeof(*msg), list);
   1807		/* For cached lists, we must explicitly state what is
   1808		 * the real size
   1809		 */
   1810		msg->len = in_size;
   1811		list_del(&msg->list);
   1812		spin_unlock_irq(&ch->lock);
   1813		break;
   1814	}
   1815
   1816	if (!IS_ERR(msg))
   1817		return msg;
   1818
   1819cache_miss:
   1820	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
   1821	return msg;
   1822}
   1823
   1824static int is_manage_pages(void *in)
   1825{
   1826	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
   1827}
   1828
/*  Notes:
 *    1. Callback functions may not sleep
 *    2. Page queue commands do not support asynchrous completion
 */
/* Common command-execution path behind mlx5_cmd_do/exec/exec_polling
 * and mlx5_cmd_exec_cb. Builds the inbox/outbox messages, invokes the
 * command, and for the synchronous path copies the result back out.
 * Returns 0 on success, a negative errno, or (synchronously) a
 * positive FW delivery status translated via deliv_status_to_err().
 */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context,
		    bool force_polling)
{
	u16 opcode = MLX5_GET(mbox_in, in, opcode);
	struct mlx5_cmd_msg *inb, *outb;
	int pages_queue;
	gfp_t gfp;
	u8 token;
	int err;

	/* Bail out when the command interface is down or the opcode is
	 * currently filtered out.
	 */
	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
		return -ENXIO;

	pages_queue = is_manage_pages(in);
	/* Callbacks may not sleep (see Notes above), so the async path
	 * must use atomic allocations.
	 */
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, token, force_polling);
	/* Async path: return without freeing inb/outb here —
	 * presumably the completion flow owns and releases them
	 * (NOTE(review): confirm in mlx5_cmd_invoke).
	 */
	if (callback)
		return err;

	if (err > 0) /* Failed in FW, command didn't execute */
		err = deliv_status_to_err(err);

	if (err)
		goto out_out;

	/* command completed by FW */
	err = mlx5_copy_from_msg(out, outb, out_size);
out_out:
	mlx5_free_cmd_msg(dev, outb);
out_in:
	free_msg(dev, inb);
	return err;
}
   1889
   1890static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
   1891			   u32 syndrome, int err)
   1892{
   1893	struct mlx5_cmd_stats *stats;
   1894
   1895	if (!err)
   1896		return;
   1897
   1898	stats = &dev->cmd.stats[opcode];
   1899	spin_lock_irq(&stats->lock);
   1900	stats->failed++;
   1901	if (err < 0)
   1902		stats->last_failed_errno = -err;
   1903	if (err == -EREMOTEIO) {
   1904		stats->failed_mbox_status++;
   1905		stats->last_failed_mbox_status = status;
   1906		stats->last_failed_syndrome = syndrome;
   1907	}
   1908	spin_unlock_irq(&stats->lock);
   1909}
   1910
   1911/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
   1912static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, void *out)
   1913{
   1914	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
   1915	u8 status = MLX5_GET(mbox_out, out, status);
   1916
   1917	if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */
   1918		err = -EIO;
   1919
   1920	if (!err && status != MLX5_CMD_STAT_OK)
   1921		err = -EREMOTEIO;
   1922
   1923	cmd_status_log(dev, opcode, status, syndrome, err);
   1924	return err;
   1925}
   1926
   1927/**
   1928 * mlx5_cmd_do - Executes a fw command, wait for completion.
   1929 * Unlike mlx5_cmd_exec, this function will not translate or intercept
   1930 * outbox.status and will return -EREMOTEIO when
   1931 * outbox.status != MLX5_CMD_STAT_OK
   1932 *
   1933 * @dev: mlx5 core device
   1934 * @in: inbox mlx5_ifc command buffer
   1935 * @in_size: inbox buffer size
   1936 * @out: outbox mlx5_ifc buffer
   1937 * @out_size: outbox size
   1938 *
   1939 * @return:
   1940 * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK.
   1941 *              Caller must check FW outbox status.
   1942 *   0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK.
   1943 * < 0 : Command execution couldn't be performed by firmware or driver
   1944 */
   1945int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
   1946{
   1947	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
   1948	u16 opcode = MLX5_GET(mbox_in, in, opcode);
   1949
   1950	err = cmd_status_err(dev, err, opcode, out);
   1951	return err;
   1952}
   1953EXPORT_SYMBOL(mlx5_cmd_do);
   1954
   1955/**
   1956 * mlx5_cmd_exec - Executes a fw command, wait for completion
   1957 *
   1958 * @dev: mlx5 core device
   1959 * @in: inbox mlx5_ifc command buffer
   1960 * @in_size: inbox buffer size
   1961 * @out: outbox mlx5_ifc buffer
   1962 * @out_size: outbox size
   1963 *
   1964 * @return: 0 if no error, FW command execution was successful
   1965 *          and outbox status is ok.
   1966 */
   1967int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
   1968		  int out_size)
   1969{
   1970	int err = mlx5_cmd_do(dev, in, in_size, out, out_size);
   1971
   1972	return mlx5_cmd_check(dev, err, in, out);
   1973}
   1974EXPORT_SYMBOL(mlx5_cmd_exec);
   1975
   1976/**
   1977 * mlx5_cmd_exec_polling - Executes a fw command, poll for completion
   1978 *	Needed for driver force teardown, when command completion EQ
   1979 *	will not be available to complete the command
   1980 *
   1981 * @dev: mlx5 core device
   1982 * @in: inbox mlx5_ifc command buffer
   1983 * @in_size: inbox buffer size
   1984 * @out: outbox mlx5_ifc buffer
   1985 * @out_size: outbox size
   1986 *
   1987 * @return: 0 if no error, FW command execution was successful
   1988 *          and outbox status is ok.
   1989 */
   1990int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
   1991			  void *out, int out_size)
   1992{
   1993	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
   1994	u16 opcode = MLX5_GET(mbox_in, in, opcode);
   1995
   1996	err = cmd_status_err(dev, err, opcode, out);
   1997	return mlx5_cmd_check(dev, err, in, out);
   1998}
   1999EXPORT_SYMBOL(mlx5_cmd_exec_polling);
   2000
   2001void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
   2002			     struct mlx5_async_ctx *ctx)
   2003{
   2004	ctx->dev = dev;
   2005	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
   2006	atomic_set(&ctx->num_inflight, 1);
   2007	init_waitqueue_head(&ctx->wait);
   2008}
   2009EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
   2010
   2011/**
   2012 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
   2013 * @ctx: The ctx to clean
   2014 *
   2015 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
   2016 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
   2017 * the call mlx5_cleanup_async_ctx().
   2018 */
   2019void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
   2020{
   2021	atomic_dec(&ctx->num_inflight);
   2022	wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
   2023}
   2024EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
   2025
   2026static void mlx5_cmd_exec_cb_handler(int status, void *_work)
   2027{
   2028	struct mlx5_async_work *work = _work;
   2029	struct mlx5_async_ctx *ctx;
   2030
   2031	ctx = work->ctx;
   2032	status = cmd_status_err(ctx->dev, status, work->opcode, work->out);
   2033	work->user_callback(status, work);
   2034	if (atomic_dec_and_test(&ctx->num_inflight))
   2035		wake_up(&ctx->wait);
   2036}
   2037
/* Submit a fw command asynchronously; @callback runs from the
 * completion handler with the translated status. Returns 0 when the
 * command was queued, a negative errno otherwise.
 */
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work)
{
	int ret;

	work->ctx = ctx;
	work->user_callback = callback;
	/* Stash opcode and outbox pointer so the completion handler can
	 * translate/log the FW status (see mlx5_cmd_exec_cb_handler).
	 */
	work->opcode = MLX5_GET(mbox_in, in, opcode);
	work->out = out;
	/* A zero refcount means cleanup already started; issuing a new
	 * command at that point is a caller bug.
	 */
	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
		return -EIO;
	ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
		       mlx5_cmd_exec_cb_handler, work, false);
	/* On submission failure the handler never runs, so drop the
	 * reference taken above ourselves.
	 */
	if (ret && atomic_dec_and_test(&ctx->num_inflight))
		wake_up(&ctx->wait);

	return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
   2058
   2059static void destroy_msg_cache(struct mlx5_core_dev *dev)
   2060{
   2061	struct cmd_msg_cache *ch;
   2062	struct mlx5_cmd_msg *msg;
   2063	struct mlx5_cmd_msg *n;
   2064	int i;
   2065
   2066	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
   2067		ch = &dev->cmd.cache[i];
   2068		list_for_each_entry_safe(msg, n, &ch->head, list) {
   2069			list_del(&msg->list);
   2070			mlx5_free_cmd_msg(dev, msg);
   2071		}
   2072	}
   2073}
   2074
/* Number of messages pre-allocated per cache tier; the smallest inbox
 * tier gets the most entries.
 */
static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
	512, 32, 16, 8, 2
};

/* Maximum inbox size served by each cache tier: a 16-byte header plus
 * an increasing number of command data blocks.
 */
static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
	16 + MLX5_CMD_DATA_BLOCK_SIZE,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};
   2086
   2087static void create_msg_cache(struct mlx5_core_dev *dev)
   2088{
   2089	struct mlx5_cmd *cmd = &dev->cmd;
   2090	struct cmd_msg_cache *ch;
   2091	struct mlx5_cmd_msg *msg;
   2092	int i;
   2093	int k;
   2094
   2095	/* Initialize and fill the caches with initial entries */
   2096	for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
   2097		ch = &cmd->cache[k];
   2098		spin_lock_init(&ch->lock);
   2099		INIT_LIST_HEAD(&ch->head);
   2100		ch->num_ent = cmd_cache_num_ent[k];
   2101		ch->max_inbox_size = cmd_cache_ent_size[k];
   2102		for (i = 0; i < ch->num_ent; i++) {
   2103			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
   2104						 ch->max_inbox_size, 0);
   2105			if (IS_ERR(msg))
   2106				break;
   2107			msg->parent = ch;
   2108			list_add_tail(&msg->list, &ch->head);
   2109		}
   2110	}
   2111}
   2112
/* Allocate the DMA-coherent buffer backing the command queue, which
 * must be aligned to MLX5_ADAPTER_PAGE_SIZE. First try a plain
 * page-sized allocation; if it happens to be unaligned, re-allocate
 * with enough slack to align manually.
 */
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	/* Unaligned: give the buffer back and over-allocate so an
	 * aligned page is guaranteed to fit inside the region.
	 */
	dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
						2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						&cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* cmd_buf/dma are the aligned views; cmd_alloc_buf/alloc_dma
	 * keep the raw allocation for dma_free_coherent().
	 */
	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}
   2141
/* Release the command-queue buffer from alloc_cmd_page(); alloc_size
 * reflects any over-allocation done for manual alignment.
 */
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}
   2147
/* Command interface revision advertised by FW: the upper 16 bits of
 * the cmdif_rev_fw_sub word in the initialization segment.
 */
static u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
   2152
   2153int mlx5_cmd_init(struct mlx5_core_dev *dev)
   2154{
   2155	int size = sizeof(struct mlx5_cmd_prot_block);
   2156	int align = roundup_pow_of_two(size);
   2157	struct mlx5_cmd *cmd = &dev->cmd;
   2158	u32 cmd_h, cmd_l;
   2159	u16 cmd_if_rev;
   2160	int err;
   2161	int i;
   2162
   2163	memset(cmd, 0, sizeof(*cmd));
   2164	cmd_if_rev = cmdif_rev(dev);
   2165	if (cmd_if_rev != CMD_IF_REV) {
   2166		mlx5_core_err(dev,
   2167			      "Driver cmdif rev(%d) differs from firmware's(%d)\n",
   2168			      CMD_IF_REV, cmd_if_rev);
   2169		return -EINVAL;
   2170	}
   2171
   2172	cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
   2173	if (!cmd->stats)
   2174		return -ENOMEM;
   2175
   2176	cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
   2177	if (!cmd->pool) {
   2178		err = -ENOMEM;
   2179		goto dma_pool_err;
   2180	}
   2181
   2182	err = alloc_cmd_page(dev, cmd);
   2183	if (err)
   2184		goto err_free_pool;
   2185
   2186	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
   2187	cmd->log_sz = cmd_l >> 4 & 0xf;
   2188	cmd->log_stride = cmd_l & 0xf;
   2189	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
   2190		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
   2191			      1 << cmd->log_sz);
   2192		err = -EINVAL;
   2193		goto err_free_page;
   2194	}
   2195
   2196	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
   2197		mlx5_core_err(dev, "command queue size overflow\n");
   2198		err = -EINVAL;
   2199		goto err_free_page;
   2200	}
   2201
   2202	cmd->state = MLX5_CMDIF_STATE_DOWN;
   2203	cmd->checksum_disabled = 1;
   2204	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
   2205	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
   2206
   2207	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
   2208	if (cmd->cmdif_rev > CMD_IF_REV) {
   2209		mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
   2210			      CMD_IF_REV, cmd->cmdif_rev);
   2211		err = -EOPNOTSUPP;
   2212		goto err_free_page;
   2213	}
   2214
   2215	spin_lock_init(&cmd->alloc_lock);
   2216	spin_lock_init(&cmd->token_lock);
   2217	for (i = 0; i < MLX5_CMD_OP_MAX; i++)
   2218		spin_lock_init(&cmd->stats[i].lock);
   2219
   2220	sema_init(&cmd->sem, cmd->max_reg_cmds);
   2221	sema_init(&cmd->pages_sem, 1);
   2222
   2223	cmd_h = (u32)((u64)(cmd->dma) >> 32);
   2224	cmd_l = (u32)(cmd->dma);
   2225	if (cmd_l & 0xfff) {
   2226		mlx5_core_err(dev, "invalid command queue address\n");
   2227		err = -ENOMEM;
   2228		goto err_free_page;
   2229	}
   2230
   2231	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
   2232	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
   2233
   2234	/* Make sure firmware sees the complete address before we proceed */
   2235	wmb();
   2236
   2237	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
   2238
   2239	cmd->mode = CMD_MODE_POLLING;
   2240	cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
   2241
   2242	create_msg_cache(dev);
   2243
   2244	set_wqname(dev);
   2245	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
   2246	if (!cmd->wq) {
   2247		mlx5_core_err(dev, "failed to create command workqueue\n");
   2248		err = -ENOMEM;
   2249		goto err_cache;
   2250	}
   2251
   2252	create_debugfs_files(dev);
   2253
   2254	return 0;
   2255
   2256err_cache:
   2257	destroy_msg_cache(dev);
   2258
   2259err_free_page:
   2260	free_cmd_page(dev, cmd);
   2261
   2262err_free_pool:
   2263	dma_pool_destroy(cmd->pool);
   2264dma_pool_err:
   2265	kvfree(cmd->stats);
   2266	return err;
   2267}
   2268
/* Tear down the command interface, releasing resources in the reverse
 * order they were acquired by mlx5_cmd_init().
 */
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	dma_pool_destroy(cmd->pool);
	kvfree(cmd->stats);
}
   2280
/* Record the current command-interface state (e.g. MLX5_CMDIF_STATE_DOWN,
 * as set during mlx5_cmd_init()).
 */
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
			enum mlx5_cmdif_state cmdif_state)
{
	dev->cmd.state = cmdif_state;
}