cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

vport.c (32705B)


      1/*
      2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
      3 *
      4 * This software is available to you under a choice of one of two
      5 * licenses.  You may choose to be licensed under the terms of the GNU
      6 * General Public License (GPL) Version 2, available from the file
      7 * COPYING in the main directory of this source tree, or the
      8 * OpenIB.org BSD license below:
      9 *
     10 *     Redistribution and use in source and binary forms, with or
     11 *     without modification, are permitted provided that the following
     12 *     conditions are met:
     13 *
     14 *      - Redistributions of source code must retain the above
     15 *        copyright notice, this list of conditions and the following
     16 *        disclaimer.
     17 *
     18 *      - Redistributions in binary form must reproduce the above
     19 *        copyright notice, this list of conditions and the following
     20 *        disclaimer in the documentation and/or other materials
     21 *        provided with the distribution.
     22 *
     23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
     24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
     26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
     27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
     28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
     29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     30 * SOFTWARE.
     31 */
     32
     33#include <linux/export.h>
     34#include <linux/etherdevice.h>
     35#include <linux/mlx5/driver.h>
     36#include <linux/mlx5/vport.h>
     37#include <linux/mlx5/eswitch.h>
     38#include "mlx5_core.h"
     39#include "sf/sf.h"
     40
/* Mutex to hold while enabling or disabling RoCE; serializes updates to
 * mdev->roce.roce_en (see mlx5_nic_vport_enable_roce/disable_roce below).
 */
static DEFINE_MUTEX(mlx5_roce_en_lock);
     43
/* Query the state of a vport via the QUERY_VPORT_STATE command.
 * @opmod: command op_mod selecting which kind of vport to query.
 * @vport: vport number; non-zero implies querying another vport
 *         (other_vport is set), 0 queries our own vport.
 *
 * Returns the state field from the firmware reply; command failures are
 * swallowed and reported as state 0.
 */
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
	int err;

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
	if (err)
		return 0;

	return MLX5_GET(query_vport_state_out, out, state);
}
     63
/* Set the administrative state of a vport via MODIFY_VPORT_STATE.
 * @opmod: command op_mod selecting the vport kind.
 * @other_vport: set to act on @vport rather than our own vport.
 * @state: requested admin_state value.
 *
 * Returns 0 on success or a negative errno from the command interface.
 */
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 other_vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
}
     78
/* Fetch a vport's NIC vport context into @out (caller provides a buffer of
 * at least query_nic_vport_context_out size).  A non-zero @vport sets
 * other_vport to query on behalf of another function.
 */
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
}
     92
     93int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
     94				    u16 vport, u8 *min_inline)
     95{
     96	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
     97	int err;
     98
     99	err = mlx5_query_nic_vport_context(mdev, vport, out);
    100	if (!err)
    101		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
    102				       nic_vport_context.min_wqe_inline_mode);
    103	return err;
    104}
    105EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
    106
/* Determine the device's minimal inline mode requirement.
 * If the capability says the mode lives in the vport context, query it;
 * if that query fails, fall through to the conservative L2 default.
 * NOTE(review): inline-mode capability values other than the three handled
 * here leave *min_inline_mode unmodified — callers appear expected to
 * pre-initialize it; confirm against callers.
 */
void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
			break;
		/* query failed: fall back to requiring L2 headers inline */
		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
    124
/* Set the minimal WQE inline mode of another function's vport
 * (other_vport is always set here).  Returns 0 or a negative errno.
 */
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
	void *nic_vport_ctx;

	/* field_select marks which context fields the firmware should apply */
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_inline, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 min_wqe_inline_mode, min_inline);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	return mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
}
    145
/* Read a vport's permanent MAC address into @addr (ETH_ALEN bytes).
 * @other: query another vport (@vport) rather than our own.
 * Returns 0 or a negative errno; @addr is only written on success.
 */
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, bool other, u8 *addr)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
	u8 *out_addr;
	int err;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_nic_vport_context_in, in, other_vport, other);

	err = mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
	if (!err)
		/* MAC occupies the last 6 bytes of the 8-byte field:
		 * skip the 2 leading pad bytes.
		 */
		ether_addr_copy(addr, &out_addr[2]);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
    169
/* Convenience wrapper: read our own vport's permanent MAC address. */
int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
	return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
}
EXPORT_SYMBOL_GPL(mlx5_query_mac_address);
    175
/* Set the permanent MAC address of another function's vport
 * (other_vport is always set).  Returns 0 or a negative errno.
 */
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, const u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	/* MAC goes into the last 6 bytes of the 8-byte field */
	ether_addr_copy(&perm_mac[2], addr);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
    210
    211int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
    212{
    213	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
    214	u32 *out;
    215	int err;
    216
    217	out = kvzalloc(outlen, GFP_KERNEL);
    218	if (!out)
    219		return -ENOMEM;
    220
    221	err = mlx5_query_nic_vport_context(mdev, 0, out);
    222	if (!err)
    223		*mtu = MLX5_GET(query_nic_vport_context_out, out,
    224				nic_vport_context.mtu);
    225
    226	kvfree(out);
    227	return err;
    228}
    229EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
    230
/* Set the MTU in our own vport's NIC vport context.
 * Returns 0 or a negative errno.
 */
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
    252
    253int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
    254				  u16 vport,
    255				  enum mlx5_list_type list_type,
    256				  u8 addr_list[][ETH_ALEN],
    257				  int *list_size)
    258{
    259	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
    260	void *nic_vport_ctx;
    261	int max_list_size;
    262	int req_list_size;
    263	int out_sz;
    264	void *out;
    265	int err;
    266	int i;
    267
    268	req_list_size = *list_size;
    269
    270	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
    271		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
    272		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
    273
    274	if (req_list_size > max_list_size) {
    275		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
    276			       req_list_size, max_list_size);
    277		req_list_size = max_list_size;
    278	}
    279
    280	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
    281			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
    282
    283	out = kvzalloc(out_sz, GFP_KERNEL);
    284	if (!out)
    285		return -ENOMEM;
    286
    287	MLX5_SET(query_nic_vport_context_in, in, opcode,
    288		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
    289	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
    290	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
    291	MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
    292
    293	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
    294	if (err)
    295		goto out;
    296
    297	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
    298				     nic_vport_context);
    299	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
    300				 allowed_list_size);
    301
    302	*list_size = req_list_size;
    303	for (i = 0; i < req_list_size; i++) {
    304		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
    305					nic_vport_ctx,
    306					current_uc_mac_address[i]) + 2;
    307		ether_addr_copy(addr_list[i], mac_addr);
    308	}
    309out:
    310	kvfree(out);
    311	return err;
    312}
    313EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
    314
/* Program our vport's allowed UC/MC MAC address list.
 * @list_type: MLX5_NVPRT_LIST_TYPE_UC or MLX5_NVPRT_LIST_TYPE_MC.
 * @addr_list/@list_size: addresses to install.
 *
 * Returns -ENOSPC if @list_size exceeds the device capability, -ENOMEM on
 * allocation failure, otherwise the firmware command status.
 */
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	/* command input carries one mac_address_layout entry per address */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* +2 skips the pad bytes; MAC is the low 6 bytes of the field */
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
    367
    368int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
    369				u16 vlans[],
    370				int list_size)
    371{
    372	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
    373	void *nic_vport_ctx;
    374	int max_list_size;
    375	int in_sz;
    376	void *in;
    377	int err;
    378	int i;
    379
    380	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
    381
    382	if (list_size > max_list_size)
    383		return -ENOSPC;
    384
    385	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
    386		list_size * MLX5_ST_SZ_BYTES(vlan_layout);
    387
    388	memset(out, 0, sizeof(out));
    389	in = kvzalloc(in_sz, GFP_KERNEL);
    390	if (!in)
    391		return -ENOMEM;
    392
    393	MLX5_SET(modify_nic_vport_context_in, in, opcode,
    394		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
    395	MLX5_SET(modify_nic_vport_context_in, in,
    396		 field_select.addresses_list, 1);
    397
    398	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
    399				     nic_vport_context);
    400
    401	MLX5_SET(nic_vport_context, nic_vport_ctx,
    402		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
    403	MLX5_SET(nic_vport_context, nic_vport_ctx,
    404		 allowed_list_size, list_size);
    405
    406	for (i = 0; i < list_size; i++) {
    407		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
    408					       nic_vport_ctx,
    409					       current_uc_mac_address[i]);
    410		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
    411	}
    412
    413	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
    414	kvfree(in);
    415	return err;
    416}
    417EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
    418
/* Read the system image GUID from our vport's NIC vport context.
 * On success *system_image_guid is filled in; returns 0 or a negative errno.
 */
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (err)
		goto out;

	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.system_image_guid);
out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
    441
    442int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
    443{
    444	u32 *out;
    445	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
    446
    447	out = kvzalloc(outlen, GFP_KERNEL);
    448	if (!out)
    449		return -ENOMEM;
    450
    451	mlx5_query_nic_vport_context(mdev, 0, out);
    452
    453	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
    454				nic_vport_context.node_guid);
    455
    456	kvfree(out);
    457
    458	return 0;
    459}
    460EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
    461
/* Set the node GUID of another function's vport (other_vport is set).
 * Requires the vport_group_manager capability; returns -EACCES without it,
 * -ENOMEM on allocation failure, otherwise the firmware command status.
 */
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u16 vport, u64 node_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_context;
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	kvfree(in);

	return err;
}
    494
    495int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
    496					u16 *qkey_viol_cntr)
    497{
    498	u32 *out;
    499	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
    500
    501	out = kvzalloc(outlen, GFP_KERNEL);
    502	if (!out)
    503		return -ENOMEM;
    504
    505	mlx5_query_nic_vport_context(mdev, 0, out);
    506
    507	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
    508				   nic_vport_context.qkey_violation_counter);
    509
    510	kvfree(out);
    511
    512	return 0;
    513}
    514EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
    515
/* Query one GID (or, with gid_index == 0xffff, the whole table) of an HCA
 * vport.  Querying another vport (@other_vport) requires the
 * vport_group_manager capability (-EPERM otherwise).
 *
 * NOTE(review): only the first returned GID is copied into @gid even when
 * the full table is requested — confirm callers expect this.
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16  vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	/* 0xffff is the "query whole table" wildcard */
	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* reply buffer: fixed header plus one GID per requested entry */
	out_sz += nout * sizeof(*gid);

	in = kvzalloc(in_sz, GFP_KERNEL);
	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* GID data starts right after the fixed reply structure */
	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
    581
/* Query one P_Key (or, with pkey_index == 0xffff, the whole table) of an
 * HCA vport into @pkey (caller must size @pkey for the table when using
 * the wildcard).  Querying another vport requires vport_group_manager
 * (-EPERM otherwise).
 */
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	/* 0xffff is the "query whole table" wildcard */
	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* reply buffer: fixed header plus one pkey entry per result */
	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kvzalloc(in_sz, GFP_KERNEL);
	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* unpack each firmware pkey entry into the caller's u16 array */
	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
    646
    647int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
    648				 u8 other_vport, u8 port_num,
    649				 u16 vf_num,
    650				 struct mlx5_hca_vport_context *rep)
    651{
    652	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
    653	int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {};
    654	int is_group_manager;
    655	void *out;
    656	void *ctx;
    657	int err;
    658
    659	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
    660
    661	out = kvzalloc(out_sz, GFP_KERNEL);
    662	if (!out)
    663		return -ENOMEM;
    664
    665	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
    666
    667	if (other_vport) {
    668		if (is_group_manager) {
    669			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
    670			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
    671		} else {
    672			err = -EPERM;
    673			goto ex;
    674		}
    675	}
    676
    677	if (MLX5_CAP_GEN(dev, num_ports) == 2)
    678		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
    679
    680	err = mlx5_cmd_exec_inout(dev, query_hca_vport_context, in, out);
    681	if (err)
    682		goto ex;
    683
    684	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
    685	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
    686	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
    687	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
    688	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
    689	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
    690	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
    691				      port_physical_state);
    692	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
    693	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
    694					       port_physical_state);
    695	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
    696	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
    697	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
    698	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
    699					  cap_mask1_field_select);
    700	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
    701	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
    702					  cap_mask2_field_select);
    703	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
    704	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
    705					   init_type_reply);
    706	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
    707	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
    708					  subnet_timeout);
    709	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
    710	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
    711	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
    712						  qkey_violation_counter);
    713	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
    714						  pkey_violation_counter);
    715	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
    716	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
    717					    system_image_guid);
    718
    719ex:
    720	kvfree(out);
    721	return err;
    722}
    723EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
    724
    725int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
    726					   u64 *sys_image_guid)
    727{
    728	struct mlx5_hca_vport_context *rep;
    729	int err;
    730
    731	rep = kvzalloc(sizeof(*rep), GFP_KERNEL);
    732	if (!rep)
    733		return -ENOMEM;
    734
    735	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
    736	if (!err)
    737		*sys_image_guid = rep->sys_image_guid;
    738
    739	kvfree(rep);
    740	return err;
    741}
    742EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
    743
    744int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
    745				   u64 *node_guid)
    746{
    747	struct mlx5_hca_vport_context *rep;
    748	int err;
    749
    750	rep = kvzalloc(sizeof(*rep), GFP_KERNEL);
    751	if (!rep)
    752		return -ENOMEM;
    753
    754	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
    755	if (!err)
    756		*node_guid = rep->node_guid;
    757
    758	kvfree(rep);
    759	return err;
    760}
    761EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
    762
/* Read a vport's three promiscuous-mode flags from its NIC vport context.
 * The output flags are only written on success; returns 0 or a negative
 * errno.
 */
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u16 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
    793
/* Set our vport's three promiscuous-mode flags in one MODIFY command.
 * Returns 0 or a negative errno.
 */
int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
    824
/* Bit positions used to fold the two local-loopback disable flags into one
 * value in mlx5_nic_vport_query_local_lb().
 */
enum {
	UC_LOCAL_LB,
	MC_LOCAL_LB
};
    829
/* Enable or disable local loopback for both UC and MC traffic.
 * The context fields are "disable" flags, hence the !enable inversion.
 * Each field_select bit is only set if the corresponding capability
 * exists; if neither capability exists this is a no-op returning 0.
 */
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	if (!err)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);
    869
/* Query whether local loopback is enabled on our vport.
 * *status is true only when neither the UC nor the MC disable flag is set.
 */
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int value;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out);
	if (err)
		goto out;

	/* collect both disable flags into one bitmask ... */
	value = MLX5_GET(query_nic_vport_context_out, out,
			 nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;

	value |= MLX5_GET(query_nic_vport_context_out, out,
			  nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;

	/* ... loopback is on only if nothing is disabled */
	*status = !value;

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
    898
/* Values written to nic_vport_context.roce_en by
 * mlx5_nic_vport_update_roce_state().
 */
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};
    903
/* Program the roce_en flag in our vport's NIC vport context.
 * Returns 0 or a negative errno.
 */
static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
					    enum mlx5_vport_roce_state state)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 state);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	kvfree(in);

	return err;
}
    927
    928int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
    929{
    930	int err = 0;
    931
    932	mutex_lock(&mlx5_roce_en_lock);
    933	if (!mdev->roce.roce_en)
    934		err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
    935
    936	if (!err)
    937		mdev->roce.roce_en++;
    938	mutex_unlock(&mlx5_roce_en_lock);
    939
    940	return err;
    941}
    942EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
    943
    944int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
    945{
    946	int err = 0;
    947
    948	mutex_lock(&mlx5_roce_en_lock);
    949	if (mdev->roce.roce_en) {
    950		mdev->roce.roce_en--;
    951		if (mdev->roce.roce_en == 0)
    952			err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
    953
    954		if (err)
    955			mdev->roce.roce_en++;
    956	}
    957	mutex_unlock(&mlx5_roce_en_lock);
    958	return err;
    959}
    960EXPORT_SYMBOL(mlx5_nic_vport_disable_roce);
    961
    962int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
    963				  int vf, u8 port_num, void *out)
    964{
    965	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
    966	int is_group_manager;
    967	void *in;
    968	int err;
    969
    970	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
    971	in = kvzalloc(in_sz, GFP_KERNEL);
    972	if (!in) {
    973		err = -ENOMEM;
    974		return err;
    975	}
    976
    977	MLX5_SET(query_vport_counter_in, in, opcode,
    978		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
    979	if (other_vport) {
    980		if (is_group_manager) {
    981			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
    982			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
    983		} else {
    984			err = -EPERM;
    985			goto free;
    986		}
    987	}
    988	if (MLX5_CAP_GEN(dev, num_ports) == 2)
    989		MLX5_SET(query_vport_counter_in, in, port_num, port_num);
    990
    991	err = mlx5_cmd_exec_inout(dev, query_vport_counter, in, out);
    992free:
    993	kvfree(in);
    994	return err;
    995}
    996EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
    997
    998int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
    999				u8 other_vport, u64 *rx_discard_vport_down,
   1000				u64 *tx_discard_vport_down)
   1001{
   1002	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
   1003	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
   1004	int err;
   1005
   1006	MLX5_SET(query_vnic_env_in, in, opcode,
   1007		 MLX5_CMD_OP_QUERY_VNIC_ENV);
   1008	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
   1009	MLX5_SET(query_vnic_env_in, in, vport_number, vport);
   1010	MLX5_SET(query_vnic_env_in, in, other_vport, other_vport);
   1011
   1012	err = mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
   1013	if (err)
   1014		return err;
   1015
   1016	*rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
   1017					    vport_env.receive_discard_vport_down);
   1018	*tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
   1019					    vport_env.transmit_discard_vport_down);
   1020	return 0;
   1021}
   1022
   1023int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
   1024				       u8 other_vport, u8 port_num,
   1025				       int vf,
   1026				       struct mlx5_hca_vport_context *req)
   1027{
   1028	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
   1029	int is_group_manager;
   1030	void *ctx;
   1031	void *in;
   1032	int err;
   1033
   1034	mlx5_core_dbg(dev, "vf %d\n", vf);
   1035	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
   1036	in = kvzalloc(in_sz, GFP_KERNEL);
   1037	if (!in)
   1038		return -ENOMEM;
   1039
   1040	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
   1041	if (other_vport) {
   1042		if (is_group_manager) {
   1043			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
   1044			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
   1045		} else {
   1046			err = -EPERM;
   1047			goto ex;
   1048		}
   1049	}
   1050
   1051	if (MLX5_CAP_GEN(dev, num_ports) > 1)
   1052		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);
   1053
   1054	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
   1055	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
   1056	if (req->field_select & MLX5_HCA_VPORT_SEL_STATE_POLICY)
   1057		MLX5_SET(hca_vport_context, ctx, vport_state_policy,
   1058			 req->policy);
   1059	if (req->field_select & MLX5_HCA_VPORT_SEL_PORT_GUID)
   1060		MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
   1061	if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
   1062		MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
   1063	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
   1064	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
   1065		 req->cap_mask1_perm);
   1066	err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, in);
   1067ex:
   1068	kvfree(in);
   1069	return err;
   1070}
   1071EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
   1072
   1073int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
   1074				       struct mlx5_core_dev *port_mdev)
   1075{
   1076	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
   1077	void *in;
   1078	int err;
   1079
   1080	in = kvzalloc(inlen, GFP_KERNEL);
   1081	if (!in)
   1082		return -ENOMEM;
   1083
   1084	err = mlx5_nic_vport_enable_roce(port_mdev);
   1085	if (err)
   1086		goto free;
   1087
   1088	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
   1089	MLX5_SET(modify_nic_vport_context_in, in,
   1090		 nic_vport_context.affiliated_vhca_id,
   1091		 MLX5_CAP_GEN(master_mdev, vhca_id));
   1092	MLX5_SET(modify_nic_vport_context_in, in,
   1093		 nic_vport_context.affiliation_criteria,
   1094		 MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
   1095	MLX5_SET(modify_nic_vport_context_in, in, opcode,
   1096		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
   1097
   1098	err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
   1099	if (err)
   1100		mlx5_nic_vport_disable_roce(port_mdev);
   1101
   1102free:
   1103	kvfree(in);
   1104	return err;
   1105}
   1106EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);
   1107
   1108int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
   1109{
   1110	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
   1111	void *in;
   1112	int err;
   1113
   1114	in = kvzalloc(inlen, GFP_KERNEL);
   1115	if (!in)
   1116		return -ENOMEM;
   1117
   1118	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
   1119	MLX5_SET(modify_nic_vport_context_in, in,
   1120		 nic_vport_context.affiliated_vhca_id, 0);
   1121	MLX5_SET(modify_nic_vport_context_in, in,
   1122		 nic_vport_context.affiliation_criteria, 0);
   1123	MLX5_SET(modify_nic_vport_context_in, in, opcode,
   1124		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
   1125
   1126	err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
   1127	if (!err)
   1128		mlx5_nic_vport_disable_roce(port_mdev);
   1129
   1130	kvfree(in);
   1131	return err;
   1132}
   1133EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
   1134
   1135u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
   1136{
   1137	int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
   1138	u64 tmp;
   1139	int err;
   1140
   1141	if (mdev->sys_image_guid)
   1142		return mdev->sys_image_guid;
   1143
   1144	if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
   1145		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
   1146	else
   1147		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
   1148
   1149	mdev->sys_image_guid = err ? 0 : tmp;
   1150
   1151	return mdev->sys_image_guid;
   1152}
   1153EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
   1154
   1155int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out)
   1156{
   1157	u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
   1158	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
   1159
   1160	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
   1161	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
   1162	MLX5_SET(query_hca_cap_in, in, function_id, function_id);
   1163	MLX5_SET(query_hca_cap_in, in, other_function, true);
   1164	return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
   1165}