sriov.c (9434B)
/*
 * Copyright (c) 2014, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "eswitch.h"

static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	struct mlx5_hca_vport_context *in;
	int err = 0;

	/* Restore SR-IOV GUID and policy settings */
	if (sriov->vfs_ctx[vf].node_guid ||
	    sriov->vfs_ctx[vf].port_guid ||
	    sriov->vfs_ctx[vf].policy != MLX5_POLICY_INVALID) {
		in = kzalloc(sizeof(*in), GFP_KERNEL);
		if (!in)
			return -ENOMEM;

		in->node_guid = sriov->vfs_ctx[vf].node_guid;
		in->port_guid = sriov->vfs_ctx[vf].port_guid;
		in->policy = sriov->vfs_ctx[vf].policy;
		in->field_select =
			!!(in->port_guid) * MLX5_HCA_VPORT_SEL_PORT_GUID |
			!!(in->node_guid) * MLX5_HCA_VPORT_SEL_NODE_GUID |
			!!(in->policy) * MLX5_HCA_VPORT_SEL_STATE_POLICY;

		err = mlx5_core_modify_hca_vport_context(dev, 1, 1, vf + 1, in);
		if (err)
			mlx5_core_warn(dev, "modify vport context failed, unable to restore VF %d settings\n", vf);

		kfree(in);
	}

	return err;
}

static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err, vf, num_msix_count;

	if (!MLX5_ESWITCH_MANAGER(dev))
		goto enable_vfs_hca;

	err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs);
	if (err) {
		mlx5_core_warn(dev,
			       "failed to enable eswitch SRIOV (%d)\n", err);
		return err;
	}

enable_vfs_hca:
	num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs);
	for (vf = 0; vf < num_vfs; vf++) {
		/* Notify the VF before it is enabled so it can
		 * perform any setup it needs.
		 */
		blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier,
					     MLX5_PF_NOTIFY_ENABLE_VF, dev);
		err = mlx5_core_enable_hca(dev, vf + 1);
		if (err) {
			mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
			continue;
		}

		err = mlx5_set_msix_vec_count(dev, vf + 1, num_msix_count);
		if (err) {
			mlx5_core_warn(dev,
				       "failed to set MSI-X vector count for VF %d, err %d\n",
				       vf, err);
			continue;
		}

		sriov->vfs_ctx[vf].enabled = 1;
		if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
			err = sriov_restore_guids(dev, vf);
			if (err) {
				mlx5_core_warn(dev,
					       "failed to restore VF %d settings, err %d\n",
					       vf, err);
				continue;
			}
		}
		mlx5_core_dbg(dev, "successfully enabled VF %d\n", vf);
	}

	return 0;
}

static void
mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err;
	int vf;

	for (vf = num_vfs - 1; vf >= 0; vf--) {
		if (!sriov->vfs_ctx[vf].enabled)
			continue;
		/* Notify the VF before it is disabled so it can
		 * clean up its resources.
		 */
		blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier,
					     MLX5_PF_NOTIFY_DISABLE_VF, dev);
		err = mlx5_core_disable_hca(dev, vf + 1);
		if (err) {
			mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
			continue;
		}
		sriov->vfs_ctx[vf].enabled = 0;
	}

	if (MLX5_ESWITCH_MANAGER(dev))
		mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);

	if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
		mlx5_core_warn(dev, "timeout reclaiming VF pages\n");
}

static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	err = mlx5_device_enable_sriov(dev, num_vfs);
	if (err) {
		mlx5_core_warn(dev, "mlx5_device_enable_sriov failed: %d\n", err);
		return err;
	}

	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		mlx5_core_warn(dev, "pci_enable_sriov failed: %d\n", err);
		mlx5_device_disable_sriov(dev, num_vfs, true);
	}
	return err;
}

void mlx5_sriov_disable(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int num_vfs = pci_num_vf(dev->pdev);

	pci_disable_sriov(pdev);
	mlx5_device_disable_sriov(dev, num_vfs, true);
}

int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err = 0;

	mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);

	if (num_vfs)
		err = mlx5_sriov_enable(pdev, num_vfs);
	else
		mlx5_sriov_disable(pdev);

	if (!err)
		sriov->num_vfs = num_vfs;
	return err ? err : num_vfs;
}

int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count)
{
	struct pci_dev *pf = pci_physfn(vf);
	struct mlx5_core_sriov *sriov;
	struct mlx5_core_dev *dev;
	int num_vf_msix, id;

	dev = pci_get_drvdata(pf);
	num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
	if (!num_vf_msix)
		return -EOPNOTSUPP;

	if (!msix_vec_count)
		msix_vec_count =
			mlx5_get_default_msix_vec_count(dev, pci_num_vf(pf));

	sriov = &dev->priv.sriov;
	id = pci_iov_vf_id(vf);
	if (id < 0 || !sriov->vfs_ctx[id].enabled)
		return -EINVAL;

	return mlx5_set_msix_vec_count(dev, id + 1, msix_vec_count);
}

int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_pf(dev) || !pci_num_vf(dev->pdev))
		return 0;

	/* If SR-IOV VFs exist at the PCI level, enable them at the device level */
	return mlx5_device_enable_sriov(dev, pci_num_vf(dev->pdev));
}

void mlx5_sriov_detach(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_pf(dev))
		return;

	mlx5_device_disable_sriov(dev, pci_num_vf(dev->pdev), false);
}

static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
{
	u16 host_total_vfs;
	const u32 *out;

	if (mlx5_core_is_ecpf_esw_manager(dev)) {
		out = mlx5_esw_query_functions(dev);

		/* Old firmware doesn't support querying total_vfs from the
		 * eswitch functions, but does support getting it from
		 * pci_sriov.
		 */
		if (IS_ERR(out))
			goto done;
		host_total_vfs = MLX5_GET(query_esw_functions_out, out,
					  host_params_context.host_total_vfs);
		kvfree(out);
		if (host_total_vfs)
			return host_total_vfs;
	}

done:
	return pci_sriov_get_totalvfs(dev->pdev);
}

int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	struct pci_dev *pdev = dev->pdev;
	int total_vfs, i;

	if (!mlx5_core_is_pf(dev))
		return 0;

	total_vfs = pci_sriov_get_totalvfs(pdev);
	sriov->max_vfs = mlx5_get_max_vfs(dev);
	sriov->num_vfs = pci_num_vf(pdev);
	sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
	if (!sriov->vfs_ctx)
		return -ENOMEM;

	for (i = 0; i < total_vfs; i++)
		BLOCKING_INIT_NOTIFIER_HEAD(&sriov->vfs_ctx[i].notifier);

	return 0;
}

void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;

	if (!mlx5_core_is_pf(dev))
		return;

	kfree(sriov->vfs_ctx);
}

/**
 * mlx5_sriov_blocking_notifier_unregister - Unregister a notifier block
 * from a VF's notification chain.
 *
 * @mdev: The mlx5 core device.
 * @vf_id: The VF id.
 * @nb: The notifier block to be unregistered.
 */
void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
					     int vf_id,
					     struct notifier_block *nb)
{
	struct mlx5_vf_context *vfs_ctx;
	struct mlx5_core_sriov *sriov;

	sriov = &mdev->priv.sriov;
	if (WARN_ON(vf_id < 0 || vf_id >= sriov->num_vfs))
		return;

	vfs_ctx = &sriov->vfs_ctx[vf_id];
	blocking_notifier_chain_unregister(&vfs_ctx->notifier, nb);
}
EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_unregister);

/**
 * mlx5_sriov_blocking_notifier_register - Register a notifier block on
 * a VF's notification chain.
 *
 * @mdev: The mlx5 core device.
 * @vf_id: The VF id.
 * @nb: The notifier block to be called upon the VF events.
 *
 * Returns 0 on success or an error code.
 */
int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
					  int vf_id,
					  struct notifier_block *nb)
{
	struct mlx5_vf_context *vfs_ctx;
	struct mlx5_core_sriov *sriov;

	sriov = &mdev->priv.sriov;
	if (vf_id < 0 || vf_id >= sriov->num_vfs)
		return -EINVAL;

	vfs_ctx = &sriov->vfs_ctx[vf_id];
	return blocking_notifier_chain_register(&vfs_ctx->notifier, nb);
}
EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_register);
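
The per-VF chain registered above is invoked from mlx5_device_enable_sriov() and mlx5_device_disable_sriov() with the PF's mlx5_core_dev as the data argument, so a PF-side consumer that binds to a single VF can react just before that VF is enabled or disabled. Below is a minimal sketch of such a consumer, assuming only the symbols defined in this file and in the mlx5/PCI headers (MLX5_PF_NOTIFY_ENABLE_VF, MLX5_PF_NOTIFY_DISABLE_VF, pci_iov_vf_id()); my_vf_state, my_vf_event and my_vf_state_init are hypothetical names used for illustration, not part of the mlx5 API.

#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/mlx5/driver.h>

/* Hypothetical per-VF state owned by a PF-side consumer. The notifier
 * block is embedded so the callback can recover it with container_of().
 */
struct my_vf_state {
	struct notifier_block nb;
	bool pf_enabled;
};

static int my_vf_event(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	struct my_vf_state *state = container_of(nb, struct my_vf_state, nb);

	/* data is the PF's mlx5_core_dev, as passed by
	 * blocking_notifier_call_chain() in sriov.c.
	 */
	switch (event) {
	case MLX5_PF_NOTIFY_ENABLE_VF:
		state->pf_enabled = true;	/* PF is about to enable this VF */
		break;
	case MLX5_PF_NOTIFY_DISABLE_VF:
		state->pf_enabled = false;	/* PF is about to disable this VF */
		break;
	}
	return NOTIFY_OK;
}

/* mdev is the PF's mlx5_core_dev; vf_pdev is the bound VF's pci_dev. */
static int my_vf_state_init(struct mlx5_core_dev *mdev,
			    struct pci_dev *vf_pdev,
			    struct my_vf_state *state)
{
	int vf_id = pci_iov_vf_id(vf_pdev);

	if (vf_id < 0)
		return vf_id;

	state->nb.notifier_call = my_vf_event;
	return mlx5_sriov_blocking_notifier_register(mdev, vf_id, &state->nb);
}

static void my_vf_state_cleanup(struct mlx5_core_dev *mdev,
				struct pci_dev *vf_pdev,
				struct my_vf_state *state)
{
	int vf_id = pci_iov_vf_id(vf_pdev);

	if (vf_id >= 0)
		mlx5_sriov_blocking_notifier_unregister(mdev, vf_id, &state->nb);
}

Note that registration is bounded by sriov->num_vfs, so a consumer can only subscribe while SR-IOV is actually configured, and the blocking (sleepable) chain means the callback may sleep but must not assume atomic context.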