fw.c (22769B)
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
#include "lib/tout.h"

enum {
	MCQS_IDENTIFIER_BOOT_IMG	= 0x1,
	MCQS_IDENTIFIER_OEM_NVCONFIG	= 0x4,
	MCQS_IDENTIFIER_MLNX_NVCONFIG	= 0x5,
	MCQS_IDENTIFIER_CS_TOKEN	= 0x6,
	MCQS_IDENTIFIER_DBG_TOKEN	= 0x7,
	MCQS_IDENTIFIER_GEARBOX		= 0xA,
};

enum {
	MCQS_UPDATE_STATE_IDLE,
	MCQS_UPDATE_STATE_IN_PROGRESS,
	MCQS_UPDATE_STATE_APPLIED,
	MCQS_UPDATE_STATE_ACTIVE,
	MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET,
	MCQS_UPDATE_STATE_FAILED,
	MCQS_UPDATE_STATE_CANCELED,
	MCQS_UPDATE_STATE_BUSY,
};

enum {
	MCQI_INFO_TYPE_CAPABILITIES	 = 0x0,
	MCQI_INFO_TYPE_VERSION		 = 0x1,
	MCQI_INFO_TYPE_ACTIVATION_METHOD = 0x5,
};

enum {
	MCQI_FW_RUNNING_VERSION = 0,
	MCQI_FW_STORED_VERSION  = 1,
};

int mlx5_query_board_id(struct mlx5_core_dev *dev)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
	u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {};
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
	err = mlx5_cmd_exec_inout(dev, query_adapter, in, out);
	if (err)
		goto out;

	memcpy(dev->board_id,
	       MLX5_ADDR_OF(query_adapter_out, out,
			    query_adapter_struct.vsd_contd_psid),
	       MLX5_FLD_SZ_BYTES(query_adapter_out,
				 query_adapter_struct.vsd_contd_psid));

out:
	kfree(out);
	return err;
}

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
	u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {};
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
	err = mlx5_cmd_exec_inout(mdev, query_adapter, in, out);
	if (err)
		goto out;

	*vendor_id = MLX5_GET(query_adapter_out, out,
			      query_adapter_struct.ieee_vendor_id);
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL(mlx5_core_query_vendor_id);

static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
{
	return mlx5_query_pcam_reg(dev, dev->caps.pcam,
				   MLX5_PCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_PCAM_REGS_5000_TO_507F);
}

static int mlx5_get_mcam_access_reg_group(struct mlx5_core_dev *dev,
					   enum mlx5_mcam_reg_groups group)
{
	return mlx5_query_mcam_reg(dev, dev->caps.mcam[group],
				   MLX5_MCAM_FEATURE_ENHANCED_FEATURES, group);
}

static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
{
	return mlx5_query_qcam_reg(dev, dev->caps.qcam,
				   MLX5_QCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_QCAM_REGS_FIRST_128);
}

int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		return err;

	if (MLX5_CAP_GEN(dev, port_selection_cap)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, hca_cap_2)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_IPOIB_ENHANCED_OFFLOADS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, roce)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, nic_flow_table) ||
	    MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, vport_group_manager) &&
	    MLX5_ESWITCH_MANAGER(dev)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
		if (err)
			return err;
	}

	if (MLX5_ESWITCH_MANAGER(dev)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, vector_calc)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, qos)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, debug))
		mlx5_core_get_caps(dev, MLX5_CAP_DEBUG);

	if (MLX5_CAP_GEN(dev, pcam_reg))
		mlx5_get_pcam_reg(dev);

	if (MLX5_CAP_GEN(dev, mcam_reg)) {
		mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_FIRST_128);
		mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9080_0x90FF);
		mlx5_get_mcam_access_reg_group(dev, MLX5_MCAM_REGS_0x9100_0x917F);
	}

	if (MLX5_CAP_GEN(dev, qcam_reg))
		mlx5_get_qcam_reg(dev);

	if (MLX5_CAP_GEN(dev, device_memory)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_MEM);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, event_cap)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN_64(dev, general_obj_types) &
	    MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, ipsec_offload)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_IPSEC);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, shampo)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_SHAMPO);
		if (err)
			return err;
	}

	return 0;
}

int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
{
	u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {};
	int i;

	MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);

	if (MLX5_CAP_GEN(dev, sw_owner_id)) {
		for (i = 0; i < 4; i++)
			MLX5_ARRAY_SET(init_hca_in, in, sw_owner_id, i,
				       sw_owner_id[i]);
	}

	return mlx5_cmd_exec_in(dev, init_hca, in);
}

int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	return mlx5_cmd_exec_in(dev, teardown_hca, in);
}

int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
	int force_state;
	int ret;

	if (!MLX5_CAP_GEN(dev, force_teardown)) {
		mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE);

	ret = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	force_state = MLX5_GET(teardown_hca_out, out, state);
	if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
		mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n");
		return -EIO;
	}

	return 0;
}

int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
	unsigned long end, delay_ms = mlx5_tout_ms(dev, TEARDOWN);
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
	int state;
	int ret;

	if (!MLX5_CAP_GEN(dev, fast_teardown)) {
		mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	MLX5_SET(teardown_hca_in, in, profile,
		 MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);

	ret = mlx5_cmd_exec_inout(dev, teardown_hca, in, out);
	if (ret)
		return ret;

	state = MLX5_GET(teardown_hca_out, out, state);
	if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
		mlx5_core_warn(dev, "teardown with fast mode failed\n");
		return -EIO;
	}

	mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);

	/* Loop until device state turns to disable */
	end = jiffies + msecs_to_jiffies(delay_ms);
	do {
		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
			break;

		cond_resched();
	} while (!time_after(jiffies, end));

	if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
		dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
			mlx5_get_nic_state(dev), delay_ms);
		return -EIO;
	}

	return 0;
}

enum mlxsw_reg_mcc_instruction {
	MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
	MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
	MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
	MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
	MLX5_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
	MLX5_REG_MCC_INSTRUCTION_CANCEL = 0x08,
};

static int mlx5_reg_mcc_set(struct mlx5_core_dev *dev,
			    enum mlxsw_reg_mcc_instruction instr,
			    u16 component_index, u32 update_handle,
			    u32 component_size)
{
	u32 out[MLX5_ST_SZ_DW(mcc_reg)];
	u32 in[MLX5_ST_SZ_DW(mcc_reg)];

	memset(in, 0, sizeof(in));

	MLX5_SET(mcc_reg, in, instruction, instr);
	MLX5_SET(mcc_reg, in, component_index, component_index);
	MLX5_SET(mcc_reg, in, update_handle, update_handle);
	MLX5_SET(mcc_reg, in, component_size, component_size);

	return mlx5_core_access_reg(dev, in, sizeof(in), out,
				    sizeof(out), MLX5_REG_MCC, 0, 1);
}

static int mlx5_reg_mcc_query(struct mlx5_core_dev *dev,
			      u32 *update_handle, u8 *error_code,
			      u8 *control_state)
{
	u32 out[MLX5_ST_SZ_DW(mcc_reg)];
	u32 in[MLX5_ST_SZ_DW(mcc_reg)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));
	MLX5_SET(mcc_reg, in, update_handle, *update_handle);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   sizeof(out), MLX5_REG_MCC, 0, 0);
	if (err)
		goto out;

	*update_handle = MLX5_GET(mcc_reg, out, update_handle);
	*error_code = MLX5_GET(mcc_reg, out, error_code);
	*control_state = MLX5_GET(mcc_reg, out, control_state);

out:
	return err;
}

static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
			     u32 update_handle,
			     u32 offset, u16 size,
			     u8 *data)
{
	int err, in_size = MLX5_ST_SZ_BYTES(mcda_reg) + size;
	u32 out[MLX5_ST_SZ_DW(mcda_reg)];
	int i, j, dw_size = size >> 2;
	__be32 data_element;
	u32 *in;

	in = kzalloc(in_size, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(mcda_reg, in, update_handle, update_handle);
	MLX5_SET(mcda_reg, in, offset, offset);
	MLX5_SET(mcda_reg, in, size, size);

	for (i = 0; i < dw_size; i++) {
		j = i * 4;
		data_element = htonl(*(u32 *)&data[j]);
		memcpy(MLX5_ADDR_OF(mcda_reg, in, data) + j, &data_element, 4);
	}

	err = mlx5_core_access_reg(dev, in, in_size, out,
				   sizeof(out), MLX5_REG_MCDA, 0, 1);
	kfree(in);
	return err;
}

static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
			       u16 component_index, bool read_pending,
			       u8 info_type, u16 data_size, void *mcqi_data)
{
	u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_UN_SZ_DW(mcqi_reg_data)] = {};
	u32 in[MLX5_ST_SZ_DW(mcqi_reg)] = {};
	void *data;
	int err;

	MLX5_SET(mcqi_reg, in, component_index, component_index);
	MLX5_SET(mcqi_reg, in, read_pending_component, read_pending);
	MLX5_SET(mcqi_reg, in, info_type, info_type);
	MLX5_SET(mcqi_reg, in, data_size, data_size);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   MLX5_ST_SZ_BYTES(mcqi_reg) + data_size,
				   MLX5_REG_MCQI, 0, 0);
	if (err)
		return err;

	data = MLX5_ADDR_OF(mcqi_reg, out, data);
	memcpy(mcqi_data, data, data_size);

	return 0;
}

static int mlx5_reg_mcqi_caps_query(struct mlx5_core_dev *dev, u16 component_index,
				    u32 *max_component_size, u8 *log_mcda_word_size,
				    u16 *mcda_max_write_size)
{
	u32 mcqi_reg[MLX5_ST_SZ_DW(mcqi_cap)] = {};
	int err;

	err = mlx5_reg_mcqi_query(dev, component_index, 0,
				  MCQI_INFO_TYPE_CAPABILITIES,
				  MLX5_ST_SZ_BYTES(mcqi_cap), mcqi_reg);
	if (err)
		return err;

	*max_component_size = MLX5_GET(mcqi_cap, mcqi_reg, max_component_size);
	*log_mcda_word_size = MLX5_GET(mcqi_cap, mcqi_reg, log_mcda_word_size);
	*mcda_max_write_size = MLX5_GET(mcqi_cap, mcqi_reg, mcda_max_write_size);

	return 0;
}

struct mlx5_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlx5_core_dev *mlx5_core_dev;
};

static int mlx5_component_query(struct mlxfw_dev *mlxfw_dev,
				u16 component_index, u32 *p_max_size,
				u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_REG(dev, mcqi)) {
		mlx5_core_warn(dev, "caps query isn't supported by running FW\n");
		return -EOPNOTSUPP;
	}

	return mlx5_reg_mcqi_caps_query(dev, component_index, p_max_size,
					p_align_bits, p_max_write_size);
}

static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
	u8 control_state, error_code;
	int err;

	*fwhandle = 0;
	err = mlx5_reg_mcc_query(dev, fwhandle, &error_code, &control_state);
	if (err)
		return err;

	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
				0, *fwhandle, 0);
}

static int mlx5_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				     u16 component_index, u32 component_size)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
				component_index, fwhandle, component_size);
}

static int mlx5_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				   u8 *data, u16 size, u32 offset)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcda_set(dev, fwhandle, offset, size, data);
}

static int mlx5_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				     u16 component_index)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
				component_index, fwhandle, 0);
}

static int mlx5_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_ACTIVATE, 0,
				fwhandle, 0);
}

static int mlx5_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				enum mlxfw_fsm_state *fsm_state,
				enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
	u8 control_state, error_code;
	int err;

	err = mlx5_reg_mcc_query(dev, &fwhandle, &error_code, &control_state);
	if (err)
		return err;

	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlx5_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
}

static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			 fwhandle, 0);
}

static int mlx5_fsm_reactivate(struct mlxfw_dev *mlxfw_dev, u8 *status)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
	u32 out[MLX5_ST_SZ_DW(mirc_reg)];
	u32 in[MLX5_ST_SZ_DW(mirc_reg)];
	unsigned long exp_time;
	int err;

	exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FSM_REACTIVATE));

	if (!MLX5_CAP_MCAM_REG2(dev, mirc))
		return -EOPNOTSUPP;

	memset(in, 0, sizeof(in));

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   sizeof(out), MLX5_REG_MIRC, 0, 1);
	if (err)
		return err;

	do {
		memset(out, 0, sizeof(out));
		err = mlx5_core_access_reg(dev, in, sizeof(in), out,
					   sizeof(out), MLX5_REG_MIRC, 0, 0);
		if (err)
			return err;

		*status = MLX5_GET(mirc_reg, out, status_code);
		if (*status != MLXFW_FSM_REACTIVATE_STATUS_BUSY)
			return 0;

		msleep(20);
	} while (time_before(jiffies, exp_time));

	return 0;
}

static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
	.component_query = mlx5_component_query,
	.fsm_lock = mlx5_fsm_lock,
	.fsm_component_update = mlx5_fsm_component_update,
	.fsm_block_download = mlx5_fsm_block_download,
	.fsm_component_verify = mlx5_fsm_component_verify,
	.fsm_activate = mlx5_fsm_activate,
	.fsm_reactivate = mlx5_fsm_reactivate,
	.fsm_query_state = mlx5_fsm_query_state,
	.fsm_cancel = mlx5_fsm_cancel,
	.fsm_release = mlx5_fsm_release
};

int mlx5_firmware_flash(struct mlx5_core_dev *dev,
			const struct firmware *firmware,
			struct netlink_ext_ack *extack)
{
	struct mlx5_mlxfw_dev mlx5_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlx5_mlxfw_dev_ops,
			.psid = dev->board_id,
			.psid_size = strlen(dev->board_id),
			.devlink = priv_to_devlink(dev),
		},
		.mlx5_core_dev = dev
	};

	if (!MLX5_CAP_GEN(dev, mcam_reg) ||
	    !MLX5_CAP_MCAM_REG(dev, mcqi) ||
	    !MLX5_CAP_MCAM_REG(dev, mcc) ||
	    !MLX5_CAP_MCAM_REG(dev, mcda)) {
		pr_info("%s flashing isn't supported by the running FW\n", __func__);
		return -EOPNOTSUPP;
	}

	return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev,
				    firmware, extack);
}

static int mlx5_reg_mcqi_version_query(struct mlx5_core_dev *dev,
				       u16 component_index, bool read_pending,
				       u32 *mcqi_version_out)
{
	return mlx5_reg_mcqi_query(dev, component_index, read_pending,
				   MCQI_INFO_TYPE_VERSION,
				   MLX5_ST_SZ_BYTES(mcqi_version),
				   mcqi_version_out);
}

static int mlx5_reg_mcqs_query(struct mlx5_core_dev *dev, u32 *out,
			       u16 component_index)
{
	u8 out_sz = MLX5_ST_SZ_BYTES(mcqs_reg);
	u32 in[MLX5_ST_SZ_DW(mcqs_reg)] = {};
	int err;

	memset(out, 0, out_sz);

	MLX5_SET(mcqs_reg, in, component_index, component_index);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   out_sz, MLX5_REG_MCQS, 0, 0);
	return err;
}

/* scans component index sequentially, to find the boot img index */
static int mlx5_get_boot_img_component_index(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(mcqs_reg)] = {};
	u16 identifier, component_idx = 0;
	bool quit;
	int err;

	do {
		err = mlx5_reg_mcqs_query(dev, out, component_idx);
		if (err)
			return err;

		identifier = MLX5_GET(mcqs_reg, out, identifier);
		quit = !!MLX5_GET(mcqs_reg, out, last_index_flag);
		quit |= identifier == MCQS_IDENTIFIER_BOOT_IMG;
	} while (!quit && ++component_idx);

	if (identifier != MCQS_IDENTIFIER_BOOT_IMG) {
		mlx5_core_warn(dev, "mcqs: can't find boot_img component ix, last scanned idx %d\n",
			       component_idx);
		return -EOPNOTSUPP;
	}

	return component_idx;
}

static int
mlx5_fw_image_pending(struct mlx5_core_dev *dev,
		      int component_index,
		      bool *pending_version_exists)
{
	u32 out[MLX5_ST_SZ_DW(mcqs_reg)];
	u8 component_update_state;
	int err;

	err = mlx5_reg_mcqs_query(dev, out, component_index);
	if (err)
		return err;

	component_update_state = MLX5_GET(mcqs_reg, out, component_update_state);

	if (component_update_state == MCQS_UPDATE_STATE_IDLE) {
		*pending_version_exists = false;
	} else if (component_update_state == MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET) {
		*pending_version_exists = true;
	} else {
		mlx5_core_warn(dev,
			       "mcqs: can't read pending fw version while fw state is %d\n",
			       component_update_state);
		return -ENODATA;
	}
	return 0;
}

int mlx5_fw_version_query(struct mlx5_core_dev *dev,
			  u32 *running_ver, u32 *pending_ver)
{
	u32 reg_mcqi_version[MLX5_ST_SZ_DW(mcqi_version)] = {};
	bool pending_version_exists;
	int component_index;
	int err;

	if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_REG(dev, mcqi) ||
	    !MLX5_CAP_MCAM_REG(dev, mcqs)) {
		mlx5_core_warn(dev, "fw query isn't supported by the FW\n");
		return -EOPNOTSUPP;
	}

	component_index = mlx5_get_boot_img_component_index(dev);
	if (component_index < 0)
		return component_index;

	err = mlx5_reg_mcqi_version_query(dev, component_index,
					  MCQI_FW_RUNNING_VERSION,
					  reg_mcqi_version);
	if (err)
		return err;

	*running_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version);

	err = mlx5_fw_image_pending(dev, component_index, &pending_version_exists);
	if (err)
		return err;

	if (!pending_version_exists) {
		*pending_ver = 0;
		return 0;
	}

	err = mlx5_reg_mcqi_version_query(dev, component_index,
					  MCQI_FW_STORED_VERSION,
					  reg_mcqi_version);
	if (err)
		return err;

	*pending_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version);

	return 0;
}
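
/*
 * Minimal usage sketch, not part of the driver: it only ties together the two
 * exported entry points defined above, mlx5_firmware_flash() and
 * mlx5_fw_version_query(). It assumes the caller has already obtained a
 * struct firmware (e.g. via request_firmware()) and a devlink extack; the
 * helper name example_flash_and_report() is hypothetical. Kept under "#if 0"
 * so it is illustrative only and never compiled.
 */
#if 0	/* illustrative sketch only */
static int example_flash_and_report(struct mlx5_core_dev *dev,
				    const struct firmware *fw,
				    struct netlink_ext_ack *extack)
{
	u32 running_ver = 0, pending_ver = 0;
	int err;

	/* Push the image through the MCC/MCDA update FSM wired up in
	 * mlx5_mlxfw_dev_ops (lock, update, download, verify, activate).
	 */
	err = mlx5_firmware_flash(dev, fw, extack);
	if (err)
		return err;

	/* Report the running version and any image pending activation reset,
	 * as read from the boot image component via MCQS/MCQI.
	 */
	err = mlx5_fw_version_query(dev, &running_ver, &pending_ver);
	if (err)
		return err;

	mlx5_core_info(dev, "running fw %u, pending fw %u\n",
		       running_ver, pending_ver);
	return 0;
}
#endif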