cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fw_reset.c (15755B)


// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc.  All rights reserved. */

#include "fw_reset.h"
#include "diag/fw_tracer.h"
#include "lib/tout.h"

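/* Driver state bits kept in mlx5_fw_reset->reset_flags:
 * RESET_REQUESTED    - a PCI sync FW reset was requested and acked; the
 *                      sync reset poll timer runs instead of the regular
 *                      health poll.
 * NACK_RESET_REQUEST - remote device reset was disabled (devlink
 *                      enable_remote_dev_reset), so reset requests are NACKed.
 * PENDING_COMP       - this instance initiated the reset and is waiting on
 *                      the 'done' completion.
 * DROP_NEW_REQUESTS  - teardown in progress, ignore further reset events.
 */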
enum {
	MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
	MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
	MLX5_FW_RESET_FLAGS_PENDING_COMP,
	MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
};

struct mlx5_fw_reset {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
	struct workqueue_struct *wq;
	struct work_struct fw_live_patch_work;
	struct work_struct reset_request_work;
	struct work_struct reset_reload_work;
	struct work_struct reset_now_work;
	struct work_struct reset_abort_work;
	unsigned long reset_flags;
	struct timer_list timer;
	struct completion done;
	int ret;
};

void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable)
{
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

	if (enable)
		clear_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
	else
		set_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
}

bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

	return !test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
}

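/* MFRL (management firmware reset level) register accessors. The register
 * carries the requested reset level/type, the PCI-sync-for-FW-update
 * handshake fields (start/response) and the reset state reported by FW.
 */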
static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
			     u8 reset_type_sel, u8 sync_resp, bool sync_start)
{
	u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};

	MLX5_SET(mfrl_reg, in, reset_level, reset_level);
	MLX5_SET(mfrl_reg, in, rst_type_sel, reset_type_sel);
	MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_resp, sync_resp);
	MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_start, sync_start);

	return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MFRL, 0, 1);
}

static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
			       u8 *reset_type, u8 *reset_state)
{
	u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
	int err;

	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MFRL, 0, 0);
	if (err)
		return err;

	if (reset_level)
		*reset_level = MLX5_GET(mfrl_reg, out, reset_level);
	if (reset_type)
		*reset_type = MLX5_GET(mfrl_reg, out, reset_type);
	if (reset_state)
		*reset_state = MLX5_GET(mfrl_reg, out, reset_state);

	return 0;
}

int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
{
	return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL);
}

static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
					     struct netlink_ext_ack *extack)
{
	u8 reset_state;

	if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
		goto out;

	switch (reset_state) {
	case MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION:
	case MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS:
		NL_SET_ERR_MSG_MOD(extack, "Sync reset was already triggered");
		return -EBUSY;
	case MLX5_MFRL_REG_RESET_STATE_TIMEOUT:
		NL_SET_ERR_MSG_MOD(extack, "Sync reset got timeout");
		return -ETIMEDOUT;
	case MLX5_MFRL_REG_RESET_STATE_NACK:
		NL_SET_ERR_MSG_MOD(extack, "One of the hosts disabled reset");
		return -EPERM;
	}

out:
	NL_SET_ERR_MSG_MOD(extack, "Sync reset failed");
	return -EIO;
}

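/* Request a PCI sync FW update reset (reset level 3). Reached via the
 * devlink reload fw_activate flow; PENDING_COMP marks this instance as the
 * reset initiator so mlx5_fw_reset_complete_reload() signals the 'done'
 * completion instead of reloading locally.
 */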
int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
				 struct netlink_ext_ack *extack)
{
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
	u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
	int err;

	set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);

	MLX5_SET(mfrl_reg, in, reset_level, MLX5_MFRL_REG_RESET_LEVEL3);
	MLX5_SET(mfrl_reg, in, rst_type_sel, reset_type_sel);
	MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_start, 1);
	err = mlx5_access_reg(dev, in, sizeof(in), out, sizeof(out),
			      MLX5_REG_MFRL, 0, 1, false);
	if (!err)
		return 0;

	clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
	if (err == -EREMOTEIO && MLX5_CAP_MCAM_FEATURE(dev, reset_state))
		return mlx5_fw_reset_get_reset_state_err(dev, extack);

	NL_SET_ERR_MSG_MOD(extack, "Sync reset command failed");
	return mlx5_cmd_check(dev, err, in, out);
}

int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
{
	return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
}

static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

	/* if this is the driver that initiated the fw reset, devlink completed the reload */
	if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
		complete(&fw_reset->done);
	} else {
		mlx5_load_one(dev, false);
		devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
							BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
							BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
	}
}

static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

	del_timer_sync(&fw_reset->timer);
}

static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
{
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

	if (!test_and_clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
		mlx5_core_warn(dev, "Reset request was already cleared\n");
		return -EALREADY;
	}

	mlx5_stop_sync_reset_poll(dev);
	if (poll_health)
		mlx5_start_health_poll(dev);
	return 0;
}

static void mlx5_sync_reset_reload_work(struct work_struct *work)
{
	struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
						      reset_reload_work);
	struct mlx5_core_dev *dev = fw_reset->dev;
	int err;

	mlx5_sync_reset_clear_reset_requested(dev, false);
	mlx5_enter_error_state(dev, true);
	mlx5_unload_one(dev);
	err = mlx5_health_wait_pci_up(dev);
	if (err)
		mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
	fw_reset->ret = err;
	mlx5_fw_reset_complete_reload(dev);
}

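/* While a sync reset request is pending, regular health polling is stopped
 * and this timer polls the fatal sensors instead; once the device is seen
 * in reset, the reload work is queued to recover the driver.
 */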
#define MLX5_RESET_POLL_INTERVAL	(HZ / 10)
static void poll_sync_reset(struct timer_list *t)
{
	struct mlx5_fw_reset *fw_reset = from_timer(fw_reset, t, timer);
	struct mlx5_core_dev *dev = fw_reset->dev;
	u32 fatal_error;

	if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
		return;

	fatal_error = mlx5_health_check_fatal_sensors(dev);

	if (fatal_error) {
		mlx5_core_warn(dev, "Got Device Reset\n");
		if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
			queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
		else
			mlx5_core_err(dev, "Device is being removed, Drop new reset work\n");
		return;
	}

	mod_timer(&fw_reset->timer, round_jiffies(jiffies + MLX5_RESET_POLL_INTERVAL));
}

static void mlx5_start_sync_reset_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

	timer_setup(&fw_reset->timer, poll_sync_reset, 0);
	fw_reset->timer.expires = round_jiffies(jiffies + MLX5_RESET_POLL_INTERVAL);
	add_timer(&fw_reset->timer);
}

static int mlx5_fw_reset_set_reset_sync_ack(struct mlx5_core_dev *dev)
{
	return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 1, false);
}

static int mlx5_fw_reset_set_reset_sync_nack(struct mlx5_core_dev *dev)
{
	return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false);
}

static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

	if (test_and_set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) {
		mlx5_core_warn(dev, "Reset request was already set\n");
		return -EALREADY;
	}
	mlx5_stop_health_poll(dev, true);
	mlx5_start_sync_reset_poll(dev);
	return 0;
}

static void mlx5_fw_live_patch_event(struct work_struct *work)
{
	struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
						      fw_live_patch_work);
	struct mlx5_core_dev *dev = fw_reset->dev;

	mlx5_core_info(dev, "Live patch updated firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		       fw_rev_min(dev), fw_rev_sub(dev));

	if (mlx5_fw_tracer_reload(dev->tracer))
		mlx5_core_err(dev, "Failed to reload FW tracer\n");
}

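/* FW asked all hosts to prepare for a sync reset: either NACK (remote
 * device reset disabled) or stop health polling, start the sync reset poll
 * and ACK back through MFRL.
 */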
static void mlx5_sync_reset_request_event(struct work_struct *work)
{
	struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
						      reset_request_work);
	struct mlx5_core_dev *dev = fw_reset->dev;
	int err;

	if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags)) {
		err = mlx5_fw_reset_set_reset_sync_nack(dev);
		mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s",
			       err ? "Failed" : "Sent");
		return;
	}
	if (mlx5_sync_reset_set_reset_requested(dev))
		return;

	err = mlx5_fw_reset_set_reset_sync_ack(dev);
	if (err)
		mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err);
	else
		mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
}

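/* Toggle the link of the PCI bridge above the device (Link Disable bit in
 * the bridge's Link Control register) so the device comes back up running
 * the new firmware. Only done when every function on the bridge's secondary
 * bus belongs to this device; their config space is saved, access-locked
 * and restored around the toggle.
 */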
static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
{
	struct pci_bus *bridge_bus = dev->pdev->bus;
	struct pci_dev *bridge = bridge_bus->self;
	u16 reg16, dev_id, sdev_id;
	unsigned long timeout;
	struct pci_dev *sdev;
	int cap, err;
	u32 reg32;

	/* Check that all functions under the pci bridge are PFs of
	 * this device otherwise fail this function.
	 */
	err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
	if (err)
		return err;
	list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
		err = pci_read_config_word(sdev, PCI_DEVICE_ID, &sdev_id);
		if (err)
			return err;
		if (sdev_id != dev_id)
			return -EPERM;
	}

	cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
	if (!cap)
		return -EOPNOTSUPP;

	list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
		pci_save_state(sdev);
		pci_cfg_access_lock(sdev);
	}
	/* PCI link toggle */
	err = pci_read_config_word(bridge, cap + PCI_EXP_LNKCTL, &reg16);
	if (err)
		return err;
	reg16 |= PCI_EXP_LNKCTL_LD;
	err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16);
	if (err)
		return err;
	msleep(500);
	reg16 &= ~PCI_EXP_LNKCTL_LD;
	err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16);
	if (err)
		return err;

	/* Check link */
	err = pci_read_config_dword(bridge, cap + PCI_EXP_LNKCAP, &reg32);
	if (err)
		return err;
	if (!(reg32 & PCI_EXP_LNKCAP_DLLLARC)) {
		mlx5_core_warn(dev, "No PCI link reporting capability (0x%08x)\n", reg32);
		msleep(1000);
		goto restore;
	}

	timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, PCI_TOGGLE));
	do {
		err = pci_read_config_word(bridge, cap + PCI_EXP_LNKSTA, &reg16);
		if (err)
			return err;
		if (reg16 & PCI_EXP_LNKSTA_DLLLA)
			break;
		msleep(20);
	} while (!time_after(jiffies, timeout));

	if (reg16 & PCI_EXP_LNKSTA_DLLLA) {
		mlx5_core_info(dev, "PCI Link up\n");
	} else {
		mlx5_core_err(dev, "PCI link not ready (0x%04x) after %llu ms\n",
			      reg16, mlx5_tout_ms(dev, PCI_TOGGLE));
		err = -ETIMEDOUT;
	}

restore:
	list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
		pci_cfg_access_unlock(sdev);
		pci_restore_state(sdev);
	}

	return err;
}

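/* FW signalled "reset now": fast-teardown the HCA, toggle the PCI link to
 * perform the actual reset, then enter error state and unload so the
 * reload path can bring the driver back up.
 */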
static void mlx5_sync_reset_now_event(struct work_struct *work)
{
	struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
						      reset_now_work);
	struct mlx5_core_dev *dev = fw_reset->dev;
	int err;

	if (mlx5_sync_reset_clear_reset_requested(dev, false))
		return;

	mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n");

	err = mlx5_cmd_fast_teardown_hca(dev);
	if (err) {
		mlx5_core_warn(dev, "Fast teardown failed, no reset done, err %d\n", err);
		goto done;
	}

	err = mlx5_pci_link_toggle(dev);
	if (err) {
		mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
		goto done;
	}

	mlx5_enter_error_state(dev, true);
	mlx5_unload_one(dev);
done:
	fw_reset->ret = err;
	mlx5_fw_reset_complete_reload(dev);
}

static void mlx5_sync_reset_abort_event(struct work_struct *work)
{
	struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
						      reset_abort_work);
	struct mlx5_core_dev *dev = fw_reset->dev;

	if (mlx5_sync_reset_clear_reset_requested(dev, true))
		return;
	mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
}

static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct mlx5_eqe *eqe)
{
	struct mlx5_eqe_sync_fw_update *sync_fw_update_eqe;
	u8 sync_event_rst_type;

	sync_fw_update_eqe = &eqe->data.sync_fw_update;
	sync_event_rst_type = sync_fw_update_eqe->sync_rst_state & SYNC_RST_STATE_MASK;
	switch (sync_event_rst_type) {
	case MLX5_SYNC_RST_STATE_RESET_REQUEST:
		queue_work(fw_reset->wq, &fw_reset->reset_request_work);
		break;
	case MLX5_SYNC_RST_STATE_RESET_NOW:
		queue_work(fw_reset->wq, &fw_reset->reset_now_work);
		break;
	case MLX5_SYNC_RST_STATE_RESET_ABORT:
		queue_work(fw_reset->wq, &fw_reset->reset_abort_work);
		break;
	}
}

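/* GENERAL_EVENT notifier: dispatch FW live patch and PCI-sync-for-FW-update
 * EQEs to their work items; events are dropped once teardown has started.
 */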
static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
	struct mlx5_eqe *eqe = data;

	if (test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
		return NOTIFY_DONE;

	switch (eqe->sub_type) {
	case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT:
		queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
		break;
	case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT:
		mlx5_sync_reset_events_handle(fw_reset, eqe);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

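/* Called by the reset initiator to wait for the reload work to finish.
 * Returns the stored result, or -ETIMEDOUT if the PCI_SYNC_UPDATE timeout
 * expires first.
 */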
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
{
	unsigned long pci_sync_update_timeout = mlx5_tout_ms(dev, PCI_SYNC_UPDATE);
	unsigned long timeout = msecs_to_jiffies(pci_sync_update_timeout);
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
	int err;

	if (!wait_for_completion_timeout(&fw_reset->done, timeout)) {
		mlx5_core_warn(dev, "FW sync reset timeout after %lu seconds\n",
			       pci_sync_update_timeout / 1000);
		err = -ETIMEDOUT;
		goto out;
	}
	err = fw_reset->ret;
out:
	clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
	return err;
}

void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

	MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT);
	mlx5_eq_notifier_register(dev, &fw_reset->nb);
}

void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
}

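/* Stop accepting new reset events and flush any reset work already queued. */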
void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

	set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
	cancel_work_sync(&fw_reset->fw_live_patch_work);
	cancel_work_sync(&fw_reset->reset_request_work);
	cancel_work_sync(&fw_reset->reset_reload_work);
	cancel_work_sync(&fw_reset->reset_now_work);
	cancel_work_sync(&fw_reset->reset_abort_work);
}

int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);

	if (!fw_reset)
		return -ENOMEM;
	fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events");
	if (!fw_reset->wq) {
		kfree(fw_reset);
		return -ENOMEM;
	}

	fw_reset->dev = dev;
	dev->priv.fw_reset = fw_reset;

	INIT_WORK(&fw_reset->fw_live_patch_work, mlx5_fw_live_patch_event);
	INIT_WORK(&fw_reset->reset_request_work, mlx5_sync_reset_request_event);
	INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
	INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
	INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);

	init_completion(&fw_reset->done);
	return 0;
}

void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

	destroy_workqueue(fw_reset->wq);
	kfree(dev->priv.fw_reset);
}