cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cmd.c (10186B)
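The file below is the command layer of the mlx5 VFIO PCI live-migration driver (drivers/vfio/pci/mlx5/cmd.c upstream): it suspends and resumes a VF's vhca, queries the size of its migration image, and saves or loads that image by DMA-mapping a scatter-gather buffer under a protection domain and an MTT memory key.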


// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
 */

#include "cmd.h"

static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
				  u16 *vhca_id);

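/*
 * mlx5vf_cmd_suspend_vhca() - quiesce the VF's vhca with the SUSPEND_VHCA
 * firmware command; op_mod selects which side of the datapath to stop.
 * Callers must hold mvdev->state_mutex, and the command is refused once
 * the PF has detached. A sketch of the expected call from the migration
 * state machine, with the op_mod constant name assumed from the driver's
 * cmd.h rather than shown in this file:
 *
 *	err = mlx5vf_cmd_suspend_vhca(mvdev,
 *			MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER);
 */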
int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
{
	u32 out[MLX5_ST_SZ_DW(suspend_vhca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(suspend_vhca_in)] = {};

	lockdep_assert_held(&mvdev->state_mutex);
	if (mvdev->mdev_detach)
		return -ENOTCONN;

	MLX5_SET(suspend_vhca_in, in, opcode, MLX5_CMD_OP_SUSPEND_VHCA);
	MLX5_SET(suspend_vhca_in, in, vhca_id, mvdev->vhca_id);
	MLX5_SET(suspend_vhca_in, in, op_mod, op_mod);

	return mlx5_cmd_exec_inout(mvdev->mdev, suspend_vhca, in, out);
}

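/*
 * mlx5vf_cmd_resume_vhca() - inverse of the suspend above: RESUME_VHCA
 * restarts the vhca after a migration stop, with op_mod again choosing
 * the traffic direction. Same locking and detach rules apply.
 */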
int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod)
{
	u32 out[MLX5_ST_SZ_DW(resume_vhca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(resume_vhca_in)] = {};

	lockdep_assert_held(&mvdev->state_mutex);
	if (mvdev->mdev_detach)
		return -ENOTCONN;

	MLX5_SET(resume_vhca_in, in, opcode, MLX5_CMD_OP_RESUME_VHCA);
	MLX5_SET(resume_vhca_in, in, vhca_id, mvdev->vhca_id);
	MLX5_SET(resume_vhca_in, in, op_mod, op_mod);

	return mlx5_cmd_exec_inout(mvdev->mdev, resume_vhca, in, out);
}

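/*
 * mlx5vf_cmd_query_vhca_migration_state() - ask firmware how many bytes
 * are needed to hold this VF's migration image. The reported
 * required_umem_size sizes the buffer that the save flow DMA-maps below.
 */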
int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev,
					  size_t *state_size)
{
	u32 out[MLX5_ST_SZ_DW(query_vhca_migration_state_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_vhca_migration_state_in)] = {};
	int ret;

	lockdep_assert_held(&mvdev->state_mutex);
	if (mvdev->mdev_detach)
		return -ENOTCONN;

	MLX5_SET(query_vhca_migration_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE);
	MLX5_SET(query_vhca_migration_state_in, in, vhca_id, mvdev->vhca_id);
	MLX5_SET(query_vhca_migration_state_in, in, op_mod, 0);

	ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state, in,
				  out);
	if (ret)
		return ret;

	*state_size = MLX5_GET(query_vhca_migration_state_out, out,
			       required_umem_size);
	return 0;
}

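/*
 * mlx5vf_vf_event() - blocking notifier run by the PF driver when this VF
 * is enabled or disabled (e.g. via sriov_numvfs). On disable, close any
 * open migration files and mark the mdev detached so the commands above
 * fail with -ENOTCONN instead of touching a dead function.
 */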
static int mlx5vf_vf_event(struct notifier_block *nb,
			   unsigned long event, void *data)
{
	struct mlx5vf_pci_core_device *mvdev =
		container_of(nb, struct mlx5vf_pci_core_device, nb);

	mutex_lock(&mvdev->state_mutex);
	switch (event) {
	case MLX5_PF_NOTIFY_ENABLE_VF:
		mvdev->mdev_detach = false;
		break;
	case MLX5_PF_NOTIFY_DISABLE_VF:
		mlx5vf_disable_fds(mvdev);
		mvdev->mdev_detach = true;
		break;
	default:
		break;
	}
	mlx5vf_state_mutex_unlock(mvdev);
	return 0;
}

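/*
 * mlx5vf_cmd_remove_migratable() - teardown counterpart of
 * mlx5vf_cmd_set_migratable(): unregister the VF notifier and destroy the
 * ordered workqueue used for save-command completions.
 */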
void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev)
{
	if (!mvdev->migrate_cap)
		return;

	mlx5_sriov_blocking_notifier_unregister(mvdev->mdev, mvdev->vf_id,
						&mvdev->nb);
	destroy_workqueue(mvdev->cb_wq);
}

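/*
 * mlx5vf_cmd_set_migratable() - probe-time setup. Only virtual functions
 * whose PF exposes the migration capability qualify; for those, resolve
 * the vf_id and vhca_id, allocate the completion workqueue, register for
 * enable/disable events, and advertise VFIO_MIGRATION_STOP_COPY | _P2P.
 * The PF core-device reference is dropped again before returning.
 */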
void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev)
{
	struct pci_dev *pdev = mvdev->core_device.pdev;
	int ret;

	if (!pdev->is_virtfn)
		return;

	mvdev->mdev = mlx5_vf_get_core_dev(pdev);
	if (!mvdev->mdev)
		return;

	if (!MLX5_CAP_GEN(mvdev->mdev, migration))
		goto end;

	mvdev->vf_id = pci_iov_vf_id(pdev);
	if (mvdev->vf_id < 0)
		goto end;

	if (mlx5vf_cmd_get_vhca_id(mvdev->mdev, mvdev->vf_id + 1,
				   &mvdev->vhca_id))
		goto end;

	mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0);
	if (!mvdev->cb_wq)
		goto end;

	mutex_init(&mvdev->state_mutex);
	spin_lock_init(&mvdev->reset_lock);
	mvdev->nb.notifier_call = mlx5vf_vf_event;
	ret = mlx5_sriov_blocking_notifier_register(mvdev->mdev, mvdev->vf_id,
						    &mvdev->nb);
	if (ret) {
		destroy_workqueue(mvdev->cb_wq);
		goto end;
	}

	mvdev->migrate_cap = 1;
	mvdev->core_device.vdev.migration_flags =
		VFIO_MIGRATION_STOP_COPY |
		VFIO_MIGRATION_P2P;

end:
	mlx5_vf_put_core_dev(mvdev->mdev);
}

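/*
 * mlx5vf_cmd_get_vhca_id() - map a PCI function index (VF index + 1, as
 * passed by the caller above) to the firmware's vhca_id via QUERY_HCA_CAP
 * with other_function set. The output mailbox is heap-allocated because
 * query_hca_cap_out spans several kilobytes.
 */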
static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id,
				  u16 *vhca_id)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
	int out_size;
	void *out;
	int ret;

	out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	out = kzalloc(out_size, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, other_function, 1);
	MLX5_SET(query_hca_cap_in, in, function_id, function_id);
	MLX5_SET(query_hca_cap_in, in, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
		 HCA_CAP_OPMOD_GET_CUR);

	ret = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
	if (ret)
		goto err_exec;

	*vhca_id = MLX5_GET(query_hca_cap_out, out,
			    capability.cmd_hca_cap.vhca_id);

err_exec:
	kfree(out);
	return ret;
}

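/*
 * _create_state_mkey() - register the migration buffer with the device.
 * Builds a CREATE_MKEY command whose MTT array is filled from the already
 * DMA-mapped scatter-gather table, granting firmware local and remote
 * read/write access to the image pages under protection domain pdn. The
 * MTT count is rounded up to an even number because the translation size
 * is expressed in octwords (two 64-bit entries each).
 */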
static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn,
			      struct mlx5_vf_migration_file *migf, u32 *mkey)
{
	size_t npages = DIV_ROUND_UP(migf->total_length, PAGE_SIZE);
	struct sg_dma_page_iter dma_iter;
	int err = 0, inlen;
	__be64 *mtt;
	void *mkc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
		sizeof(*mtt) * round_up(npages, 2);

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 DIV_ROUND_UP(npages, 2));
	mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);

	for_each_sgtable_dma_page(&migf->table.sgt, &dma_iter, 0)
		*mtt++ = cpu_to_be64(sg_page_iter_dma_address(&dma_iter));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, rr, 1);
	MLX5_SET(mkc, mkc, rw, 1);
	MLX5_SET(mkc, mkc, pd, pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
	MLX5_SET(mkc, mkc, translations_octword_size, DIV_ROUND_UP(npages, 2));
	MLX5_SET64(mkc, mkc, len, migf->total_length);
	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
	kvfree(in);
	return err;
}

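/*
 * mlx5vf_mig_file_cleanup_cb() - process-context completion work queued
 * by mlx5vf_save_callback(). Flags an error to poll()ers if the firmware
 * reported one, then releases everything the save path set up: the mkey,
 * the DMA mapping, the protection domain, the output mailbox, and the
 * file reference taken before the async command was issued.
 */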
void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work)
{
	struct mlx5vf_async_data *async_data = container_of(_work,
		struct mlx5vf_async_data, work);
	struct mlx5_vf_migration_file *migf = container_of(async_data,
		struct mlx5_vf_migration_file, async_data);
	struct mlx5_core_dev *mdev = migf->mvdev->mdev;

	mutex_lock(&migf->lock);
	if (async_data->status) {
		migf->is_err = true;
		wake_up_interruptible(&migf->poll_wait);
	}
	mutex_unlock(&migf->lock);

	mlx5_core_destroy_mkey(mdev, async_data->mkey);
	dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
	mlx5_core_dealloc_pd(mdev, async_data->pdn);
	kvfree(async_data->out);
	fput(migf->filp);
}

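/*
 * mlx5vf_save_callback() - completion handler for the asynchronous
 * SAVE_VHCA_STATE command. On success, publish the actual image size so
 * readers of the migration file can make progress, then hand the rest off
 * to the workqueue; see the comment below about interrupt context.
 */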
static void mlx5vf_save_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5vf_async_data *async_data = container_of(context,
			struct mlx5vf_async_data, cb_work);
	struct mlx5_vf_migration_file *migf = container_of(async_data,
			struct mlx5_vf_migration_file, async_data);

	if (!status) {
		WRITE_ONCE(migf->total_length,
			   MLX5_GET(save_vhca_state_out, async_data->out,
				    actual_image_size));
		wake_up_interruptible(&migf->poll_wait);
	}

	/*
	 * The error and the cleanup flows can't run from an
	 * interrupt context
	 */
	async_data->status = status;
	queue_work(migf->mvdev->cb_wq, &async_data->work);
}

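/*
 * mlx5vf_cmd_save_vhca_state() - start saving the VF state. Sets up a
 * protection domain, DMA-maps the migration buffer for device writes, and
 * registers it with _create_state_mkey(), then issues SAVE_VHCA_STATE
 * asynchronously with mlx5vf_save_callback() as the completion. The extra
 * reference on migf->filp keeps the migration file alive until the
 * deferred cleanup runs; the error paths unwind in strict reverse order.
 */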
int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf)
{
	u32 out_size = MLX5_ST_SZ_BYTES(save_vhca_state_out);
	u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {};
	struct mlx5vf_async_data *async_data;
	struct mlx5_core_dev *mdev;
	u32 pdn, mkey;
	int err;

	lockdep_assert_held(&mvdev->state_mutex);
	if (mvdev->mdev_detach)
		return -ENOTCONN;

	mdev = mvdev->mdev;
	err = mlx5_core_alloc_pd(mdev, &pdn);
	if (err)
		return err;

	err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE,
			      0);
	if (err)
		goto err_dma_map;

	err = _create_state_mkey(mdev, pdn, migf, &mkey);
	if (err)
		goto err_create_mkey;

	MLX5_SET(save_vhca_state_in, in, opcode,
		 MLX5_CMD_OP_SAVE_VHCA_STATE);
	MLX5_SET(save_vhca_state_in, in, op_mod, 0);
	MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id);
	MLX5_SET(save_vhca_state_in, in, mkey, mkey);
	MLX5_SET(save_vhca_state_in, in, size, migf->total_length);

	async_data = &migf->async_data;
	async_data->out = kvzalloc(out_size, GFP_KERNEL);
	if (!async_data->out) {
		err = -ENOMEM;
		goto err_out;
	}

	/* no data exists till the callback comes back */
	migf->total_length = 0;
	get_file(migf->filp);
	async_data->mkey = mkey;
	async_data->pdn = pdn;
	err = mlx5_cmd_exec_cb(&migf->async_ctx, in, sizeof(in),
			       async_data->out,
			       out_size, mlx5vf_save_callback,
			       &async_data->cb_work);
	if (err)
		goto err_exec;

	return 0;

err_exec:
	fput(migf->filp);
	kvfree(async_data->out);
err_out:
	mlx5_core_destroy_mkey(mdev, mkey);
err_create_mkey:
	dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0);
err_dma_map:
	mlx5_core_dealloc_pd(mdev, pdn);
	return err;
}

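/*
 * mlx5vf_cmd_load_vhca_state() - mirror of the save path on the receiving
 * side, but synchronous: map the buffer for device reads, register it,
 * and have LOAD_VHCA_STATE pull migf->total_length bytes of image back
 * into the VF. An empty migration file is rejected with -EINVAL before
 * any setup is done.
 */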
int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev,
			       struct mlx5_vf_migration_file *migf)
{
	struct mlx5_core_dev *mdev;
	u32 out[MLX5_ST_SZ_DW(load_vhca_state_out)] = {};
	u32 in[MLX5_ST_SZ_DW(load_vhca_state_in)] = {};
	u32 pdn, mkey;
	int err;

	lockdep_assert_held(&mvdev->state_mutex);
	if (mvdev->mdev_detach)
		return -ENOTCONN;

	mutex_lock(&migf->lock);
	if (!migf->total_length) {
		err = -EINVAL;
		goto end;
	}

	mdev = mvdev->mdev;
	err = mlx5_core_alloc_pd(mdev, &pdn);
	if (err)
		goto end;

	err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_TO_DEVICE, 0);
	if (err)
		goto err_reg;

	err = _create_state_mkey(mdev, pdn, migf, &mkey);
	if (err)
		goto err_mkey;

	MLX5_SET(load_vhca_state_in, in, opcode,
		 MLX5_CMD_OP_LOAD_VHCA_STATE);
	MLX5_SET(load_vhca_state_in, in, op_mod, 0);
	MLX5_SET(load_vhca_state_in, in, vhca_id, mvdev->vhca_id);
	MLX5_SET(load_vhca_state_in, in, mkey, mkey);
	MLX5_SET(load_vhca_state_in, in, size, migf->total_length);

	err = mlx5_cmd_exec_inout(mdev, load_vhca_state, in, out);

	mlx5_core_destroy_mkey(mdev, mkey);
err_mkey:
	dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_TO_DEVICE, 0);
err_reg:
	mlx5_core_dealloc_pd(mdev, pdn);
end:
	mutex_unlock(&migf->lock);
	return err;
}