cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

restrack.c (4112B)


// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2019-2020, Mellanox Technologies Ltd. All rights reserved.
 */

#include <uapi/rdma/rdma_netlink.h>
#include <linux/mlx5/rsc_dump.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/restrack.h>
#include "mlx5_ib.h"
#include "restrack.h"

#define MAX_DUMP_SIZE 1024

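/*
 * Dump the firmware context of a single resource into 'data' via the
 * mlx5 resource dump interface.  Chunks of at most PAGE_SIZE are copied
 * out until the dump command reports completion or the next chunk would
 * exceed MAX_DUMP_SIZE.
 */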
static int dump_rsc(struct mlx5_core_dev *dev, enum mlx5_sgmt_type type,
		    int index, void *data, int *data_len)
{
	struct mlx5_core_dev *mdev = dev;
	struct mlx5_rsc_dump_cmd *cmd;
	struct mlx5_rsc_key key = {};
	struct page *page;
	int offset = 0;
	int err = 0;
	int cmd_err;
	int size;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	key.size = PAGE_SIZE;
	key.rsc = type;
	key.index1 = index;
	key.num_of_obj1 = 1;

	cmd = mlx5_rsc_dump_cmd_create(mdev, &key);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto free_page;
	}

	do {
		cmd_err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
		if (cmd_err < 0 || size + offset > MAX_DUMP_SIZE) {
			err = cmd_err;
			goto destroy_cmd;
		}
		memcpy(data + offset, page_address(page), size);
		offset += size;
	} while (cmd_err > 0);
	*data_len = offset;

destroy_cmd:
	mlx5_rsc_dump_cmd_destroy(cmd);
free_page:
	__free_page(page);
	return err;
}

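/*
 * Dump a resource's raw firmware context and attach it to the netlink
 * message as an RDMA_NLDEV_ATTR_RES_RAW attribute.
 */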
static int fill_res_raw(struct sk_buff *msg, struct mlx5_ib_dev *dev,
			enum mlx5_sgmt_type type, u32 key)
{
	int len = 0;
	void *data;
	int err;

	data = kzalloc(MAX_DUMP_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = dump_rsc(dev->mdev, type, key, data, &len);
	if (err)
		goto out;

	err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
out:
	kfree(data);
	return err;
}

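/*
 * Export the per-MR on-demand-paging (ODP) statistics as hardware
 * counters: page_faults, page_invalidations and page_prefetch.
 * Non-ODP MRs have nothing to report.
 */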
static int fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct nlattr *table_attr;

	if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
		return 0;

	table_attr = nla_nest_start(msg,
				    RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);

	if (!table_attr)
		goto err;

	if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
					 atomic64_read(&mr->odp_stats.faults)))
		goto err_table;
	if (rdma_nl_stat_hwcounter_entry(
		    msg, "page_invalidations",
		    atomic64_read(&mr->odp_stats.invalidations)))
		goto err_table;
	if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
					 atomic64_read(&mr->odp_stats.prefetch)))
		goto err_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}

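/* Raw dump of the MKEY context backing this MR. */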
static int fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);

	return fill_res_raw(msg, mr_to_mdev(mr), MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
			    mlx5_mkey_to_idx(mr->mmkey.key));
}

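/*
 * Driver-specific MR attributes: for ODP MRs, report whether the
 * mapping is implicit or explicit.
 */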
static int fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct nlattr *table_attr;

	if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	if (mr->is_odp_implicit) {
		if (rdma_nl_put_driver_string(msg, "odp", "implicit"))
			goto err;
	} else {
		if (rdma_nl_put_driver_string(msg, "odp", "explicit"))
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}

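/* Raw dump of the CQ context. */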
static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);

	return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_CQ, cq->mcq.cqn);
}

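/* Raw dump of the QP context. */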
static int fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);

	return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_QP,
			    ibqp->qp_num);
}

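/*
 * Resource tracking callbacks handed to the RDMA core; they serve the
 * driver-specific and raw resource queries issued from userspace
 * (e.g. via rdmatool).
 */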
static const struct ib_device_ops restrack_ops = {
	.fill_res_cq_entry_raw = fill_res_cq_entry_raw,
	.fill_res_mr_entry = fill_res_mr_entry,
	.fill_res_mr_entry_raw = fill_res_mr_entry_raw,
	.fill_res_qp_entry_raw = fill_res_qp_entry_raw,
	.fill_stat_mr_entry = fill_stat_mr_entry,
};

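/* Register the resource tracking callbacks with the IB device. */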
int mlx5_ib_restrack_init(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &restrack_ops);
	return 0;
}