cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

uar.c (8864B)


      1/*
      2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
      3 *
      4 * This software is available to you under a choice of one of two
      5 * licenses.  You may choose to be licensed under the terms of the GNU
      6 * General Public License (GPL) Version 2, available from the file
      7 * COPYING in the main directory of this source tree, or the
      8 * OpenIB.org BSD license below:
      9 *
     10 *     Redistribution and use in source and binary forms, with or
     11 *     without modification, are permitted provided that the following
     12 *     conditions are met:
     13 *
     14 *      - Redistributions of source code must retain the above
     15 *        copyright notice, this list of conditions and the following
     16 *        disclaimer.
     17 *
     18 *      - Redistributions in binary form must reproduce the above
     19 *        copyright notice, this list of conditions and the following
     20 *        disclaimer in the documentation and/or other materials
     21 *        provided with the distribution.
     22 *
     23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
     24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
     26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
     27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
     28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
     29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     30 * SOFTWARE.
     31 */
     32
     33#include <linux/kernel.h>
     34#include <linux/io-mapping.h>
     35#include <linux/mlx5/driver.h>
     36#include "mlx5_core.h"
     37
     38static int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
     39{
     40	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
     41	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
     42	int err;
     43
     44	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
     45	err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
     46	if (err)
     47		return err;
     48
     49	*uarn = MLX5_GET(alloc_uar_out, out, uar);
     50	return 0;
     51}
     52
     53static int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
     54{
     55	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};
     56
     57	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
     58	MLX5_SET(dealloc_uar_in, in, uar, uarn);
     59	return mlx5_cmd_exec_in(dev, dealloc_uar, in);
     60}
     61
     62static int uars_per_sys_page(struct mlx5_core_dev *mdev)
     63{
     64	if (MLX5_CAP_GEN(mdev, uar_4k))
     65		return MLX5_CAP_GEN(mdev, num_of_uars_per_page);
     66
     67	return 1;
     68}
     69
     70static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
     71{
     72	u32 system_page_index;
     73
     74	if (MLX5_CAP_GEN(mdev, uar_4k))
     75		system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
     76	else
     77		system_page_index = index;
     78
     79	return (mdev->bar_addr >> PAGE_SHIFT) + system_page_index;
     80}
     81
/* kref release callback: tear down a UARs page once its last reference
 * is dropped.  Both callers (mlx5_put_uars_page(), mlx5_free_bfreg())
 * invoke kref_put() while holding the list mutex, so list_del() here is
 * serialized against list users.
 */
static void up_rel_func(struct kref *kref)
{
	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);

	list_del(&up->list);
	iounmap(up->map);
	/* best effort: the page is being destroyed regardless, just warn */
	if (mlx5_cmd_free_uar(up->mdev, up->index))
		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
	bitmap_free(up->reg_bitmap);
	bitmap_free(up->fp_bitmap);
	kfree(up);
}
     94
/* Allocate and map one system page worth of UARs.
 *
 * Allocates the tracking structure and its two bfreg bitmaps on the
 * device's NUMA node, asks the device for a UAR index, and ioremaps the
 * page (write-combining when @map_wc).  Returns the new page with an
 * initialized refcount, or ERR_PTR() on failure.  The page is not linked
 * onto any list here; the caller does that under the appropriate lock.
 */
static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
					      bool map_wc)
{
	struct mlx5_uars_page *up;
	int err = -ENOMEM;
	phys_addr_t pfn;
	int bfregs;
	int node;
	int i;

	bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
	node = mdev->priv.numa_node;
	up = kzalloc_node(sizeof(*up), GFP_KERNEL, node);
	if (!up)
		return ERR_PTR(err);

	up->mdev = mdev;
	up->reg_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
	if (!up->reg_bitmap)
		goto error1;

	up->fp_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
	if (!up->fp_bitmap)
		goto error1;

	/* Within each UAR the first MLX5_NON_FP_BFREGS_PER_UAR slots are
	 * regular bfregs and the remainder are fast path; a set bit in the
	 * corresponding bitmap means "free".
	 */
	for (i = 0; i < bfregs; i++)
		if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
			set_bit(i, up->reg_bitmap);
		else
			set_bit(i, up->fp_bitmap);

	up->bfregs = bfregs;
	up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
	up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;

	err = mlx5_cmd_alloc_uar(mdev, &up->index);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
		goto error1;
	}

	pfn = uar2pfn(mdev, up->index);
	if (map_wc) {
		up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			/* -EAGAIN lets mlx5_alloc_bfreg() retry without WC */
			err = -EAGAIN;
			goto error2;
		}
	} else {
		up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			err = -ENOMEM;
			goto error2;
		}
	}
	kref_init(&up->ref_count);
	mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
		      up->index, up->bfregs);
	return up;

error2:
	if (mlx5_cmd_free_uar(mdev, up->index))
		mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
error1:
	bitmap_free(up->fp_bitmap);
	bitmap_free(up->reg_bitmap);
	kfree(up);
	return ERR_PTR(err);
}
    164
/* Take a reference on the shared (non-write-combining) UARs page,
 * allocating and listing it on first use.  Must be balanced with
 * mlx5_put_uars_page().  Returns the page, or an ERR_PTR() propagated
 * from alloc_uars_page().
 */
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_uars_page *ret;

	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
		/* reuse the page at the head of the list */
		ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
				       struct mlx5_uars_page, list);
		kref_get(&ret->ref_count);
		goto out;
	}
	ret = alloc_uars_page(mdev, false);
	if (IS_ERR(ret))
		goto out;
	list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);

	return ret;
}
EXPORT_SYMBOL(mlx5_get_uars_page);
    186
/* Drop a reference taken by mlx5_get_uars_page().  The list mutex is
 * held across kref_put() so that up_rel_func() can unlink the page
 * safely when the last reference goes away.
 */
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
{
	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
}
EXPORT_SYMBOL(mlx5_put_uars_page);
    194
    195static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
    196{
    197	/* return the offset in bytes from the start of the page to the
    198	 * blue flame area of the UAR
    199	 */
    200	return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
    201	       (dbi % MLX5_BFREGS_PER_UAR) *
    202	       (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
    203}
    204
/* Allocate one blue flame register of the requested kind.
 *
 * Picks the UARs page at the head of the matching list (WC vs regular),
 * allocating a fresh page when the list is empty, then claims a free
 * slot from the page's fast-path or regular bitmap and fills in @bfreg.
 * A page whose last slot of the requested kind is taken here is removed
 * from the list so later allocations start a new page.
 */
static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		       bool map_wc, bool fast_path)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct list_head *head;
	unsigned long *bitmap;
	unsigned int *avail;
	struct mutex *lock;  /* pointer to right mutex */
	int dbi;

	bfregs = &mdev->priv.bfregs;
	if (map_wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	mutex_lock(lock);
	if (list_empty(head)) {
		up = alloc_uars_page(mdev, map_wc);
		if (IS_ERR(up)) {
			mutex_unlock(lock);
			return PTR_ERR(up);
		}
		list_add(&up->list, head);
	} else {
		up = list_entry(head->next, struct mlx5_uars_page, list);
		kref_get(&up->ref_count);
	}
	if (fast_path) {
		bitmap = up->fp_bitmap;
		avail = &up->fp_avail;
	} else {
		bitmap = up->reg_bitmap;
		avail = &up->reg_avail;
	}
	/* NOTE(review): assumes the head page still has a free slot of the
	 * requested kind; find_first_bit()'s result is not range-checked
	 * against up->bfregs — confirm list maintenance guarantees this.
	 */
	dbi = find_first_bit(bitmap, up->bfregs);
	clear_bit(dbi, bitmap);
	(*avail)--;
	if (!(*avail))
		list_del(&up->list);

	bfreg->map = up->map + map_offset(mdev, dbi);
	bfreg->up = up;
	bfreg->wc = map_wc;
	bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
	mutex_unlock(lock);

	return 0;
}
    257
    258int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
    259		     bool map_wc, bool fast_path)
    260{
    261	int err;
    262
    263	err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
    264	if (!err)
    265		return 0;
    266
    267	if (err == -EAGAIN && map_wc)
    268		return alloc_bfreg(mdev, bfreg, false, fast_path);
    269
    270	return err;
    271}
    272EXPORT_SYMBOL(mlx5_alloc_bfreg);
    273
    274static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
    275					   struct mlx5_uars_page *up,
    276					   struct mlx5_sq_bfreg *bfreg)
    277{
    278	unsigned int uar_idx;
    279	unsigned int bfreg_idx;
    280	unsigned int bf_reg_size;
    281
    282	bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);
    283
    284	uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
    285	bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;
    286
    287	return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
    288}
    289
/* Release a bfreg previously obtained via mlx5_alloc_bfreg().
 *
 * Recomputes the slot index from the mapped address, returns the slot to
 * the matching bitmap, relinks the owning page onto its list when this
 * was the first slot of its kind to become free again, and drops the
 * page reference (which may destroy the page via up_rel_func()).
 */
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct mutex *lock; /* pointer to right mutex */
	unsigned int dbi;
	bool fp;
	unsigned int *avail;
	unsigned long *bitmap;
	struct list_head *head;

	bfregs = &mdev->priv.bfregs;
	if (bfreg->wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	up = bfreg->up;
	dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
	/* slot layout per UAR: regular bfregs first, fast-path slots after */
	fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
	if (fp) {
		avail = &up->fp_avail;
		bitmap = up->fp_bitmap;
	} else {
		avail = &up->reg_avail;
		bitmap = up->reg_bitmap;
	}
	mutex_lock(lock);
	(*avail)++;
	set_bit(dbi, bitmap);
	/* page went from exhausted to usable: expose it on the list again */
	if (*avail == 1)
		list_add_tail(&up->list, head);

	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(lock);
}
EXPORT_SYMBOL(mlx5_free_bfreg);