cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mthca_allocator.c (7576B)


/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitmap.h>

#include "mthca_dev.h"

/* Trivial bitmap-based allocator */
u32 mthca_alloc(struct mthca_alloc *alloc)
{
	unsigned long flags;
	u32 obj;

	spin_lock_irqsave(&alloc->lock, flags);

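	/*
	 * Scan for a free entry starting at ->last (lowered on each
	 * free); if none is found, rotate the high "generation" bits in
	 * ->top and rescan from the bottom, so a recycled index comes
	 * back as a different object number.
	 */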
	obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
	if (obj >= alloc->max) {
		alloc->top = (alloc->top + alloc->max) & alloc->mask;
		obj = find_first_zero_bit(alloc->table, alloc->max);
	}

	if (obj < alloc->max) {
		__set_bit(obj, alloc->table);
		obj |= alloc->top;
	} else
		obj = -1;

	spin_unlock_irqrestore(&alloc->lock, flags);

	return obj;
}

void mthca_free(struct mthca_alloc *alloc, u32 obj)
{
	unsigned long flags;

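	/* Strip the ->top generation bits to recover the table index. */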
	obj &= alloc->max - 1;

	spin_lock_irqsave(&alloc->lock, flags);

	__clear_bit(obj, alloc->table);
	alloc->last = min(alloc->last, obj);
	alloc->top = (alloc->top + alloc->max) & alloc->mask;

	spin_unlock_irqrestore(&alloc->lock, flags);
}

int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
		     u32 reserved)
{
	/* num must be a power of 2 */
	if (num != 1 << (ffs(num) - 1))
		return -EINVAL;

	alloc->last = 0;
	alloc->top  = 0;
	alloc->max  = num;
	alloc->mask = mask;
	spin_lock_init(&alloc->lock);
	alloc->table = bitmap_zalloc(num, GFP_KERNEL);
	if (!alloc->table)
		return -ENOMEM;

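	/* Mark the reserved low entries as permanently allocated. */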
	bitmap_set(alloc->table, 0, reserved);

	return 0;
}

void mthca_alloc_cleanup(struct mthca_alloc *alloc)
{
	bitmap_free(alloc->table);
}
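
/*
 * Illustrative sketch, not part of the original file: the allocator's
 * lifecycle as a caller might drive it.  The sizes below are made up;
 * real callers in this driver derive num/mask/reserved from the device
 * limits.
 */
#if 0
static int example_alloc_usage(void)
{
	struct mthca_alloc alloc;
	u32 obj;
	int err;

	/* 256 entries; first 4 reserved; ->top cycles through four
	 * "generations" of object numbers under the 10-bit mask. */
	err = mthca_alloc_init(&alloc, 256, (1 << 10) - 1, 4);
	if (err)
		return err;

	obj = mthca_alloc(&alloc);
	if (obj != (u32) -1)	/* -1 (as u32) means the table is full */
		mthca_free(&alloc, obj);

	mthca_alloc_cleanup(&alloc);
	return 0;
}
#endif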

/*
 * Array of pointers with lazy allocation of leaf pages.  Callers of
 * _get, _set and _clear methods must use a lock or otherwise
 * serialize access to the array.
 */

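/*
 * Entry 'index' lives on leaf page (index * sizeof(void *)) >>
 * PAGE_SHIFT, at slot index & MTHCA_ARRAY_MASK within that page.
 */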
#define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1)

void *mthca_array_get(struct mthca_array *array, int index)
{
	int p = (index * sizeof (void *)) >> PAGE_SHIFT;

	if (array->page_list[p].page)
		return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
	else
		return NULL;
}

int mthca_array_set(struct mthca_array *array, int index, void *value)
{
	int p = (index * sizeof (void *)) >> PAGE_SHIFT;

	/* Allocate with GFP_ATOMIC because we'll be called with locks held. */
	if (!array->page_list[p].page)
		array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);

	if (!array->page_list[p].page)
		return -ENOMEM;

	array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
	++array->page_list[p].used;

	return 0;
}

void mthca_array_clear(struct mthca_array *array, int index)
{
	int p = (index * sizeof (void *)) >> PAGE_SHIFT;

	if (--array->page_list[p].used == 0) {
		free_page((unsigned long) array->page_list[p].page);
		array->page_list[p].page = NULL;
	} else
		array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL;

	if (array->page_list[p].used < 0)
		pr_debug("Array %p index %d page %d with ref count %d < 0\n",
			 array, index, p, array->page_list[p].used);
}

int mthca_array_init(struct mthca_array *array, int nent)
{
	int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;

	array->page_list = kmalloc_array(npage, sizeof(*array->page_list),
					 GFP_KERNEL);
	if (!array->page_list)
		return -ENOMEM;

	for (i = 0; i < npage; ++i) {
		array->page_list[i].page = NULL;
		array->page_list[i].used = 0;
	}

	return 0;
}

void mthca_array_cleanup(struct mthca_array *array, int nent)
{
	int i;

	for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
		free_page((unsigned long) array->page_list[i].page);

	kfree(array->page_list);
}
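
/*
 * Illustrative sketch, not part of the original file: the array acts as
 * a sparse index -> pointer map.  As the comment above requires, _set,
 * _get and _clear are serialized here under a caller-provided lock.
 */
#if 0
static int example_array_usage(spinlock_t *lock, void *obj)
{
	struct mthca_array array;
	int err;

	err = mthca_array_init(&array, 1024);
	if (err)
		return err;

	spin_lock(lock);
	/* May allocate a leaf page with GFP_ATOMIC under the lock. */
	err = mthca_array_set(&array, 42, obj);
	spin_unlock(lock);
	if (!err) {
		spin_lock(lock);
		obj = mthca_array_get(&array, 42);
		mthca_array_clear(&array, 42);
		spin_unlock(lock);
	}

	mthca_array_cleanup(&array, 1024);
	return err;
}
#endif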

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
		    union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
		    int hca_write, struct mthca_mr *mr)
{
	int err = -ENOMEM;
	int npages, shift;
	u64 *dma_list = NULL;
	dma_addr_t t;
	int i;

	if (size <= max_direct) {
		*is_direct = 1;
		npages     = 1;
		shift      = get_order(size) + PAGE_SHIFT;

		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
						     size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		dma_unmap_addr_set(&buf->direct, mapping, t);

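		/*
		 * The memory region wants pages aligned to their own
		 * size: if the DMA address isn't aligned to the whole
		 * buffer, describe it as twice as many half-size pages
		 * until it is.
		 */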
		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc_array(npages, sizeof(*dma_list),
					 GFP_KERNEL);
		if (!dma_list)
			goto err_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		*is_direct = 0;
		npages     = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		shift      = PAGE_SHIFT;

		dma_list = kmalloc_array(npages, sizeof(*dma_list),
					 GFP_KERNEL);
		if (!dma_list)
			return -ENOMEM;

		buf->page_list = kmalloc_array(npages,
					       sizeof(*buf->page_list),
					       GFP_KERNEL);
		if (!buf->page_list)
			goto err_out;

		for (i = 0; i < npages; ++i)
			buf->page_list[i].buf = NULL;

		for (i = 0; i < npages; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			dma_list[i] = t;
			dma_unmap_addr_set(&buf->page_list[i], mapping, t);

			clear_page(buf->page_list[i].buf);
		}
	}

	err = mthca_mr_alloc_phys(dev, pd->pd_num,
				  dma_list, shift, npages,
				  0, size,
				  MTHCA_MPT_FLAG_LOCAL_READ |
				  (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
				  mr);
	if (err)
		goto err_free;

	kfree(dma_list);

	return 0;

err_free:
	mthca_buf_free(dev, size, buf, *is_direct, NULL);

err_out:
	kfree(dma_list);

	return err;
}

void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
		    int is_direct, struct mthca_mr *mr)
{
	int i;

	if (mr)
		mthca_free_mr(dev, mr);

	if (is_direct)
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  dma_unmap_addr(&buf->direct, mapping));
	else {
		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  buf->page_list[i].buf,
					  dma_unmap_addr(&buf->page_list[i],
							 mapping));
		kfree(buf->page_list);
	}
}
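
/*
 * Illustrative sketch, not part of the original file: a queue setup
 * path (e.g. CQ or QP creation) allocates its buffer and the covering
 * MR in one call, then frees both together.  The size and max_direct
 * values here are made up.
 */
#if 0
static int example_buf_usage(struct mthca_dev *dev, struct mthca_pd *pd)
{
	union mthca_buf buf;
	struct mthca_mr mr;
	int is_direct;
	int err;

	err = mthca_buf_alloc(dev, 16384, 4 * PAGE_SIZE,
			      &buf, &is_direct, pd, 1, &mr);
	if (err)
		return err;

	/* ... hand the buffer to the hardware via the MR ... */

	mthca_buf_free(dev, 16384, &buf, is_direct, &mr);
	return 0;
}
#endif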