cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

adf_dev_mgr.c (10584B)


// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include "adf_cfg.h"
#include "adf_common_drv.h"

static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static u32 num_devices;
static u8 id_map[ADF_MAX_DEVICES];

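/*
 * One entry per tracked device: bdf is the VF's bus/function key
 * (~0 marks a PF placeholder), id is the fixed accel_id, and fake_id
 * is the user-visible id, compacted as earlier VFs detach. (Field
 * roles inferred from their usage below, not from an upstream comment.)
 */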
struct vf_id_map {
	u32 bdf;
	u32 id;
	u32 fake_id;
	bool attached;
	struct list_head list;
};

static int adf_get_vf_id(struct adf_accel_dev *vf)
{
	return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
		PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
		(PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
}
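
/*
 * The sum above is 8 * (slot - 1) + func: a zero-based VF index,
 * assuming VFs are exposed starting at slot 1 with eight functions per
 * slot. For example, slot 2 function 3 gives index 11. (Worked example
 * added editorially; the exact layout depends on the PF's SR-IOV
 * configuration.)
 */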

static int adf_get_vf_num(struct adf_accel_dev *vf)
{
	return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
}

static struct vf_id_map *adf_find_vf(u32 bdf)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);

		if (ptr->bdf == bdf)
			return ptr;
	}
	return NULL;
}

static int adf_get_vf_real_id(u32 fake)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);
		if (ptr->fake_id == fake)
			return ptr->id;
	}
	return -1;
}
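
/*
 * Both helpers walk vfs_table without taking the lock and rely on the
 * caller holding table_lock. fake_id is how user-visible ids stay
 * contiguous: when a VF detaches, adf_devmgr_rm_dev() decrements the
 * fake_id of every later entry while the real id stays fixed (inferred
 * from usage).
 */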

/**
 * adf_clean_vf_map() - Cleans VF id mappings
 * @vf: flag indicating whether mappings are cleaned
 *	for VFs only or for both VFs and PFs
 *
 * Function cleans internal ids for virtual functions.
 */
void adf_clean_vf_map(bool vf)
{
	struct vf_id_map *map;
	struct list_head *ptr, *tmp;

	mutex_lock(&table_lock);
	list_for_each_safe(ptr, tmp, &vfs_table) {
		map = list_entry(ptr, struct vf_id_map, list);
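		/* bdf is u32, so this compares against ~0 after implicit
		 * conversion: true for real VF entries, false for PF
		 * placeholders (whose ids are released in
		 * adf_devmgr_rm_dev()). */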
		if (map->bdf != -1) {
			id_map[map->id] = 0;
			num_devices--;
		}

		if (vf && map->bdf == -1)
			continue;

		list_del(ptr);
		kfree(map);
	}
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_clean_vf_map);

/**
 * adf_devmgr_update_class_index() - Update internal index
 * @hw_data:  Pointer to internal device data.
 *
 * Function updates internal dev index for VFs
 */
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
{
	struct adf_hw_device_class *class = hw_data->dev_class;
	struct list_head *itr;
	int i = 0;

	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

		if (ptr->hw_device->dev_class == class)
			ptr->hw_device->instance_id = i++;

		if (i == class->instances)
			break;
	}
}
EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);

static unsigned int adf_find_free_id(void)
{
	unsigned int i;

	for (i = 0; i < ADF_MAX_DEVICES; i++) {
		if (!id_map[i]) {
			id_map[i] = 1;
			return i;
		}
	}
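	/* Table full: return an out-of-range id; callers treat any value
	 * greater than ADF_MAX_DEVICES as allocation failure. */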
	return ADF_MAX_DEVICES + 1;
}

/**
 * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
 * @accel_dev:  Pointer to acceleration device.
 * @pf:		Corresponding PF if the accel_dev is a VF
 *
 * Function adds acceleration device to the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
		       struct adf_accel_dev *pf)
{
	struct list_head *itr;
	int ret = 0;

	if (num_devices == ADF_MAX_DEVICES) {
		dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
			ADF_MAX_DEVICES);
		return -EFAULT;
	}

	mutex_lock(&table_lock);
	atomic_set(&accel_dev->ref_count, 0);

	/* PF on host or VF on guest - optimized to remove redundant is_vf */
	if (!accel_dev->is_vf || !pf) {
		struct vf_id_map *map;

		list_for_each(itr, &accel_table) {
			struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

			if (ptr == accel_dev) {
				ret = -EEXIST;
				goto unlock;
			}
		}

		list_add_tail(&accel_dev->list, &accel_table);
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			ret = -EFAULT;
			goto unlock;
		}
		num_devices++;
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto unlock;
		}
		map->bdf = ~0;
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	} else if (accel_dev->is_vf && pf) {
		/* VF on host */
		struct vf_id_map *map;

		map = adf_find_vf(adf_get_vf_num(accel_dev));
		if (map) {
			struct vf_id_map *next;

			accel_dev->accel_id = map->id;
			list_add_tail(&accel_dev->list, &accel_table);
			map->fake_id++;
			map->attached = true;
			next = list_next_entry(map, list);
			while (next && &next->list != &vfs_table) {
				next->fake_id++;
				next = list_next_entry(next, list);
			}

			ret = 0;
			goto unlock;
		}

		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto unlock;
		}
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			kfree(map);
			ret = -EFAULT;
			goto unlock;
		}
		num_devices++;
		list_add_tail(&accel_dev->list, &accel_table);
		map->bdf = adf_get_vf_num(accel_dev);
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	}
unlock:
	mutex_unlock(&table_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
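
/*
 * Usage sketch (editorial, with hypothetical call sites - not code from
 * this file): a QAT driver's probe path registers the device before
 * bringing it up, and teardown mirrors it.
 *
 *	ret = adf_devmgr_add_dev(accel_dev, NULL);	// PF: no parent pf
 *	if (ret)
 *		return ret;
 *	...
 *	adf_devmgr_rm_dev(accel_dev, NULL);		// on remove/error
 *
 * For a host-side VF, the owning PF's accel_dev is passed instead of
 * NULL, which selects the vf_id_map bookkeeping above.
 */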

struct list_head *adf_devmgr_get_head(void)
{
	return &accel_table;
}

/**
 * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
 * @accel_dev:  Pointer to acceleration device.
 * @pf:		Corresponding PF if the accel_dev is a VF
 *
 * Function removes acceleration device from the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
		       struct adf_accel_dev *pf)
{
	mutex_lock(&table_lock);
	/* PF on host or VF on guest - optimized to remove redundant is_vf */
	if (!accel_dev->is_vf || !pf) {
		id_map[accel_dev->accel_id] = 0;
		num_devices--;
	} else if (accel_dev->is_vf && pf) {
		struct vf_id_map *map, *next;

		map = adf_find_vf(adf_get_vf_num(accel_dev));
		if (!map) {
			dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
			goto unlock;
		}
		map->fake_id--;
		map->attached = false;
		next = list_next_entry(map, list);
		while (next && &next->list != &vfs_table) {
			next->fake_id--;
			next = list_next_entry(next, list);
		}
	}
unlock:
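	/* Reached on both paths: the device always leaves accel_table,
	 * even when its VF mapping was not found above. */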
	list_del(&accel_dev->list);
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);

struct adf_accel_dev *adf_devmgr_get_first(void)
{
	struct adf_accel_dev *dev = NULL;

	if (!list_empty(&accel_table))
		dev = list_first_entry(&accel_table, struct adf_accel_dev,
				       list);
	return dev;
}

/**
 * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
 * @pci_dev:  Pointer to PCI device.
 *
 * Function returns acceleration device associated with the given PCI device.
 * To be used by QAT device specific drivers.
 *
 * Return: pointer to accel_dev or NULL if not found.
 */
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
{
	struct list_head *itr;

	mutex_lock(&table_lock);
	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

		if (ptr->accel_pci_dev.pci_dev == pci_dev) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
	mutex_unlock(&table_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);

struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
{
	struct list_head *itr;
	int real_id;

	mutex_lock(&table_lock);
	real_id = adf_get_vf_real_id(id);
	if (real_id < 0)
		goto unlock;

	id = real_id;

	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);
		if (ptr->accel_id == id) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
unlock:
	mutex_unlock(&table_lock);
	return NULL;
}
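
/*
 * The id accepted here is the user-visible (fake) id; it is translated
 * to the fixed accel_id via adf_get_vf_real_id() before accel_table is
 * scanned, so lookups keep working as ids compact when VFs detach
 * (inferred from the fake_id handling above).
 */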

int adf_devmgr_verify_id(u32 id)
{
	if (id == ADF_CFG_ALL_DEVICES)
		return 0;

	if (adf_devmgr_get_dev_by_id(id))
		return 0;

	return -ENODEV;
}

static int adf_get_num_dettached_vfs(void)
{
	struct list_head *itr;
	int vfs = 0;

	mutex_lock(&table_lock);
	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);
		if (ptr->bdf != ~0 && !ptr->attached)
			vfs++;
	}
	mutex_unlock(&table_lock);
	return vfs;
}

void adf_devmgr_get_num_dev(u32 *num)
{
	*num = num_devices - adf_get_num_dettached_vfs();
}
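
/*
 * The reported count excludes detached VFs, matching the compacted fake
 * ids: with 4 tracked devices of which one VF is detached, *num becomes
 * 3 and ids 0..2 remain resolvable via adf_devmgr_get_dev_by_id().
 * (Editorial example, not an upstream comment.)
 */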

/**
 * adf_dev_in_use() - Check whether accel_dev is currently in use
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when device is in use, 0 otherwise.
 */
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
	return atomic_read(&accel_dev->ref_count) != 0;
}
EXPORT_SYMBOL_GPL(adf_dev_in_use);

/**
 * adf_dev_get() - Increment accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Increment the accel_dev refcount and, if this is the first reference
 * taken while the device is in use, take a module reference too.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 when successful, -EFAULT when the module refcount could not
 * be taken.
 */
int adf_dev_get(struct adf_accel_dev *accel_dev)
{
	if (atomic_add_return(1, &accel_dev->ref_count) == 1)
		if (!try_module_get(accel_dev->owner))
			return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_get);

/**
 * adf_dev_put() - Decrement accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Decrement the accel_dev refcount and, when it drops back to zero,
 * release the module reference taken by adf_dev_get().
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_dev_put(struct adf_accel_dev *accel_dev)
{
	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
		module_put(accel_dev->owner);
}
EXPORT_SYMBOL_GPL(adf_dev_put);
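
/*
 * Callers must balance the get/put pair, e.g. holding a reference for
 * the duration of an operation on the device (editorial sketch):
 *
 *	if (adf_dev_get(accel_dev))
 *		return -EFAULT;
 *	...			// use the device
 *	adf_dev_put(accel_dev);
 *
 * Holding the reference also pins the owning module via try_module_get().
 */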

/**
 * adf_devmgr_in_reset() - Check whether device is in reset
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device is being reset, 0 otherwise.
 */
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);

/**
 * adf_dev_started() - Check whether device has started
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device has started, 0 otherwise.
 */
int adf_dev_started(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_dev_started);