cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

utils.c (7380B)


// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <linux/firmware.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include "avs.h"
#include "messages.h"

/* Caller responsible for holding adev->modres_mutex. */
static int avs_module_entry_index(struct avs_dev *adev, const guid_t *uuid)
{
	int i;

	for (i = 0; i < adev->mods_info->count; i++) {
		struct avs_module_entry *module;

		module = &adev->mods_info->entries[i];
		if (guid_equal(&module->uuid, uuid))
			return i;
	}

	return -ENOENT;
}

/* Caller responsible for holding adev->modres_mutex. */
static int avs_module_id_entry_index(struct avs_dev *adev, u32 module_id)
{
	int i;

	for (i = 0; i < adev->mods_info->count; i++) {
		struct avs_module_entry *module;

		module = &adev->mods_info->entries[i];
		if (module->module_id == module_id)
			return i;
	}

	return -ENOENT;
}

int avs_get_module_entry(struct avs_dev *adev, const guid_t *uuid, struct avs_module_entry *entry)
{
	int idx;

	mutex_lock(&adev->modres_mutex);

	idx = avs_module_entry_index(adev, uuid);
	if (idx >= 0)
		memcpy(entry, &adev->mods_info->entries[idx], sizeof(*entry));

	mutex_unlock(&adev->modres_mutex);
	return (idx < 0) ? idx : 0;
}

int avs_get_module_id_entry(struct avs_dev *adev, u32 module_id, struct avs_module_entry *entry)
{
	int idx;

	mutex_lock(&adev->modres_mutex);

	idx = avs_module_id_entry_index(adev, module_id);
	if (idx >= 0)
		memcpy(entry, &adev->mods_info->entries[idx], sizeof(*entry));

	mutex_unlock(&adev->modres_mutex);
	return (idx < 0) ? idx : 0;
}

int avs_get_module_id(struct avs_dev *adev, const guid_t *uuid)
{
	struct avs_module_entry module;
	int ret;

	ret = avs_get_module_entry(adev, uuid, &module);
	return !ret ? module.module_id : -ENOENT;
}

bool avs_is_module_ida_empty(struct avs_dev *adev, u32 module_id)
{
	bool ret = false;
	int idx;

	mutex_lock(&adev->modres_mutex);

	idx = avs_module_id_entry_index(adev, module_id);
	if (idx >= 0)
		ret = ida_is_empty(adev->mod_idas[idx]);

	mutex_unlock(&adev->modres_mutex);
	return ret;
}
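
/*
 * Illustrative sketch (not from this driver): resolving a firmware module
 * UUID to its runtime module id with the lookup helpers above. The GUID
 * value and the avs_example_lookup() name are hypothetical, for
 * demonstration only.
 */
static int avs_example_lookup(struct avs_dev *adev)
{
	static const guid_t example_uuid =
		GUID_INIT(0x12345678, 0x1234, 0x1234,
			  0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0);
	struct avs_module_entry entry;
	int ret;

	ret = avs_get_module_entry(adev, &example_uuid, &entry);
	if (ret)
		return ret; /* -ENOENT when the firmware did not report the module */

	return entry.module_id;
}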

/* Caller responsible for holding adev->modres_mutex. */
static void avs_module_ida_destroy(struct avs_dev *adev)
{
	int i = adev->mods_info ? adev->mods_info->count : 0;

	while (i--) {
		ida_destroy(adev->mod_idas[i]);
		kfree(adev->mod_idas[i]);
	}
	kfree(adev->mod_idas);
}

/* Caller responsible for holding adev->modres_mutex. */
static int
avs_module_ida_alloc(struct avs_dev *adev, struct avs_mods_info *newinfo, bool purge)
{
	struct avs_mods_info *oldinfo = adev->mods_info;
	struct ida **ida_ptrs;
	u32 tocopy_count = 0;
	int i;

	if (!purge && oldinfo) {
		if (oldinfo->count >= newinfo->count)
			dev_warn(adev->dev, "refreshing %d modules info with %d\n",
				 oldinfo->count, newinfo->count);
		tocopy_count = oldinfo->count;
	}

	ida_ptrs = kcalloc(newinfo->count, sizeof(*ida_ptrs), GFP_KERNEL);
	if (!ida_ptrs)
		return -ENOMEM;

	if (tocopy_count)
		memcpy(ida_ptrs, adev->mod_idas, tocopy_count * sizeof(*ida_ptrs));

	for (i = tocopy_count; i < newinfo->count; i++) {
		ida_ptrs[i] = kzalloc(sizeof(**ida_ptrs), GFP_KERNEL);
		if (!ida_ptrs[i]) {
			/*
			 * Unwind only the idas allocated in this loop; lower
			 * indices are still owned by adev->mod_idas.
			 */
			while (i-- > tocopy_count)
				kfree(ida_ptrs[i]);

			kfree(ida_ptrs);
			return -ENOMEM;
		}

		ida_init(ida_ptrs[i]);
	}

	/* If old elements have been reused, don't wipe them. */
	if (tocopy_count)
		kfree(adev->mod_idas);
	else
		avs_module_ida_destroy(adev);

	adev->mod_idas = ida_ptrs;
	return 0;
}

int avs_module_info_init(struct avs_dev *adev, bool purge)
{
	struct avs_mods_info *info;
	int ret;

	ret = avs_ipc_get_modules_info(adev, &info);
	if (ret)
		return AVS_IPC_RET(ret);

	mutex_lock(&adev->modres_mutex);

	ret = avs_module_ida_alloc(adev, info, purge);
	if (ret < 0) {
		dev_err(adev->dev, "initialize module idas failed: %d\n", ret);
		goto exit;
	}

	/* Refresh current information with newly received table. */
	kfree(adev->mods_info);
	adev->mods_info = info;

exit:
	mutex_unlock(&adev->modres_mutex);
	return ret;
}

void avs_module_info_free(struct avs_dev *adev)
{
	mutex_lock(&adev->modres_mutex);

	avs_module_ida_destroy(adev);
	kfree(adev->mods_info);
	adev->mods_info = NULL;

	mutex_unlock(&adev->modres_mutex);
}
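
/*
 * Illustrative sketch (hypothetical caller): one possible lifetime of the
 * module table. Passing purge=true makes avs_module_ida_alloc() rebuild the
 * per-module idas from scratch instead of reusing existing ones, and
 * avs_module_info_free() releases everything again. avs_example_boot() is
 * not a real function in this driver.
 */
static int avs_example_boot(struct avs_dev *adev)
{
	int ret;

	/* Query the DSP for its module list and build instance-id allocators. */
	ret = avs_module_info_init(adev, true);
	if (ret)
		return ret;

	/* ... avs_get_module_entry() / avs_module_id_alloc() users go here ... */

	/* Drop the table and its idas again, e.g. on driver removal. */
	avs_module_info_free(adev);
	return 0;
}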

int avs_module_id_alloc(struct avs_dev *adev, u16 module_id)
{
	int ret, idx, max_id;

	mutex_lock(&adev->modres_mutex);

	idx = avs_module_id_entry_index(adev, module_id);
	if (idx == -ENOENT) {
		dev_err(adev->dev, "invalid module id: %d", module_id);
		ret = -EINVAL;
		goto exit;
	}
	max_id = adev->mods_info->entries[idx].instance_max_count - 1;
	ret = ida_alloc_max(adev->mod_idas[idx], max_id, GFP_KERNEL);
exit:
	mutex_unlock(&adev->modres_mutex);
	return ret;
}

void avs_module_id_free(struct avs_dev *adev, u16 module_id, u8 instance_id)
{
	int idx;

	mutex_lock(&adev->modres_mutex);

	idx = avs_module_id_entry_index(adev, module_id);
	if (idx == -ENOENT) {
		dev_err(adev->dev, "invalid module id: %d", module_id);
		goto exit;
	}

	ida_free(adev->mod_idas[idx], instance_id);
exit:
	mutex_unlock(&adev->modres_mutex);
}
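
/*
 * Illustrative sketch (hypothetical caller): reserving an instance id for a
 * module before creating it on the DSP and returning the id afterwards.
 * avs_example_instantiate() is made up for demonstration only.
 */
static int avs_example_instantiate(struct avs_dev *adev, u16 module_id)
{
	int instance_id;

	/* Ids are bounded by instance_max_count reported by the firmware. */
	instance_id = avs_module_id_alloc(adev, module_id);
	if (instance_id < 0)
		return instance_id;

	/* ... send the module initialization IPC for this instance here ... */

	/* Hand the id back to the per-module ida once the instance is gone. */
	avs_module_id_free(adev, module_id, instance_id);
	return 0;
}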

/*
 * Once the driver loads a FW it should keep it in memory, so that we are not
 * affected by the FW being removed from the filesystem or, even worse, by a
 * different FW being loaded at runtime suspend/resume.
 */
int avs_request_firmware(struct avs_dev *adev, const struct firmware **fw_p, const char *name)
{
	struct avs_fw_entry *entry;
	int ret;

	/* first check in list if it is not already loaded */
	list_for_each_entry(entry, &adev->fw_list, node) {
		if (!strcmp(name, entry->name)) {
			*fw_p = entry->fw;
			return 0;
		}
	}

	/* FW is not loaded, let's load it now and add to the list */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->name = kstrdup(name, GFP_KERNEL);
	if (!entry->name) {
		kfree(entry);
		return -ENOMEM;
	}

	ret = request_firmware(&entry->fw, name, adev->dev);
	if (ret < 0) {
		kfree(entry->name);
		kfree(entry);
		return ret;
	}

	*fw_p = entry->fw;

	list_add_tail(&entry->node, &adev->fw_list);

	return 0;
}
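
/*
 * Illustrative sketch (hypothetical caller): a consumer of the cache above.
 * On failure it releases the most recently cached entry, which is exactly
 * what avs_release_last_firmware() below is meant for. The function and
 * firmware names are made up for demonstration only.
 */
static int avs_example_load(struct avs_dev *adev)
{
	const struct firmware *fw;
	int ret;

	ret = avs_request_firmware(adev, &fw, "intel/avs/example.bin");
	if (ret < 0)
		return ret;

	/* A trivial sanity check stands in for real parsing of the image. */
	if (!fw->size) {
		/* Drop the entry that was just cached on our behalf. */
		avs_release_last_firmware(adev);
		return -EINVAL;
	}

	return 0;
}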

/*
 * Release single FW entry, used to handle errors in functions calling
 * avs_request_firmware()
 */
void avs_release_last_firmware(struct avs_dev *adev)
{
	struct avs_fw_entry *entry;

	entry = list_last_entry(&adev->fw_list, typeof(*entry), node);

	list_del(&entry->node);
	release_firmware(entry->fw);
	kfree(entry->name);
	kfree(entry);
}

/*
 * Release all FW entries, used on driver removal
 */
void avs_release_firmwares(struct avs_dev *adev)
{
	struct avs_fw_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &adev->fw_list, node) {
		list_del(&entry->node);
		release_firmware(entry->fw);
		kfree(entry->name);
		kfree(entry);
	}
}

unsigned int __kfifo_fromio_locked(struct kfifo *fifo, const void __iomem *src, unsigned int len,
				   spinlock_t *lock)
{
	struct __kfifo *__fifo = &fifo->kfifo;
	unsigned long flags;
	unsigned int l, off;

	spin_lock_irqsave(lock, flags);
	len = min(len, kfifo_avail(fifo));
	off = __fifo->in & __fifo->mask;
	l = min(len, kfifo_size(fifo) - off);
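	/*
	 * Copy in two steps to handle wrap-around of the circular buffer:
	 * first up to the end of the fifo storage, then any remainder from
	 * its start.
	 */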
	memcpy_fromio(__fifo->data + off, src, l);
	memcpy_fromio(__fifo->data, src + l, len - l);
	/* Make sure data copied from SRAM is visible to all CPUs. */
	smp_mb();
	__fifo->in += len;
	spin_unlock_irqrestore(lock, flags);

	return len;
}
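
/*
 * Illustrative sketch (hypothetical caller): draining a log window in SRAM
 * into a kfifo that, for example, a debugfs reader could consume later. The
 * fifo, lock and window arguments are placeholders; only
 * __kfifo_fromio_locked() itself is defined in this file.
 */
static unsigned int avs_example_drain_log(struct kfifo *fifo, spinlock_t *lock,
					  void __iomem *window, unsigned int bytes)
{
	unsigned int copied;

	/* Returns how much actually fit; anything beyond that is dropped. */
	copied = __kfifo_fromio_locked(fifo, window, bytes, lock);

	return copied;
}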