cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

target_core_hba.c (4114B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_hba.c
 *
 * This file contains the TCM HBA Transport related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/sock.h>
#include <net/tcp.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"

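/*
 * backend_mutex serializes registration, unregistration and lookup on
 * backend_list; hba_lock protects hba_id_counter and hba_list.
 */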
static LIST_HEAD(backend_list);
static DEFINE_MUTEX(backend_mutex);

static u32 hba_id_counter;

static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);

int transport_backend_register(const struct target_backend_ops *ops)
{
	struct target_backend *tb, *old;

	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
	if (!tb)
		return -ENOMEM;
	tb->ops = ops;

	mutex_lock(&backend_mutex);
	list_for_each_entry(old, &backend_list, list) {
		if (!strcmp(old->ops->name, ops->name)) {
			pr_err("backend %s already registered.\n", ops->name);
			mutex_unlock(&backend_mutex);
			kfree(tb);
			return -EEXIST;
		}
	}
	target_setup_backend_cits(tb);
	list_add_tail(&tb->list, &backend_list);
	mutex_unlock(&backend_mutex);

	pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n",
			ops->name, ops->owner);
	return 0;
}
EXPORT_SYMBOL(transport_backend_register);
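
/*
 * Example (illustrative only, not part of this file): a backend module
 * would typically define a target_backend_ops and register it from its
 * module init, unregistering on exit. The "foo" names below are
 * hypothetical, and real backends set additional ops beyond the fields
 * this file touches.
 *
 *	static int foo_attach_hba(struct se_hba *hba, u32 host_id)
 *	{
 *		return 0;
 *	}
 *
 *	static void foo_detach_hba(struct se_hba *hba)
 *	{
 *	}
 *
 *	static const struct target_backend_ops foo_ops = {
 *		.name		= "foo",
 *		.owner		= THIS_MODULE,
 *		.attach_hba	= foo_attach_hba,
 *		.detach_hba	= foo_detach_hba,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return transport_backend_register(&foo_ops);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		target_backend_unregister(&foo_ops);
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 */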

void target_backend_unregister(const struct target_backend_ops *ops)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (tb->ops == ops) {
			list_del(&tb->list);
			mutex_unlock(&backend_mutex);
			/*
			 * Wait for any outstanding backend driver ->rcu_head
			 * callbacks to complete post TBO->free_device() ->
			 * call_rcu(), before allowing backend driver module
			 * unload of target_backend_ops->owner to proceed.
			 */
			rcu_barrier();
			kfree(tb);
			return;
		}
	}
	mutex_unlock(&backend_mutex);
}
EXPORT_SYMBOL(target_backend_unregister);

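/*
 * Look up a registered backend by name and take a reference on its owning
 * module; a successful lookup must be balanced with module_put(), as
 * core_alloc_hba() and core_delete_hba() below do.
 */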
static struct target_backend *core_get_backend(const char *name)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (!strcmp(tb->ops->name, name))
			goto found;
	}
	mutex_unlock(&backend_mutex);
	return NULL;
found:
	if (tb->ops->owner && !try_module_get(tb->ops->owner))
		tb = NULL;
	mutex_unlock(&backend_mutex);
	return tb;
}

struct se_hba *
core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
{
	struct se_hba *hba;
	int ret = 0;

	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
	if (!hba) {
		pr_err("Unable to allocate struct se_hba\n");
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&hba->device_lock);
	mutex_init(&hba->hba_access_mutex);

	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
	hba->hba_flags |= hba_flags;

	hba->backend = core_get_backend(plugin_name);
	if (!hba->backend) {
		ret = -EINVAL;
		goto out_free_hba;
	}

	ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);
	if (ret < 0)
		goto out_module_put;

	spin_lock(&hba_lock);
	hba->hba_id = hba_id_counter++;
	list_add_tail(&hba->hba_node, &hba_list);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target Core\n",
			hba->hba_id);

	return hba;

out_module_put:
	module_put(hba->backend->ops->owner);
	hba->backend = NULL;
out_free_hba:
	kfree(hba);
	return ERR_PTR(ret);
}
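
/*
 * Illustrative caller sketch (not from this file): core_alloc_hba() returns
 * an ERR_PTR() on failure, so callers test the result with IS_ERR(). The
 * plugin name and flags below are examples only:
 *
 *	struct se_hba *hba;
 *
 *	hba = core_alloc_hba("iblock", 0, 0);
 *	if (IS_ERR(hba))
 *		return PTR_ERR(hba);
 *	...
 *	core_delete_hba(hba);
 */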

int
core_delete_hba(struct se_hba *hba)
{
	WARN_ON(hba->dev_count);

	hba->backend->ops->detach_hba(hba);

	spin_lock(&hba_lock);
	list_del(&hba->hba_node);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target Core\n",
			hba->hba_id);

	module_put(hba->backend->ops->owner);

	hba->backend = NULL;
	kfree(hba);
	return 0;
}

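/*
 * Fixed-format sense data carries only a 32-bit INFORMATION field, so a
 * device with more than U32_MAX blocks needs descriptor-format sense data
 * to report 64-bit LBAs.
 */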
bool target_sense_desc_format(struct se_device *dev)
{
	return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
}