cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

efct_io.c (4240B)
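
Implements the SCSI IO object pool for the Emulex (Broadcom) efct FC target driver: IO tracking objects are allocated up front, each with a DMA-coherent response buffer and a scatter-gather list, and recycled through a spinlock-protected free list.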


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"
#include "efct_io.h"

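/*
 * Pool of pre-allocated IO tracking objects. All EFCT_NUM_SCSI_IOS
 * entries are allocated once at pool creation; unused objects sit on
 * 'freelist' and are recycled rather than allocated per command.
 */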
struct efct_io_pool {
	struct efct *efct;
	spinlock_t lock;	/* IO pool lock */
	u32 io_num_ios;		/* Total IOs allocated */
	struct efct_io *ios[EFCT_NUM_SCSI_IOS];
	struct list_head freelist;
};

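/*
 * Allocate the IO pool and pre-allocate every IO object together with
 * its DMA-coherent response buffer and a scatter-gather list of
 * 'num_sgl' entries. If an IO object itself cannot be allocated the
 * loop stops early and a smaller pool is returned; if a buffer
 * allocation fails, the partially built pool is torn down and NULL is
 * returned.
 */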
struct efct_io_pool *
efct_io_pool_create(struct efct *efct, u32 num_sgl)
{
	u32 i = 0;
	struct efct_io_pool *io_pool;
	struct efct_io *io;

	/* Allocate the IO pool */
	io_pool = kzalloc(sizeof(*io_pool), GFP_KERNEL);
	if (!io_pool)
		return NULL;

	io_pool->efct = efct;
	INIT_LIST_HEAD(&io_pool->freelist);
	/* initialize IO pool lock */
	spin_lock_init(&io_pool->lock);

	for (i = 0; i < EFCT_NUM_SCSI_IOS; i++) {
		io = kzalloc(sizeof(*io), GFP_KERNEL);
		if (!io)
			break;

		io_pool->io_num_ios++;
		io_pool->ios[i] = io;
		io->tag = i;
		io->instance_index = i;

		/* Allocate a response buffer */
		io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
		io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
						     io->rspbuf.size,
						     &io->rspbuf.phys, GFP_KERNEL);
		if (!io->rspbuf.virt) {
			efc_log_err(efct, "dma_alloc rspbuf failed\n");
			efct_io_pool_free(io_pool);
			return NULL;
		}

		/* Allocate SGL */
		io->sgl = kcalloc(num_sgl, sizeof(*io->sgl), GFP_KERNEL);
		if (!io->sgl) {
			efct_io_pool_free(io_pool);
			return NULL;
		}

		io->sgl_allocated = num_sgl;
		io->sgl_count = 0;

		INIT_LIST_HEAD(&io->list_entry);
		list_add_tail(&io->list_entry, &io_pool->freelist);
	}

	return io_pool;
}

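/*
 * Tear down the IO pool: release each IO's scatter-gather list, DMA
 * response buffer and the IO object itself, then free the pool. NULL
 * slots from a partial allocation are skipped.
 */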
int
efct_io_pool_free(struct efct_io_pool *io_pool)
{
	struct efct *efct;
	u32 i;
	struct efct_io *io;

	if (io_pool) {
		efct = io_pool->efct;

		for (i = 0; i < io_pool->io_num_ios; i++) {
			io = io_pool->ios[i];
			if (!io)
				continue;

			kfree(io->sgl);
			dma_free_coherent(&efct->pci->dev,
					  io->rspbuf.size, io->rspbuf.virt,
					  io->rspbuf.phys);
			memset(&io->rspbuf, 0, sizeof(struct efc_dma));
			/*
			 * Free the IO object itself; only its buffers were
			 * released above, so dropping it here avoids leaking
			 * every IO on pool teardown.
			 */
			kfree(io);
		}

		kfree(io_pool);
		efct->xport->io_pool = NULL;
	}

	return 0;
}

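/*
 * Take an IO object off the free list under the pool lock and reset its
 * per-command state. Returns NULL when the pool is exhausted.
 */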
struct efct_io *
efct_io_pool_io_alloc(struct efct_io_pool *io_pool)
{
	struct efct_io *io = NULL;
	struct efct *efct;
	unsigned long flags = 0;

	efct = io_pool->efct;

	spin_lock_irqsave(&io_pool->lock, flags);

	if (!list_empty(&io_pool->freelist)) {
		io = list_first_entry(&io_pool->freelist, struct efct_io,
				      list_entry);
		list_del_init(&io->list_entry);
	}

	spin_unlock_irqrestore(&io_pool->lock, flags);

	if (!io)
		return NULL;

	io->io_type = EFCT_IO_TYPE_MAX;
	io->hio_type = EFCT_HW_IO_MAX;
	io->hio = NULL;
	io->transferred = 0;
	io->efct = efct;
	io->timeout = 0;
	io->sgl_count = 0;
	io->tgt_task_tag = 0;
	io->init_task_tag = 0;
	io->hw_tag = 0;
	io->display_name = "pending";
	io->seq_init = 0;
	io->io_free = 0;
	io->release = NULL;
	atomic_add_return(1, &efct->xport->io_active_count);
	atomic_add_return(1, &efct->xport->io_total_alloc);
	return io;
}

/* Return an IO tracking object to the pool, releasing any HW IO attached to it */
void
efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io)
{
	struct efct *efct;
	struct efct_hw_io *hio = NULL;
	unsigned long flags = 0;

	efct = io_pool->efct;

	spin_lock_irqsave(&io_pool->lock, flags);
	hio = io->hio;
	io->hio = NULL;
	io->io_free = 1;
	INIT_LIST_HEAD(&io->list_entry);
	list_add(&io->list_entry, &io_pool->freelist);
	spin_unlock_irqrestore(&io_pool->lock, flags);

	if (hio)
		efct_hw_io_free(&efct->hw, hio);

	atomic_sub_return(1, &efct->xport->io_active_count);
	atomic_add_return(1, &efct->xport->io_total_free);
}

/*
 * Find an I/O on a node by ox_id, and by rx_id unless rx_id is 0xffff.
 * Takes a reference on the IO; returns NULL if no match is found or the
 * reference could not be taken.
 */
struct efct_io *
efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
		    u16 ox_id, u16 rx_id)
{
	struct efct_io *io = NULL;
	unsigned long flags = 0;
	bool found = false;

	spin_lock_irqsave(&node->active_ios_lock, flags);
	list_for_each_entry(io, &node->active_ios, list_entry) {
		if ((io->cmd_tgt && io->init_task_tag == ox_id) &&
		    (rx_id == 0xffff || io->tgt_task_tag == rx_id)) {
			if (kref_get_unless_zero(&io->ref))
				found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&node->active_ios_lock, flags);
	return found ? io : NULL;
}
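
/*
 * Typical call sequence (sketch only; efct/xport setup, error handling
 * and the actual command processing are omitted):
 *
 *	pool = efct_io_pool_create(efct, num_sgl);	- at transport init
 *	io = efct_io_pool_io_alloc(pool);		- per SCSI command
 *	...						- run the command
 *	efct_io_pool_io_free(pool, io);			- on completion
 *	efct_io_pool_free(pool);			- at transport teardown
 */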