cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

client-buffers.c (7054B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP Ring Buffers
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#include <linux/slab.h>
#include "client.h"

/**
 * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize RX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
{
	size_t	len = cl->device->fw_client->props.max_msg_length;
	int	j;
	struct ishtp_cl_rb *rb;
	int	ret = 0;
	unsigned long	flags;

	for (j = 0; j < cl->rx_ring_size; ++j) {
		rb = ishtp_io_rb_init(cl);
		if (!rb) {
			ret = -ENOMEM;
			goto out;
		}
		ret = ishtp_io_rb_alloc_buf(rb, len);
		if (ret)
			goto out;
		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}

	return	0;

out:
	dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
	ishtp_cl_free_rx_ring(cl);
	return	ret;
}

/**
 * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize TX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
{
	size_t	len = cl->device->fw_client->props.max_msg_length;
	int	j;
	unsigned long	flags;

	cl->tx_ring_free_size = 0;

	/* Allocate pool of free Tx bufs */
	for (j = 0; j < cl->tx_ring_size; ++j) {
		struct ishtp_cl_tx_ring	*tx_buf;

		tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
		if (!tx_buf)
			goto	out;

		tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
		if (!tx_buf->send_buf.data) {
			kfree(tx_buf);
			goto	out;
		}

		spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
		list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
		++cl->tx_ring_free_size;
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
	}
	return	0;
out:
	dev_err(&cl->device->dev, "error in allocating Tx pool\n");
	ishtp_cl_free_tx_ring(cl);
	return	-ENOMEM;
}
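
/*
 * Usage sketch (not part of this file; the ring-size constants are
 * illustrative): a client driver would typically pick ring depths and
 * allocate both rings while connecting, e.g.:
 *
 *	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
 *	cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
 *	if (ishtp_cl_alloc_rx_ring(cl))
 *		return -ENOMEM;
 *	if (ishtp_cl_alloc_tx_ring(cl)) {
 *		ishtp_cl_free_rx_ring(cl);
 *		return -ENOMEM;
 *	}
 *
 * Each alloc function above frees its own partially built ring on
 * failure, so the caller only unwinds the ring that already succeeded.
 */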

/**
 * ishtp_cl_free_rx_ring() - Free RX ring buffers
 * @cl: client device instance
 *
 * Free RX ring buffers
 */
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	unsigned long	flags;

	/* release allocated memory - pass over free_rb_list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	while (!list_empty(&cl->free_rb_list.list)) {
		rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
				list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	/* release allocated memory - pass over in_process_list */
	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	while (!list_empty(&cl->in_process_list.list)) {
		rb = list_entry(cl->in_process_list.list.next,
				struct ishtp_cl_rb, list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
}

/**
 * ishtp_cl_free_tx_ring() - Free TX ring buffers
 * @cl: client device instance
 *
 * Free TX ring buffers
 */
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_tx_ring	*tx_buf;
	unsigned long	flags;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
	/* release allocated memory - pass over tx_free_list */
	while (!list_empty(&cl->tx_free_list.list)) {
		tx_buf = list_entry(cl->tx_free_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		--cl->tx_ring_free_size;
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);

	spin_lock_irqsave(&cl->tx_list_spinlock, flags);
	/* release allocated memory - pass over tx_list */
	while (!list_empty(&cl->tx_list.list)) {
		tx_buf = list_entry(cl->tx_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
}

/**
 * ishtp_io_rb_free() - Free IO request block
 * @rb: IO request block
 *
 * Free IO request block memory
 */
void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
{
	if (rb == NULL)
		return;

	kfree(rb->buffer.data);
	kfree(rb);
}

/**
 * ishtp_io_rb_init() - Allocate and init IO request block
 * @cl: client device instance
 *
 * Allocate and initialize request block
 *
 * Return: Allocated IO request block pointer, or NULL on allocation failure
 */
struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;

	rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
	if (!rb)
		return NULL;

	INIT_LIST_HEAD(&rb->list);
	rb->cl = cl;
	rb->buf_idx = 0;
	return rb;
}

/**
 * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
 * @rb: IO request block
 * @length: length of response buffer
 *
 * Allocate response buffer
 *
 * Return: 0 on success, -EINVAL if @rb is NULL, -ENOMEM on allocation failure
 */
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
{
	if (!rb)
		return -EINVAL;

	if (length == 0)
		return 0;

	rb->buffer.data = kmalloc(length, GFP_KERNEL);
	if (!rb->buffer.data)
		return -ENOMEM;

	rb->buffer.size = length;
	return 0;
}

/**
 * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
 * @rb: IO request block
 *
 * Re-append rb to its client's free list and send flow control if needed
 *
 * Return: 0 on success else -EFAULT
 */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
{
	struct ishtp_cl *cl;
	int	rets = 0;
	unsigned long	flags;

	if (!rb || !rb->cl)
		return	-EFAULT;

	cl = rb->cl;
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	list_add_tail(&rb->list, &cl->free_rb_list.list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	/*
	 * If we returned the first buffer to an empty 'free' list,
	 * send flow control
	 */
	if (!cl->out_flow_ctrl_creds)
		rets = ishtp_cl_read_start(cl);

	return	rets;
}
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);

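/*
 * Usage sketch (hypothetical caller, not part of this file): a client
 * should recycle an rb promptly once its data has been consumed; as the
 * code above shows, recycling re-arms reading via ishtp_cl_read_start()
 * when no flow-control credit is outstanding:
 *
 *	process_msg(rb->buffer.data, rb->buf_idx);
 *	if (ishtp_cl_io_rb_recycle(rb))
 *		dev_warn(dev, "rb recycle failed\n");
 *
 * process_msg() and dev stand in for driver-specific code.
 */
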
/**
 * ishtp_cl_tx_empty() - test whether client device tx buffer is empty
 * @cl: Pointer to client device instance
 *
 * Look at the client device tx buffer list and check whether it is empty
 *
 * Return: true if client tx buffer list is empty else false
 */
bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
{
	int tx_list_empty;
	unsigned long tx_flags;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	tx_list_empty = list_empty(&cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	return !!tx_list_empty;
}
EXPORT_SYMBOL(ishtp_cl_tx_empty);
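
/*
 * Usage sketch (hypothetical caller): a driver flushing pending output
 * before disconnect could poll this helper, e.g.:
 *
 *	while (!ishtp_cl_tx_empty(cl))
 *		usleep_range(1000, 2000);
 *
 * A real caller would normally bound such a loop with a timeout.
 */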
    274
/**
 * ishtp_cl_rx_get_rb() - Get an rb from client device rx buffer list
 * @cl: Pointer to client device instance
 *
 * Check the client device in-process buffer list and get an rb from it.
 *
 * Return: rb pointer if buffer list isn't empty else NULL
 */
struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl)
{
	unsigned long rx_flags;
	struct ishtp_cl_rb *rb;

	spin_lock_irqsave(&cl->in_process_spinlock, rx_flags);
	rb = list_first_entry_or_null(&cl->in_process_list.list,
				struct ishtp_cl_rb, list);
	if (rb)
		list_del_init(&rb->list);
	spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags);

	return rb;
}
EXPORT_SYMBOL(ishtp_cl_rx_get_rb);
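
/*
 * Usage sketch (hypothetical RX handler, not part of this file):
 * draining the in-process list pairs naturally with
 * ishtp_cl_io_rb_recycle(), which returns each rb to the free list:
 *
 *	while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
 *		consume(rb->buffer.data, rb->buf_idx);
 *		ishtp_cl_io_rb_recycle(rb);
 *	}
 *
 * consume() stands in for the driver's message handler; rb->buf_idx
 * holds the number of valid bytes received into the buffer.
 */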