cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

seq_fifo.c (5865B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   ALSA sequencer FIFO
 *   Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 */

#include <sound/core.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "seq_fifo.h"
#include "seq_lock.h"


/* FIFO */

/* create new fifo */
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
	struct snd_seq_fifo *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->pool = snd_seq_pool_new(poolsize);
	if (f->pool == NULL) {
		kfree(f);
		return NULL;
	}
	if (snd_seq_pool_init(f->pool) < 0) {
		snd_seq_pool_delete(&f->pool);
		kfree(f);
		return NULL;
	}

	spin_lock_init(&f->lock);
	snd_use_lock_init(&f->use_lock);
	init_waitqueue_head(&f->input_sleep);
	atomic_set(&f->overflow, 0);

	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;

	return f;
}

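/* delete the fifo and release its resources; the pool is marked
 * closing first so no new cells can be allocated, and sleeping
 * readers are woken up before the pool goes away */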
void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
	struct snd_seq_fifo *f;

	if (snd_BUG_ON(!fifo))
		return;
	f = *fifo;
	if (snd_BUG_ON(!f))
		return;
	*fifo = NULL;

	if (f->pool)
		snd_seq_pool_mark_closing(f->pool);

	snd_seq_fifo_clear(f);

	/* wake up clients if any */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	/* release remaining resources */
	if (f->pool) {
		snd_seq_pool_done(f->pool);
		snd_seq_pool_delete(&f->pool);
	}

	kfree(f);
}

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

/* clear queue */
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	/* clear overflow flag */
	atomic_set(&f->overflow, 0);

	snd_use_lock_sync(&f->use_lock);
	spin_lock_irq(&f->lock);
	/* drain the fifo */
	while ((cell = fifo_cell_out(f)) != NULL) {
		snd_seq_cell_free(cell);
	}
	spin_unlock_irq(&f->lock);
}


/* enqueue event to fifo */
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
			  struct snd_seq_event *event)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err;

	if (snd_BUG_ON(!f))
		return -EINVAL;

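	/* take the use_lock so the pool stays valid for the duration of
	 * this call; snd_seq_fifo_resize() waits for all such users via
	 * snd_use_lock_sync() before deleting the old pool */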
	snd_use_lock_use(&f->use_lock);
	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
	if (err < 0) {
		if ((err == -ENOMEM) || (err == -EAGAIN))
			atomic_inc(&f->overflow);
		snd_use_lock_free(&f->use_lock);
		return err;
	}

	/* append the new cell to the fifo */
	spin_lock_irqsave(&f->lock, flags);
	if (f->tail != NULL)
		f->tail->next = cell;
	f->tail = cell;
	if (f->head == NULL)
		f->head = cell;
	cell->next = NULL;
	f->cells++;
	spin_unlock_irqrestore(&f->lock, flags);

	/* wakeup client */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	snd_use_lock_free(&f->use_lock);

	return 0; /* success */
}

/* dequeue cell from fifo; the caller must hold f->lock */
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	cell = f->head;
	if (cell) {
		f->head = cell->next;

		/* reset tail if this was the last element */
		if (f->tail == cell)
			f->tail = NULL;

		cell->next = NULL;
		f->cells--;
	}

	return cell;
}

/* dequeue cell from fifo; the caller copies it to user space */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
			  struct snd_seq_event_cell **cellp, int nonblock)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	wait_queue_entry_t wait;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	*cellp = NULL;
	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&f->lock, flags);
	while ((cell = fifo_cell_out(f)) == NULL) {
		if (nonblock) {
			/* non-blocking - return immediately */
			spin_unlock_irqrestore(&f->lock, flags);
			return -EAGAIN;
		}
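		/* go to sleep: the task state is set and the wait-queue entry
		 * is added while f->lock is still held, so an event enqueued
		 * after the empty check above cannot slip past without
		 * waking us */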
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&f->input_sleep, &wait);
		spin_unlock_irqrestore(&f->lock, flags);
		schedule();
		spin_lock_irqsave(&f->lock, flags);
		remove_wait_queue(&f->input_sleep, &wait);
		if (signal_pending(current)) {
			spin_unlock_irqrestore(&f->lock, flags);
			return -ERESTARTSYS;
		}
	}
	spin_unlock_irqrestore(&f->lock, flags);
	*cellp = cell;

	return 0;
}


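/* return a cell to the head of the fifo, e.g. when it could not be
 * delivered to user space and must be read again later */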
void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
			       struct snd_seq_event_cell *cell)
{
	unsigned long flags;

	if (cell) {
		spin_lock_irqsave(&f->lock, flags);
		cell->next = f->head;
		f->head = cell;
		if (!f->tail)
			f->tail = cell;
		f->cells++;
		spin_unlock_irqrestore(&f->lock, flags);
	}
}


/* polling; return non-zero if queue is available */
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &f->input_sleep, wait);
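	/* f->cells is read without taking f->lock: poll only needs a
	 * snapshot, and a racing enqueue wakes input_sleep again anyway */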
	return (f->cells > 0);
}

/* change the size of the pool; all old events are removed */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
	struct snd_seq_pool *newpool, *oldpool;
	struct snd_seq_event_cell *cell, *next, *oldhead;

	if (snd_BUG_ON(!f || !f->pool))
		return -EINVAL;

	/* allocate new pool */
	newpool = snd_seq_pool_new(poolsize);
	if (newpool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(newpool) < 0) {
		snd_seq_pool_delete(&newpool);
		return -ENOMEM;
	}

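	/* swap in the new pool under the lock so concurrent readers and
	 * writers always see a consistent fifo; the cells of the old pool
	 * are drained afterwards, outside the lock */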
	spin_lock_irq(&f->lock);
	/* remember old pool */
	oldpool = f->pool;
	oldhead = f->head;
	/* exchange pools */
	f->pool = newpool;
	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;
	/* NOTE: overflow flag is not cleared */
	spin_unlock_irq(&f->lock);

	/* close the old pool and wait until all users are gone */
	snd_seq_pool_mark_closing(oldpool);
	snd_use_lock_sync(&f->use_lock);

	/* release cells in old pool */
	for (cell = oldhead; cell; cell = next) {
		next = cell->next;
		snd_seq_cell_free(cell);
	}
	snd_seq_pool_delete(&oldpool);

	return 0;
}

/* get the number of unused cells safely */
int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
{
	unsigned long flags;
	int cells;

	if (!f)
		return 0;

	snd_use_lock_use(&f->use_lock);
	spin_lock_irqsave(&f->lock, flags);
	cells = snd_seq_unused_cells(f->pool);
	spin_unlock_irqrestore(&f->lock, flags);
	snd_use_lock_free(&f->use_lock);
	return cells;
}