cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

seq_memory.c (11903B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@perex.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

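/*
 * Pool accounting helpers: 'counter' counts the cells currently in use, so
 * the number of free cells is total_elements - counter.  Output is
 * considered OK only while at least 'room' cells remain free; 'room' is
 * set to half of the pool size by snd_seq_pool_init().
 */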
static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
	return pool->total_elements - atomic_read(&pool->counter);
}

static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
	return snd_seq_pool_available(pool) >= pool->room;
}

/*
 * Variable length event:
 * The event like sysex uses variable length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed to several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */

/*
 * exported:
 * call dump function to expand external data.
 */

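/*
 * Return the data length of a variable length event, or -EINVAL if the
 * event does not use the variable length type.
 */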
static int get_var_len(const struct snd_seq_event *event)
{
	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return -EINVAL;

	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	len = get_var_len(event);
	if (len <= 0)
		return len;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		char buf[32];
		char __user *curptr = (char __force __user *)event->data.ext.ptr;
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
	if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
		return func(private_data, event->data.ext.ptr, len);

	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		if (len < size)
			size = len;
		err = func(private_data, &cell->event, size);
		if (err < 0)
			return err;
		len -= size;
	}
	return 0;
}
EXPORT_SYMBOL(snd_seq_dump_var_event);


/*
 * exported:
 * expand the variable length event to linear buffer space.
 */

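/*
 * Copy callbacks passed to snd_seq_dump_var_event() by
 * snd_seq_expand_var_event() below: each call copies one chunk into the
 * destination buffer (kernel or user space) and advances the pointer.
 */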
static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
	if (copy_to_user(*bufptr, src, size))
		return -EFAULT;
	*bufptr += size;
	return 0;
}

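/*
 * Returns the number of bytes written into 'buf' (rounded up to
 * 'size_aligned' when requested) or a negative error code; -EAGAIN means
 * the 'count'-byte destination buffer is too small.  When 'in_kernel' is
 * zero, 'buf' is treated as a user space pointer.
 */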
int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen;
	int err;

	len = get_var_len(event);
	if (len < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = roundup(len, size_aligned);
	if (count < newlen)
		return -EAGAIN;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		if (! in_kernel)
			return -EINVAL;
		if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
			return -EFAULT;
		return newlen;
	}
	err = snd_seq_dump_var_event(event,
				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
				     (snd_seq_dump_func_t)seq_copy_in_user,
				     &buf);
	return err < 0 ? err : newlen;
}
EXPORT_SYMBOL(snd_seq_expand_var_event);

/*
 * release this cell, free extended data if available
 */

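/* put a single cell back onto the pool free list; pool->lock must be held */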
static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
	unsigned long flags;
	struct snd_seq_pool *pool;

	if (snd_BUG_ON(!cell))
		return;
	pool = cell->pool;
	if (snd_BUG_ON(!pool))
		return;

	spin_lock_irqsave(&pool->lock, flags);
	free_cell(pool, cell);
	if (snd_seq_ev_is_variable(&cell->event)) {
		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
			struct snd_seq_event_cell *curp, *nextptr;
			curp = cell->event.data.ext.ptr;
			for (; curp; curp = nextptr) {
				nextptr = curp->next;
				curp->next = pool->free;
				free_cell(pool, curp);
			}
		}
	}
	if (waitqueue_active(&pool->output_sleep)) {
		/* has enough space now? */
		if (snd_seq_output_ok(pool))
			wake_up(&pool->output_sleep);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}


/*
 * allocate an event cell.
 */
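/*
 * May sleep until a cell becomes free unless 'nonblock' is set; while
 * sleeping, 'mutexp' (if non-NULL) is released and re-acquired.  Returns
 * -EAGAIN when no cell is available in non-blocking mode, -ERESTARTSYS
 * when interrupted by a signal, and -ENOMEM when the pool is closing.
 */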
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file,
			      struct mutex *mutexp)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_entry_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		pr_debug("ALSA: seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
	while (pool->free == NULL && ! nonblock && ! pool->closing) {

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		spin_unlock_irqrestore(&pool->lock, flags);
		if (mutexp)
			mutex_unlock(mutexp);
		schedule();
		if (mutexp)
			mutex_lock(mutexp);
		spin_lock_irqsave(&pool->lock, flags);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* closing.. */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

__error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}


/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed to additional
 * cells.
 */
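/*
 * The copy always stores variable length data as a chain of extra cells
 * (SNDRV_SEQ_EXT_CHAINED), whatever form the source event used.  On error,
 * every cell allocated so far is returned to the pool via
 * snd_seq_cell_free().
 */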
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file, struct mutex *mutexp)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;

	*cellp = NULL;

	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = DIV_ROUND_UP(extlen, sizeof(struct snd_seq_event));
	}
	if (ncells >= pool->total_elements)
		return -ENOMEM;

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
	if (err < 0)
		return err;

	/* copy the event */
	cell->event = *event;

	/* decompose */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			int size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
						 mutexp);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy chunk */
			if (is_chained && src) {
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

__error:
	snd_seq_cell_free(cell);
	return err;
}


/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}


/* allocate room for the specified number of events */
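/*
 * The cell array is allocated outside the lock; if another caller has
 * initialized the pool in the meantime, the freshly allocated array is
 * simply freed again and 0 is returned.
 */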
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
	int cell;
	struct snd_seq_event_cell *cellptr;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	cellptr = kvmalloc_array(sizeof(struct snd_seq_event_cell), pool->size,
				 GFP_KERNEL);
	if (!cellptr)
		return -ENOMEM;

	/* add new cells to the free cell list */
	spin_lock_irq(&pool->lock);
	if (pool->ptr) {
		spin_unlock_irq(&pool->lock);
		kvfree(cellptr);
		return 0;
	}

	pool->ptr = cellptr;
	pool->free = NULL;

	for (cell = 0; cell < pool->size; cell++) {
		cellptr = pool->ptr + cell;
		cellptr->pool = pool;
		cellptr->next = pool->free;
		pool->free = cellptr;
	}
	pool->room = (pool->size + 1) / 2;

	/* init statistics */
	pool->max_used = 0;
	pool->total_elements = pool->size;
	spin_unlock_irq(&pool->lock);
	return 0;
}

/* refuse further insertions to the pool */
void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
{
	unsigned long flags;

	if (snd_BUG_ON(!pool))
		return;
	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);
}

/* remove events */
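/*
 * Waits until every outstanding cell has been returned before releasing
 * the cell array; callers (see snd_seq_pool_delete()) mark the pool as
 * closing first so that no new cells are handed out while waiting.
 */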
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	struct snd_seq_event_cell *ptr;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	/* wait for closing all threads */
	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);

	while (atomic_read(&pool->counter) > 0)
		schedule_timeout_uninterruptible(1);

	/* release all resources */
	spin_lock_irq(&pool->lock);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irq(&pool->lock);

	kvfree(ptr);

	spin_lock_irq(&pool->lock);
	pool->closing = 0;
	spin_unlock_irq(&pool->lock);

	return 0;
}


/* init new memory pool */
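/*
 * Only the pool bookkeeping is set up here; the cell array itself is
 * allocated later by snd_seq_pool_init().
 */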
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
	struct snd_seq_pool *pool;

	/* create pool block */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	atomic_set(&pool->counter, 0);
	pool->closing = 0;
	init_waitqueue_head(&pool->output_sleep);

	pool->size = poolsize;

	/* init statistics */
	pool->max_used = 0;
	return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
	struct snd_seq_pool *pool = *ppool;

	*ppool = NULL;
	if (pool == NULL)
		return 0;
	snd_seq_pool_mark_closing(pool);
	snd_seq_pool_done(pool);
	kfree(pool);
	return 0;
}

/* exported to seq_clientmgr.c */
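/* print pool usage statistics into the proc info buffer, prefixed by 'space' */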
void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space)
{
	if (pool == NULL)
		return;
	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
}