cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sym_malloc.c (8173B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family 
      4 * of PCI-SCSI IO processors.
      5 *
      6 * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
      7 *
      8 * This driver is derived from the Linux sym53c8xx driver.
      9 * Copyright (C) 1998-2000  Gerard Roudier
     10 *
     11 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 
     12 * a port of the FreeBSD ncr driver to Linux-1.2.13.
     13 *
     14 * The original ncr driver has been written for 386bsd and FreeBSD by
     15 *         Wolfgang Stanglmeier        <wolf@cologne.de>
     16 *         Stefan Esser                <se@mi.Uni-Koeln.de>
     17 * Copyright (C) 1994  Wolfgang Stanglmeier
     18 *
     19 * Other major contributions:
     20 *
     21 * NVRAM detection and reading.
     22 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
     23 *
     24 *-----------------------------------------------------------------------------
     25 */
     26
     27#include "sym_glue.h"
     28
     29/*
     30 *  Simple power of two buddy-like generic allocator.
     31 *  Provides naturally aligned memory chunks.
     32 *
     33 *  This simple code is not intended to be fast, but to 
     34 *  provide power of 2 aligned memory allocations.
     35 *  Since the SCRIPTS processor only supplies 8 bit arithmetic, 
     36 *  this allocator allows simple and fast address calculations  
     37 *  from the SCRIPTS code. In addition, cache line alignment 
     38 *  is guaranteed for power of 2 cache line size.
     39 *
     40 *  This allocator has been developed for the Linux sym53c8xx  
     41 *  driver, since this O/S does not provide naturally aligned 
     42 *  allocations.
     43 *  It has the advantage of allowing the driver to use private 
     44 *  pages of memory that will be useful if we ever need to deal 
     45 *  with IO MMUs for PCI.
     46 */
     47static void *___sym_malloc(m_pool_p mp, int size)
     48{
     49	int i = 0;
     50	int s = (1 << SYM_MEM_SHIFT);
     51	int j;
     52	void *a;
     53	m_link_p h = mp->h;
     54
     55	if (size > SYM_MEM_CLUSTER_SIZE)
     56		return NULL;
     57
     58	while (size > s) {
     59		s <<= 1;
     60		++i;
     61	}
     62
     63	j = i;
     64	while (!h[j].next) {
     65		if (s == SYM_MEM_CLUSTER_SIZE) {
     66			h[j].next = (m_link_p) M_GET_MEM_CLUSTER();
     67			if (h[j].next)
     68				h[j].next->next = NULL;
     69			break;
     70		}
     71		++j;
     72		s <<= 1;
     73	}
     74	a = h[j].next;
     75	if (a) {
     76		h[j].next = h[j].next->next;
     77		while (j > i) {
     78			j -= 1;
     79			s >>= 1;
     80			h[j].next = (m_link_p) (a+s);
     81			h[j].next->next = NULL;
     82		}
     83	}
     84#ifdef DEBUG
     85	printf("___sym_malloc(%d) = %p\n", size, (void *) a);
     86#endif
     87	return a;
     88}
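
/*
 * Illustrative sketch, not part of the driver: ___sym_malloc() keeps one
 * free list per power-of-two chunk size; h[i] holds chunks of
 * (1 << (SYM_MEM_SHIFT + i)) bytes, up to SYM_MEM_CLUSTER_SIZE.  A request
 * is rounded up to the smallest such size (the loop below mirrors the one
 * above), then a larger free chunk is split in halves down to that size,
 * each unused half being pushed back on the free list of its own size.
 * Since every chunk starts at a multiple of its own (power of two) size,
 * allocations come out naturally aligned.
 */
#if 0	/* sketch only, never compiled */
static int sym_sketch_freelist_index(int size)
{
	int i = 0;
	int s = (1 << SYM_MEM_SHIFT);

	while (size > s) {	/* round up to the next power of two */
		s <<= 1;
		++i;
	}
	return i;		/* the request is served from (a split of) h[i] */
}
#endif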
     89
     90/*
     91 *  Counter-part of the generic allocator.
     92 */
     93static void ___sym_mfree(m_pool_p mp, void *ptr, int size)
     94{
     95	int i = 0;
     96	int s = (1 << SYM_MEM_SHIFT);
     97	m_link_p q;
     98	unsigned long a, b;
     99	m_link_p h = mp->h;
    100
    101#ifdef DEBUG
    102	printf("___sym_mfree(%p, %d)\n", ptr, size);
    103#endif
    104
    105	if (size > SYM_MEM_CLUSTER_SIZE)
    106		return;
    107
    108	while (size > s) {
    109		s <<= 1;
    110		++i;
    111	}
    112
    113	a = (unsigned long)ptr;
    114
    115	while (1) {
    116		if (s == SYM_MEM_CLUSTER_SIZE) {
    117#ifdef SYM_MEM_FREE_UNUSED
    118			M_FREE_MEM_CLUSTER((void *)a);
    119#else
    120			((m_link_p) a)->next = h[i].next;
    121			h[i].next = (m_link_p) a;
    122#endif
    123			break;
    124		}
    125		b = a ^ s;
    126		q = &h[i];
    127		while (q->next && q->next != (m_link_p) b) {
    128			q = q->next;
    129		}
    130		if (!q->next) {
    131			((m_link_p) a)->next = h[i].next;
    132			h[i].next = (m_link_p) a;
    133			break;
    134		}
    135		q->next = q->next->next;
    136		a = a & b;
    137		s <<= 1;
    138		++i;
    139	}
    140}
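
/*
 * Illustrative sketch, not part of the driver: ___sym_mfree() coalesces
 * buddies with plain address arithmetic.  For a chunk of size s at
 * address a (a being aligned to s), the buddy is the other half of the
 * enclosing 2*s block, i.e. a ^ s, and the merged block starts at the
 * lower of the two addresses, i.e. a & (a ^ s).  For example, with
 * s = 0x40, a = 0x1040 has buddy 0x1000 and the pair merges at 0x1000;
 * a = 0x1000 has buddy 0x1040 and merges at the same base.
 */
#if 0	/* sketch only, never compiled */
static unsigned long sym_sketch_buddy_of(unsigned long a, int s)
{
	return a ^ s;			/* address of the buddy chunk */
}

static unsigned long sym_sketch_merged_base(unsigned long a, int s)
{
	return a & (a ^ s);		/* base of the coalesced 2*s chunk */
}
#endif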
    141
    142/*
     143 *  Verbose and zeroing allocator that wraps the generic allocator.
    144 */
    145static void *__sym_calloc2(m_pool_p mp, int size, char *name, int uflags)
    146{
    147	void *p;
    148
    149	p = ___sym_malloc(mp, size);
    150
    151	if (DEBUG_FLAGS & DEBUG_ALLOC) {
    152		printf ("new %-10s[%4d] @%p.\n", name, size, p);
    153	}
    154
    155	if (p)
    156		memset(p, 0, size);
    157	else if (uflags & SYM_MEM_WARN)
    158		printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size);
    159	return p;
    160}
    161#define __sym_calloc(mp, s, n)	__sym_calloc2(mp, s, n, SYM_MEM_WARN)
    162
    163/*
    164 *  Its counter-part.
    165 */
    166static void __sym_mfree(m_pool_p mp, void *ptr, int size, char *name)
    167{
    168	if (DEBUG_FLAGS & DEBUG_ALLOC)
    169		printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);
    170
    171	___sym_mfree(mp, ptr, size);
    172}
    173
    174/*
     175 *  Default memory pool we do not need to involve in DMA.
     176 *
     177 *  With the DMA abstraction, we use functions (methods) to
     178 *  distinguish between non-DMAable memory and DMAable memory.
    179 */
    180static void *___mp0_get_mem_cluster(m_pool_p mp)
    181{
    182	void *m = sym_get_mem_cluster();
    183	if (m)
    184		++mp->nump;
    185	return m;
    186}
    187
    188#ifdef	SYM_MEM_FREE_UNUSED
    189static void ___mp0_free_mem_cluster(m_pool_p mp, void *m)
    190{
    191	sym_free_mem_cluster(m);
    192	--mp->nump;
    193}
    194#else
    195#define ___mp0_free_mem_cluster NULL
    196#endif
    197
    198static struct sym_m_pool mp0 = {
    199	NULL,
    200	___mp0_get_mem_cluster,
    201	___mp0_free_mem_cluster
    202};
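
/*
 * Illustrative sketch, not part of the driver: the function pointers above
 * are what make the generic allocator independent of where its backing
 * clusters come from; mp0 hands out plain (non-DMA) clusters, while the
 * pools created below hand out DMAable ones.  When ___sym_malloc() runs
 * out of free chunks it asks the current pool for one more cluster via
 * M_GET_MEM_CLUSTER(), which is defined outside this file and is assumed
 * here to dispatch through the pool, roughly as below.
 */
#if 0	/* sketch only, never compiled */
static void *sym_sketch_refill(m_pool_p mp)
{
	/* one naturally aligned SYM_MEM_CLUSTER_SIZE chunk from this pool */
	return mp->get_mem_cluster(mp);
}
#endif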
    203
    204/*
     205 *  Methods that maintain DMAable pools according to user allocations.
    206 *  New pools are created on the fly when a new pool id is provided.
    207 *  They are deleted on the fly when they get emptied.
    208 */
    209/* Get a memory cluster that matches the DMA constraints of a given pool */
    210static void * ___get_dma_mem_cluster(m_pool_p mp)
    211{
    212	m_vtob_p vbp;
    213	void *vaddr;
    214
    215	vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB");
    216	if (!vbp)
    217		goto out_err;
    218
    219	vaddr = sym_m_get_dma_mem_cluster(mp, vbp);
    220	if (vaddr) {
    221		int hc = VTOB_HASH_CODE(vaddr);
    222		vbp->next = mp->vtob[hc];
    223		mp->vtob[hc] = vbp;
    224		++mp->nump;
    225	}
    226	return vaddr;
    227out_err:
    228	return NULL;
    229}
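
/*
 * Illustrative sketch, not part of the driver: each DMA cluster is tracked
 * by a small "VTOB" (virtual-to-bus) descriptor recording the cluster's
 * CPU address and bus address (the vaddr and baddr fields used elsewhere
 * in this file; the full definition lives outside it).  Descriptors sit in
 * per-pool hash buckets keyed on the cluster address, so __vtobus() at the
 * end of this file can translate any pointer inside a cluster with one
 * short list walk, along these lines.
 */
#if 0	/* sketch only, never compiled */
static m_vtob_p sym_sketch_vtob_lookup(m_pool_p mp, void *cluster_vaddr)
{
	m_vtob_p vp = mp->vtob[VTOB_HASH_CODE(cluster_vaddr)];

	while (vp && vp->vaddr != cluster_vaddr)
		vp = vp->next;
	return vp;			/* NULL if the cluster is unknown */
}
#endif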
    230
    231#ifdef	SYM_MEM_FREE_UNUSED
    232/* Free a memory cluster and associated resources for DMA */
    233static void ___free_dma_mem_cluster(m_pool_p mp, void *m)
    234{
    235	m_vtob_p *vbpp, vbp;
    236	int hc = VTOB_HASH_CODE(m);
    237
    238	vbpp = &mp->vtob[hc];
    239	while (*vbpp && (*vbpp)->vaddr != m)
    240		vbpp = &(*vbpp)->next;
    241	if (*vbpp) {
    242		vbp = *vbpp;
    243		*vbpp = (*vbpp)->next;
    244		sym_m_free_dma_mem_cluster(mp, vbp);
    245		__sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
    246		--mp->nump;
    247	}
    248}
    249#endif
    250
    251/* Fetch the memory pool for a given pool id (i.e. DMA constraints) */
    252static inline m_pool_p ___get_dma_pool(m_pool_ident_t dev_dmat)
    253{
    254	m_pool_p mp;
    255	for (mp = mp0.next;
    256		mp && !sym_m_pool_match(mp->dev_dmat, dev_dmat);
    257			mp = mp->next);
    258	return mp;
    259}
    260
     261/* Create a new DMAable memory pool (when the fetch failed) */
    262static m_pool_p ___cre_dma_pool(m_pool_ident_t dev_dmat)
    263{
    264	m_pool_p mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
    265	if (mp) {
    266		mp->dev_dmat = dev_dmat;
    267		mp->get_mem_cluster = ___get_dma_mem_cluster;
    268#ifdef	SYM_MEM_FREE_UNUSED
    269		mp->free_mem_cluster = ___free_dma_mem_cluster;
    270#endif
    271		mp->next = mp0.next;
    272		mp0.next = mp;
    273		return mp;
    274	}
    275	return NULL;
    276}
    277
    278#ifdef	SYM_MEM_FREE_UNUSED
     279/* Destroy a DMAable memory pool (when it gets emptied) */
    280static void ___del_dma_pool(m_pool_p p)
    281{
    282	m_pool_p *pp = &mp0.next;
    283
    284	while (*pp && *pp != p)
    285		pp = &(*pp)->next;
    286	if (*pp) {
    287		*pp = (*pp)->next;
    288		__sym_mfree(&mp0, p, sizeof(*p), "MPOOL");
    289	}
    290}
    291#endif
    292
    293/* This lock protects only the memory allocation/free.  */
    294static DEFINE_SPINLOCK(sym53c8xx_lock);
    295
    296/*
    297 *  Actual allocator for DMAable memory.
    298 */
    299void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name)
    300{
    301	unsigned long flags;
    302	m_pool_p mp;
    303	void *m = NULL;
    304
    305	spin_lock_irqsave(&sym53c8xx_lock, flags);
    306	mp = ___get_dma_pool(dev_dmat);
    307	if (!mp)
    308		mp = ___cre_dma_pool(dev_dmat);
    309	if (!mp)
    310		goto out;
    311	m = __sym_calloc(mp, size, name);
    312#ifdef	SYM_MEM_FREE_UNUSED
    313	if (!mp->nump)
    314		___del_dma_pool(mp);
    315#endif
    316
    317 out:
    318	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
    319	return m;
    320}
    321
    322void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name)
    323{
    324	unsigned long flags;
    325	m_pool_p mp;
    326
    327	spin_lock_irqsave(&sym53c8xx_lock, flags);
    328	mp = ___get_dma_pool(dev_dmat);
    329	if (!mp)
    330		goto out;
    331	__sym_mfree(mp, m, size, name);
    332#ifdef	SYM_MEM_FREE_UNUSED
    333	if (!mp->nump)
    334		___del_dma_pool(mp);
    335#endif
    336 out:
    337	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
    338}
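
/*
 * Illustrative sketch, not part of the driver: a caller pairs the two entry
 * points above, allocating a zeroed, naturally aligned DMAable buffer for a
 * given pool identifier and later freeing it with the same identifier, size
 * and name; __vtobus() below yields the bus address to program into the
 * chip.  (The rest of the driver normally reaches these functions through
 * thin wrapper macros; the direct calls here are for illustration only.)
 */
#if 0	/* sketch only, never compiled */
static void sym_sketch_dma_usage(m_pool_ident_t dev_dmat)
{
	void *buf = __sym_calloc_dma(dev_dmat, 512, "SKETCH");

	if (buf) {
		dma_addr_t bus = __vtobus(dev_dmat, buf);

		(void)bus;	/* would be handed to the chip/SCRIPTS */
		__sym_mfree_dma(dev_dmat, buf, 512, "SKETCH");
	}
}
#endif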
    339
    340/*
    341 *  Actual virtual to bus physical address translator 
    342 *  for 32 bit addressable DMAable memory.
    343 */
    344dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m)
    345{
    346	unsigned long flags;
    347	m_pool_p mp;
    348	int hc = VTOB_HASH_CODE(m);
    349	m_vtob_p vp = NULL;
    350	void *a = (void *)((unsigned long)m & ~SYM_MEM_CLUSTER_MASK);
    351	dma_addr_t b;
    352
    353	spin_lock_irqsave(&sym53c8xx_lock, flags);
    354	mp = ___get_dma_pool(dev_dmat);
    355	if (mp) {
    356		vp = mp->vtob[hc];
    357		while (vp && vp->vaddr != a)
    358			vp = vp->next;
    359	}
    360	if (!vp)
    361		panic("sym: VTOBUS FAILED!\n");
    362	b = vp->baddr + (m - a);
    363	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
    364	return b;
    365}
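
/*
 * Illustrative sketch, not part of the driver: __vtobus() masks the pointer
 * down to its enclosing cluster (m & ~SYM_MEM_CLUSTER_MASK), finds that
 * cluster's VTOB descriptor in the hash, and adds back the offset inside
 * the cluster.  Assuming, purely for the example, a cluster size of 0x2000:
 * m = 0x12345678 gives cluster base a = 0x12344000, offset 0x1678, and bus
 * address vp->baddr + 0x1678.
 */
#if 0	/* sketch only, never compiled */
static dma_addr_t sym_sketch_vtobus(m_vtob_p vp, void *m)
{
	void *a = (void *)((unsigned long)m & ~SYM_MEM_CLUSTER_MASK);

	/* bus address of the cluster plus the offset of m inside it */
	return vp->baddr + (m - a);
}
#endif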