cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sun3dvma.c (7148B)


// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/sun3/sun3dvma.c
 *
 * Copyright (C) 2000 Sam Creasey
 *
 * Contains common routines for sun3/sun3x DVMA management.
 */
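
/*
 * Overview (summarising the code below): the DVMA region
 * [DVMA_START, DVMA_END) is handed out to devices as bus addresses
 * backed by IOMMU mappings.  Free bus-address ranges are tracked as a
 * list of "holes"; allocations carve pages out of a hole and record
 * their length in iommu_use[] so they can be freed again.
 */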

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/page.h>
#include <asm/dvma.h>

#undef DVMA_DEBUG

#ifdef CONFIG_SUN3X
extern void dvma_unmap_iommu(unsigned long baddr, int len);
#else
static inline void dvma_unmap_iommu(unsigned long a, int b)
{
}
#endif

#ifdef CONFIG_SUN3
extern void sun3_dvma_init(void);
#endif

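/*
 * One entry per DVMA page: a non-zero value is the length, in bytes,
 * of the allocation whose bus address maps to that index.
 */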
static unsigned long *iommu_use;

#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)

#define dvma_entry_use(baddr)		(iommu_use[dvma_index(baddr)])

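/*
 * A hole is a free range of DVMA bus addresses.  hole_list holds the
 * currently free ranges; hole_cache holds unused descriptors, seeded
 * from the static initholes[] array at init time.
 */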
struct hole {
	unsigned long start;
	unsigned long end;
	unsigned long size;
	struct list_head list;
};

static struct list_head hole_list;
static struct list_head hole_cache;
static struct hole initholes[64];

#ifdef DVMA_DEBUG

static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;

static void print_use(void)
{
	int i;
	int j = 0;

	pr_info("dvma entry usage:\n");

	for (i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
		if (!iommu_use[i])
			continue;

		j++;

		pr_info("dvma entry: %08x len %08lx\n",
			(i << DVMA_PAGE_SHIFT) + DVMA_START, iommu_use[i]);
	}

	pr_info("%d entries in use total\n", j);

	pr_info("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
	pr_info("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
		dvma_free_bytes);
}

static void print_holes(struct list_head *holes)
{
	struct list_head *cur;
	struct hole *hole;

	pr_info("listing dvma holes\n");
	list_for_each(cur, holes) {
		hole = list_entry(cur, struct hole, list);

		if ((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
			continue;

		pr_info("hole: start %08lx end %08lx size %08lx\n",
			hole->start, hole->end, hole->size);
	}

	pr_info("end of hole listing...\n");
}
#endif /* DVMA_DEBUG */

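/*
 * Coalesce adjacent holes to return descriptors to hole_cache.
 * Returns the number of descriptors reclaimed.
 */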
static inline int refill(void)
{
	struct hole *hole;
	struct hole *prev = NULL;
	struct list_head *cur;
	int ret = 0;

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if (!prev) {
			prev = hole;
			continue;
		}

		if (hole->end == prev->start) {
			hole->size += prev->size;
			hole->end = prev->end;
			list_move(&(prev->list), &hole_cache);
			ret++;
		}
	}

	return ret;
}

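/*
 * Take a hole descriptor from the cache, trying to refill the cache
 * by coalescing when it is empty; BUG() if nothing can be reclaimed.
 */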
static inline struct hole *rmcache(void)
{
	struct hole *ret;

	if (list_empty(&hole_cache)) {
		if (!refill()) {
			pr_crit("out of dvma hole cache!\n");
			BUG();
		}
	}

	ret = list_entry(hole_cache.next, struct hole, list);
	list_del(&(ret->list));

	return ret;
}

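/*
 * Allocate len bytes of bus address space from the first hole that
 * can satisfy the request.  Space is normally carved from the top of
 * the hole (hole->end); an exact fit consumes the hole entirely.  The
 * allocation length is recorded in iommu_use[] so that free_baddr()
 * can find it later.
 */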
static inline unsigned long get_baddr(int len, unsigned long align)
{
	struct list_head *cur;
	struct hole *hole;

	if (list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
		pr_crit("out of dvma holes! (printing hole cache)\n");
		print_holes(&hole_cache);
		print_use();
#endif
		BUG();
	}

	list_for_each(cur, &hole_list) {
		unsigned long newlen;

		hole = list_entry(cur, struct hole, list);

		if (align > DVMA_PAGE_SIZE)
			newlen = len + ((hole->end - len) & (align-1));
		else
			newlen = len;

		if (hole->size > newlen) {
			hole->end -= newlen;
			hole->size -= newlen;
			dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->end;
		} else if (hole->size == newlen) {
			list_move(&(hole->list), &hole_cache);
			dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->start;
		}
	}

	pr_crit("unable to find dvma hole!\n");
	BUG();
	return 0;
}

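/*
 * Return a bus address range to the hole list: look up its recorded
 * length, drop the IOMMU mapping, then extend an adjacent hole if one
 * exists or insert a fresh descriptor from the cache.
 */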
static inline int free_baddr(unsigned long baddr)
{
	unsigned long len;
	struct hole *hole;
	struct list_head *cur;
	unsigned long orig_baddr;

	orig_baddr = baddr;
	len = dvma_entry_use(baddr);
	dvma_entry_use(baddr) = 0;
	baddr &= DVMA_PAGE_MASK;
	dvma_unmap_iommu(baddr, len);

#ifdef DVMA_DEBUG
	dvma_frees++;
	dvma_free_bytes += len;
#endif

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if (hole->end == baddr) {
			hole->end += len;
			hole->size += len;
			return 0;
		} else if (hole->start == (baddr + len)) {
			hole->start = baddr;
			hole->size += len;
			return 0;
		}
	}

	hole = rmcache();

	hole->start = baddr;
	hole->end = baddr + len;
	hole->size = len;

//	list_add_tail(&(hole->list), cur);
	list_add(&(hole->list), cur);

	return 0;
}

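/*
 * Set up the allocator: seed the descriptor cache, create one hole
 * covering the whole DVMA region, and allocate the iommu_use[] table.
 */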
void __init dvma_init(void)
{
	struct hole *hole;
	int i;

	INIT_LIST_HEAD(&hole_list);
	INIT_LIST_HEAD(&hole_cache);

	/* prepare the hole cache */
	for (i = 0; i < 64; i++)
		list_add(&(initholes[i].list), &hole_cache);

	hole = rmcache();
	hole->start = DVMA_START;
	hole->end = DVMA_END;
	hole->size = DVMA_SIZE;

	list_add(&(hole->list), &hole_list);

	iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
				   SMP_CACHE_BYTES);
	if (!iommu_use)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));

	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);

#ifdef CONFIG_SUN3
	sun3_dvma_init();
#endif
}

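/*
 * Map an existing kernel buffer into DVMA space: round the address
 * and length to DVMA page boundaries, grab a bus address and install
 * the IOMMU mapping.  Returns the bus address (plus the buffer's
 * offset within its page), or 0 on a bad request.
 */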
unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
{
	unsigned long baddr;
	unsigned long off;

	if (!len)
		len = 0x800;

	if (!kaddr || !len) {
//		pr_err("error: kaddr %lx len %x\n", kaddr, len);
//		*(int *)4 = 0;
		return 0;
	}

	pr_debug("dvma_map request %08x bytes from %08lx\n", len, kaddr);
	off = kaddr & ~DVMA_PAGE_MASK;
	kaddr &= PAGE_MASK;
	len += off;
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if (align == 0)
		align = DVMA_PAGE_SIZE;
	else
		align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	baddr = get_baddr(len, align);
//	pr_info("using baddr %lx\n", baddr);

	if (!dvma_map_iommu(kaddr, baddr, len))
		return (baddr + off);

	pr_crit("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr,
		len);
	BUG();
	return 0;
}
EXPORT_SYMBOL(dvma_map_align);

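/*
 * Release a mapping made by dvma_map_align().  Bus addresses seen by
 * VME masters lack the 0xf00000 prefix of the on-board DVMA window,
 * so it is restored before the lookup.
 */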
void dvma_unmap(void *baddr)
{
	unsigned long addr;

	addr = (unsigned long)baddr;
	/* check if this is a vme mapping */
	if (!(addr & 0x00f00000))
		addr |= 0xf00000;

	free_baddr(addr);
}
EXPORT_SYMBOL(dvma_unmap);

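/*
 * Allocate fresh pages and map them for both device (IOMMU) and CPU
 * access.  Returns the CPU-visible DVMA virtual address, or NULL on
 * failure.
 */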
void *dvma_malloc_align(unsigned long len, unsigned long align)
{
	unsigned long kaddr;
	unsigned long baddr;
	unsigned long vaddr;

	if (!len)
		return NULL;

	pr_debug("dvma_malloc request %lx bytes\n", len);
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if ((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
		return NULL;

	if ((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) {
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	vaddr = dvma_btov(baddr);

	if (dvma_map_cpu(kaddr, vaddr, len) < 0) {
		dvma_unmap((void *)baddr);
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	pr_debug("mapped %08lx bytes %08lx kern -> %08lx bus\n", len, kaddr,
		 baddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(dvma_malloc_align);

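/*
 * Currently a no-op: DVMA allocations made with dvma_malloc_align()
 * are not reclaimed.
 */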
void dvma_free(void *vaddr)
{
}
EXPORT_SYMBOL(dvma_free);
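
/*
 * Usage sketch (hypothetical caller, not part of this file): a sun3
 * driver needing a device-visible buffer might do something like:
 *
 *	void *buf = dvma_malloc_align(0x2000, 0);
 *	if (!buf)
 *		return -ENOMEM;
 *	// hand dvma_vtob(buf) to the device as its DMA address,
 *	// assuming the dvma_vtob()/dvma_btov() helpers from
 *	// <asm/dvma.h>
 *	dvma_free(buf);		// currently a no-op, see above
 */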