cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mem.c (16841B)


// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVMEM_MINOR	1
#define DEVPORT_MINOR	4

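/*
 * Number of bytes from @start to the end of its page, capped at @size:
 * callers use this to split accesses on page boundaries.
 */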
static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

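/*
 * With CONFIG_STRICT_DEVMEM, the architecture's devmem_is_allowed() decides
 * per pfn whether /dev/mem may touch the page: 0 refuses, 1 allows, and 2
 * allows the access but makes reads return zeroes (see read_mem() below).
 * Without it, every page is accessible.
 */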
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
	return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
	return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

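/*
 * Let other tasks run during long copies, and stop the copy loop early if
 * the caller has a signal pending.
 */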
static inline bool should_stop_iteration(void)
{
	if (need_resched())
		cond_resched();
	return signal_pending(current);
}

/*
 * This function reads *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;
	char *bounce;
	int err;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	while (count > 0) {
		unsigned long remaining;
		int allowed, probe;

		sz = size_inside_page(p, count);

		err = -EPERM;
		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			goto failed;

		err = -EFAULT;
		if (allowed == 2) {
			/* Show zeros for restricted memory. */
			remaining = clear_user(buf, sz);
		} else {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr)
				goto failed;

			probe = copy_from_kernel_nofault(bounce, ptr, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (probe)
				goto failed;

			remaining = copy_to_user(buf, bounce, sz);
		}

		if (remaining)
			goto failed;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
		if (should_stop_iteration())
			break;
	}
	kfree(bounce);

	*ppos += read;
	return read;

failed:
	kfree(bounce);
	return err;
}

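/*
 * Mirror of read_mem() for writes: f_pos is the physical address being
 * written to.
 */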
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;

		/* Skip actual writing when a page is marked as restricted. */
		if (allowed == 1) {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr) {
				if (written)
					break;
				return -EFAULT;
			}

			copied = copy_from_user(ptr, buf, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (copied) {
				written += sz - copied;
				if (written)
					break;
				return -EFAULT;
			}
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
		if (should_stop_iteration())
			break;
	}

	*ppos += written;
	return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_DSYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

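/* Map a range of physical memory straight into the caller's address space. */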
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

	/* Does it even fit in phys_addr_t? */
	if (offset >> PAGE_SHIFT != vma->vm_pgoff)
		return -EINVAL;

	/* It's illegal to wrap around the end of the physical address space. */
	if (offset + (phys_addr_t)size - 1 < offset)
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

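/*
 * /dev/port: byte-wise access to the I/O port space, with f_pos used as the
 * port number (0-65535).
 */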
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

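/* /dev/null: reads see EOF, writes succeed and are discarded. */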
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);

	iov_iter_advance(from, count);
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

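/* /dev/zero reads: fill the iterator with zeroes, a page at a time. */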
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		if (!need_resched())
			continue;
		if (iocb->ki_flags & IOCB_NOWAIT)
			return written ? written : -EAGAIN;
		cond_resched();
	}
	return written;
}

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t cleared = 0;

	while (count) {
		size_t chunk = min_t(size_t, count, PAGE_SIZE);
		size_t left;

		left = clear_user(buf + cleared, chunk);
		if (unlikely(left)) {
			cleared += (chunk - left);
			if (!cleared)
				return -EFAULT;
			break;
		}
		cleared += chunk;
		count -= chunk;

		if (signal_pending(current))
			break;
		cond_resched();
	}

	return cleared;
}

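/*
 * Mapping /dev/zero shared is backed by a shmem object; mapping it private
 * is an ordinary anonymous mapping.
 */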
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	vma_set_anonymous(vma);
	return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
	if (flags & MAP_SHARED) {
		/*
		 * mmap_zero() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge;
		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
		 * so as not to confuse shmem with our handle on "/dev/zero".
		 */
		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
	}

	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
	return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		fallthrough;
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
	int rc;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	rc = security_locked_down(LOCKDOWN_DEV_MEM);
	if (rc)
		return rc;

	if (iminor(inode) != DEVMEM_MINOR)
		return 0;

	/*
	 * Use a unified address space to have a single point to manage
	 * revocations when drivers want to take over a /dev/mem mapped
	 * range.
	 */
	filp->f_mapping = iomem_get_mapping();

	return 0;
}

#define zero_lseek	null_lseek
#define full_lseek      null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port

static const struct file_operations __maybe_unused mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.read_iter	= read_iter_null,
	.write_iter	= write_iter_null,
	.splice_write	= splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.write		= write_zero,
	.read_iter	= read_iter_zero,
	.read		= read_zero,
	.write_iter	= write_iter_zero,
	.mmap		= mmap_zero,
	.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read_iter	= read_iter_zero,
	.write		= write_full,
};

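/* The array index below is the device's minor number under MEM_MAJOR. */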
static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	 [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
	 [3] = { "null", 0666, &null_fops, FMODE_NOWAIT },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, 0 },
#endif
	 [5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT },
	 [7] = { "full", 0666, &full_fops, 0 },
	 [8] = { "random", 0666, &random_fops, 0 },
	 [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

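/*
 * Dispatch open() on a minor of MEM_MAJOR to the matching entry in
 * devlist[] and switch the file over to that device's file_operations.
 */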
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

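/* Apply the default mode from devlist[] when the device node is created. */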
static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

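/* Register the mem character devices and create their /dev nodes at boot. */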
static int __init chr_dev_init(void)
{
	int minor;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);