cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

file.c (62759B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * SPU file system -- file contents
      4 *
      5 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
      6 *
      7 * Author: Arnd Bergmann <arndb@de.ibm.com>
      8 */
      9
     10#undef DEBUG
     11
     12#include <linux/coredump.h>
     13#include <linux/fs.h>
     14#include <linux/ioctl.h>
     15#include <linux/export.h>
     16#include <linux/pagemap.h>
     17#include <linux/poll.h>
     18#include <linux/ptrace.h>
     19#include <linux/seq_file.h>
     20#include <linux/slab.h>
     21
     22#include <asm/io.h>
     23#include <asm/time.h>
     24#include <asm/spu.h>
     25#include <asm/spu_info.h>
     26#include <linux/uaccess.h>
     27
     28#include "spufs.h"
     29#include "sputrace.h"
     30
     31#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
     32
     33/* Simple attribute files */
/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);	/* read hook; NULL makes the file write-only */
	int (*set)(void *, u64);	/* write hook; NULL makes the file read-only */
	char get_buf[24];       /* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;		/* inode->i_private, handed to get/set */
	const char *fmt;        /* format for read operation */
	struct mutex mutex;     /* protects access to these buffers */
};
     43
     44static int spufs_attr_open(struct inode *inode, struct file *file,
     45		int (*get)(void *, u64 *), int (*set)(void *, u64),
     46		const char *fmt)
     47{
     48	struct spufs_attr *attr;
     49
     50	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
     51	if (!attr)
     52		return -ENOMEM;
     53
     54	attr->get = get;
     55	attr->set = set;
     56	attr->data = inode->i_private;
     57	attr->fmt = fmt;
     58	mutex_init(&attr->mutex);
     59	file->private_data = attr;
     60
     61	return nonseekable_open(inode, file);
     62}
     63
     64static int spufs_attr_release(struct inode *inode, struct file *file)
     65{
     66       kfree(file->private_data);
     67	return 0;
     68}
     69
/*
 * read() for a simple attribute file.  The first read calls the ->get
 * hook and formats the value into get_buf; continued reads (*ppos != 0)
 * are served from the cached buffer so one value is returned
 * consistently across partial reads.
 */
static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;	/* write-only attribute */

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
    102
/*
 * write() for a simple attribute file: copy the user string into
 * set_buf, parse it with simple_strtol() (base auto-detected) and hand
 * the value to the ->set hook.  The full input length is claimed even
 * if only a prefix was numeric.
 */
static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;	/* read-only attribute */

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);	/* keep room for '\0' */
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
    132
    133static ssize_t spufs_dump_emit(struct coredump_params *cprm, void *buf,
    134		size_t size)
    135{
    136	if (!dump_emit(cprm, buf, size))
    137		return -EIO;
    138	return size;
    139}
    140
/*
 * Define a const file_operations (__fops) for a simple spufs attribute
 * backed by the __get/__set callbacks with printf format __fmt.
 * __simple_attr_check_format() type-checks __fmt against a u64 argument
 * at compile time.
 */
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek  = generic_file_llseek,					\
};
    154
    155
    156static int
    157spufs_mem_open(struct inode *inode, struct file *file)
    158{
    159	struct spufs_inode_info *i = SPUFS_I(inode);
    160	struct spu_context *ctx = i->i_ctx;
    161
    162	mutex_lock(&ctx->mapping_lock);
    163	file->private_data = ctx;
    164	if (!i->i_openers++)
    165		ctx->local_store = inode->i_mapping;
    166	mutex_unlock(&ctx->mapping_lock);
    167	return 0;
    168}
    169
    170static int
    171spufs_mem_release(struct inode *inode, struct file *file)
    172{
    173	struct spufs_inode_info *i = SPUFS_I(inode);
    174	struct spu_context *ctx = i->i_ctx;
    175
    176	mutex_lock(&ctx->mapping_lock);
    177	if (!--i->i_openers)
    178		ctx->local_store = NULL;
    179	mutex_unlock(&ctx->mapping_lock);
    180	return 0;
    181}
    182
/* Core-dump helper: emit the whole local store (LS_SIZE bytes). */
static ssize_t
spufs_mem_dump(struct spu_context *ctx, struct coredump_params *cprm)
{
	return spufs_dump_emit(cprm, ctx->ops->get_ls(ctx), LS_SIZE);
}
    188
/*
 * Read from the SPU local store.  spu_acquire() pins the context; the
 * copy itself is a simple_read_from_buffer() over the LS_SIZE-byte
 * local-store image returned by ctx->ops->get_ls().
 */
static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = simple_read_from_buffer(buffer, size, pos, ctx->ops->get_ls(ctx),
				      LS_SIZE);
	spu_release(ctx);

	return ret;
}
    205
/*
 * Write to the SPU local store at *ppos; simple_write_to_buffer()
 * clamps the copy to LS_SIZE.
 * NOTE(review): the pre-check uses 'pos > LS_SIZE', so a write at
 * exactly pos == LS_SIZE falls through and returns 0 rather than
 * -EFBIG — confirm this EOF behaviour is intended.
 */
static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos > LS_SIZE)
		return -EFBIG;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
	spu_release(ctx);

	return size;
}
    228
/*
 * Page-fault handler for the local-store mapping.  Depending on the
 * context state the page is taken either from the saved local-store
 * image (cacheable, vmalloc'ed CSA memory) or from the physical SPU
 * local store (non-cached, write-combined).
 */
static vm_fault_t
spufs_mem_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct spu_context *ctx	= vma->vm_file->private_data;
	unsigned long pfn, offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			vmf->address, offset);

	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;	/* interrupted; let the fault retry */

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	ret = vmf_insert_pfn(vma, vmf->address, pfn);

	spu_release(ctx);

	return ret;
}
    260
    261static int spufs_mem_mmap_access(struct vm_area_struct *vma,
    262				unsigned long address,
    263				void *buf, int len, int write)
    264{
    265	struct spu_context *ctx = vma->vm_file->private_data;
    266	unsigned long offset = address - vma->vm_start;
    267	char *local_store;
    268
    269	if (write && !(vma->vm_flags & VM_WRITE))
    270		return -EACCES;
    271	if (spu_acquire(ctx))
    272		return -EINTR;
    273	if ((offset + len) > vma->vm_end)
    274		len = vma->vm_end - offset;
    275	local_store = ctx->ops->get_ls(ctx);
    276	if (write)
    277		memcpy_toio(local_store + offset, buf, len);
    278	else
    279		memcpy_fromio(buf, local_store + offset, len);
    280	spu_release(ctx);
    281	return len;
    282}
    283
static const struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};

/* mmap of the local store: must be MAP_SHARED; IO/PFN, write-combined. */
static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

/* file operations for the "mem" (local store) file */
static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
};
    309
/*
 * Common page-fault handler for problem-state mappings: either insert
 * the PFN at ps_offs+offset inside the SPU's physical problem-state
 * area, or — if the context is saved — sleep until it is runnable and
 * let the fault be retried (VM_FAULT_NOPAGE).
 */
static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vmf->vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int err = 0;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_lock, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_lock held.
	 * It is possible to drop the mmap_lock here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		mmap_read_unlock(current->mm);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		mmap_read_lock(current->mm);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		ret = vmf_insert_pfn(vmf->vma, vmf->address,
				(area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	/* NOTE(review): spu_release() is skipped when spufs_wait() failed,
	 * presumably because spufs_wait drops the state mutex on
	 * interruption — confirm against spufs.h. */
	if (!err)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return ret;
}
    365
    366#if SPUFS_MMAP_4K
/* Fault handler for the cntl mapping; control area is at offset 0x4000. */
static vm_fault_t spufs_cntl_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
    390#else /* SPUFS_MMAP_4K */
    391#define spufs_cntl_mmap NULL
    392#endif /* !SPUFS_MMAP_4K */
    393
    394static int spufs_cntl_get(void *data, u64 *val)
    395{
    396	struct spu_context *ctx = data;
    397	int ret;
    398
    399	ret = spu_acquire(ctx);
    400	if (ret)
    401		return ret;
    402	*val = ctx->ops->status_read(ctx);
    403	spu_release(ctx);
    404
    405	return 0;
    406}
    407
    408static int spufs_cntl_set(void *data, u64 val)
    409{
    410	struct spu_context *ctx = data;
    411	int ret;
    412
    413	ret = spu_acquire(ctx);
    414	if (ret)
    415		return ret;
    416	ctx->ops->runcntl_write(ctx, val);
    417	spu_release(ctx);
    418
    419	return 0;
    420}
    421
/*
 * Open the cntl file: record the mapping for the first opener, then
 * defer to the generic simple_attr machinery with our get/set hooks.
 * NOTE(review): the "0x%08lx" format is applied by simple_attr to a
 * u64-derived value; verify the length modifier matches on 32-bit
 * builds.
 */
static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

/* Release the cntl file: last closer clears ctx->cntl. */
static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.llseek	= no_llseek,
	.mmap = spufs_cntl_mmap,
};
    459
/* Open hook shared by the regs and fpcr files: just publish the context. */
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

/* Core-dump helper: emit the saved GPR array. */
static ssize_t
spufs_regs_dump(struct spu_context *ctx, struct coredump_params *cprm)
{
	return spufs_dump_emit(cprm, ctx->csa.lscsa->gprs,
			       sizeof(ctx->csa.lscsa->gprs));
}
    474
/*
 * Read the saved GPRs.  The context is forced into saved state
 * (spu_acquire_saved) so csa.lscsa->gprs is valid for the copy.
 */
static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	/* pre-check for file position: if we'd return EOF, there's no point
	 * causing a deschedule */
	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
		return 0;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = simple_read_from_buffer(buffer, size, pos, ctx->csa.lscsa->gprs,
				      sizeof(ctx->csa.lscsa->gprs));
	spu_release_saved(ctx);
	return ret;
}
    495
/*
 * Write the saved GPRs at *pos; simple_write_to_buffer() clamps the
 * copy to the GPR array.  Writes starting at or past the end return
 * -EFBIG.
 */
static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->gprs))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
    524
/* Core-dump helper: emit the saved floating-point control register. */
static ssize_t
spufs_fpcr_dump(struct spu_context *ctx, struct coredump_params *cprm)
{
	return spufs_dump_emit(cprm, &ctx->csa.lscsa->fpcr,
			       sizeof(ctx->csa.lscsa->fpcr));
}

/*
 * Read the saved FPCR; the context is forced into saved state so the
 * CSA copy is current.
 */
static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = simple_read_from_buffer(buffer, size, pos, &ctx->csa.lscsa->fpcr,
				      sizeof(ctx->csa.lscsa->fpcr));
	spu_release_saved(ctx);
	return ret;
}
    547
/*
 * Write the saved FPCR at *pos, clamped by simple_write_to_buffer();
 * writes starting at or past the end return -EFBIG.
 */
static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->fpcr))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}

/* fpcr shares the trivial open hook with the regs file */
static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
    576
    577/* generic open function for all pipe-like files */
    578static int spufs_pipe_open(struct inode *inode, struct file *file)
    579{
    580	struct spufs_inode_info *i = SPUFS_I(inode);
    581	file->private_data = i->i_ctx;
    582
    583	return stream_open(inode, file);
    584}
    585
    586/*
    587 * Read as many bytes from the mailbox as possible, until
    588 * one of the conditions becomes true:
    589 *
    590 * - no more data available in the mailbox
    591 * - end of the user provided buffer
    592 * - end of the mapped area
    593 */
/*
 * Non-blocking drain of the mailbox into the user buffer, one u32 at a
 * time.  Returns the byte count read, -EAGAIN when the mailbox was
 * empty, or -EFAULT if the very first put_user() failed.
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata = (void __user *)buf;
	ssize_t count;

	if (len < 4)
		return -EINVAL;	/* need room for at least one entry */

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;	/* mailbox drained */

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
	.llseek	= no_llseek,
};
    639
/*
 * Read the low byte of the mailbox status register as a single u32.
 * Always returns 4 bytes on success; *pos is intentionally ignored.
 */
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
	.llseek = no_llseek,
};
    669
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* spu->ctx may be NULL if the SPU is not bound to a context */
	if (ctx)
		wake_up_all(&ctx->ibox_wq);
}
    684
    685/*
    686 * Read as many bytes from the interrupt mailbox as possible, until
    687 * one of the conditions becomes true:
    688 *
    689 * - no more data available in the mailbox
    690 * - end of the user provided buffer
    691 * - end of the mapped area
    692 *
    693 * If the file is opened without O_NONBLOCK, we wait here until
    694 * any data is available, but return when we have been able to
    695 * read something.
    696 */
/*
 * Read from the interrupt mailbox.  The first element blocks (unless
 * O_NONBLOCK); any further elements are taken opportunistically until
 * the mailbox empties, the buffer fills, or put_user() faults.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata = (void __user *)buf;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		/* NOTE(review): on failure this skips spu_release(),
		 * presumably because spufs_wait drops the state mutex when
		 * interrupted — confirm against spufs.h. */
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
    749
/*
 * poll() for the ibox file: report readability based on the mailbox
 * status register.
 */
static __poll_t spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	__poll_t mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, EPOLLIN | EPOLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.llseek = no_llseek,
};
    774
/*
 * Read the ibox byte (bits 16-23) of the mailbox status register as a
 * single u32.  Always returns 4 bytes on success; *pos is ignored.
 */
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
	.llseek = no_llseek,
};
    802
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* spu->ctx may be NULL if the SPU is not bound to a context */
	if (ctx)
		wake_up_all(&ctx->wbox_wq);
}
    817
    818/*
    819 * Write as many bytes to the interrupt mailbox as possible, until
    820 * one of the conditions becomes true:
    821 *
    822 * - the mailbox is full
    823 * - end of the user provided buffer
    824 * - end of the mapped area
    825 *
    826 * If the file is opened without O_NONBLOCK, we wait here until
    827 * space is available, but return when we have been able to
    828 * write something.
    829 */
/*
 * Write to the SPU write-mailbox.  The first element blocks for space
 * (unless O_NONBLOCK); further elements are written opportunistically
 * until the mailbox fills, the buffer ends, or get_user() faults.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata = (void __user *)buf;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	/* fetch the first element before taking any locks */
	if (get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		/* NOTE(review): on failure this skips spu_release(),
		 * presumably because spufs_wait drops the state mutex when
		 * interrupted — confirm against spufs.h. */
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}


	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;	/* mailbox full */
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
    881
/*
 * poll() for the wbox file: report writability based on the mailbox
 * status register.
 */
static __poll_t spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	__poll_t mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, EPOLLOUT | EPOLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.llseek = no_llseek,
};
    906
/*
 * Read the wbox byte (bits 8-15) of the mailbox status register as a
 * single u32.  Always returns 4 bytes on success; *pos is ignored.
 */
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
	.llseek = no_llseek,
};
    934
    935static int spufs_signal1_open(struct inode *inode, struct file *file)
    936{
    937	struct spufs_inode_info *i = SPUFS_I(inode);
    938	struct spu_context *ctx = i->i_ctx;
    939
    940	mutex_lock(&ctx->mapping_lock);
    941	file->private_data = ctx;
    942	if (!i->i_openers++)
    943		ctx->signal1 = inode->i_mapping;
    944	mutex_unlock(&ctx->mapping_lock);
    945	return nonseekable_open(inode, file);
    946}
    947
    948static int
    949spufs_signal1_release(struct inode *inode, struct file *file)
    950{
    951	struct spufs_inode_info *i = SPUFS_I(inode);
    952	struct spu_context *ctx = i->i_ctx;
    953
    954	mutex_lock(&ctx->mapping_lock);
    955	if (!--i->i_openers)
    956		ctx->signal1 = NULL;
    957	mutex_unlock(&ctx->mapping_lock);
    958	return 0;
    959}
    960
/*
 * Core-dump helper for signal1: emit the saved channel-3 data only if
 * its channel count indicates a pending value.
 */
static ssize_t spufs_signal1_dump(struct spu_context *ctx,
		struct coredump_params *cprm)
{
	if (!ctx->csa.spu_chnlcnt_RW[3])
		return 0;
	return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[3],
			       sizeof(ctx->csa.spu_chnldata_RW[3]));
}

/*
 * Copy the saved signal1 value (CSA channel 3) to userspace; caller
 * must hold the context in saved state.  Returns 0 when no value is
 * pending.
 */
static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len)
{
	if (len < sizeof(ctx->csa.spu_chnldata_RW[3]))
		return -EINVAL;
	if (!ctx->csa.spu_chnlcnt_RW[3])
		return 0;
	if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[3],
			 sizeof(ctx->csa.spu_chnldata_RW[3])))
		return -EFAULT;
	return sizeof(ctx->csa.spu_chnldata_RW[3]);
}
    982
    983static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
    984			size_t len, loff_t *pos)
    985{
    986	int ret;
    987	struct spu_context *ctx = file->private_data;
    988
    989	ret = spu_acquire_saved(ctx);
    990	if (ret)
    991		return ret;
    992	ret = __spufs_signal1_read(ctx, buf, len);
    993	spu_release_saved(ctx);
    994
    995	return ret;
    996}
    997
/*
 * Write a u32 to the signal1 notification register.  Exactly 4 bytes
 * are consumed; shorter writes return -EINVAL.
 */
static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}
   1021
/*
 * Fault handler for the signal1 mapping; the problem-state offset
 * depends on the kernel page size.
 */
static vm_fault_t
spufs_signal1_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.fault = spufs_signal1_mmap_fault,
};

/* mmap of signal1: must be MAP_SHARED; IO/PFN, non-cached. */
static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};

/* nosched variant: same as above but without the read hook */
static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};
   1069
/*
 * Open the signal2 file; the first opener records the inode mapping
 * in ctx->signal2 under ctx->mapping_lock.
 */
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

/* Release the signal2 file; the last closer clears ctx->signal2. */
static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
   1095
/*
 * Coredump helper: emit the saved signal2 datum.  Channel index 4 in the
 * saved context appears to be the signal-notification-2 channel — the
 * count being zero means no datum is pending, so nothing is dumped.
 */
static ssize_t spufs_signal2_dump(struct spu_context *ctx,
		struct coredump_params *cprm)
{
	if (!ctx->csa.spu_chnlcnt_RW[4])
		return 0;
	return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[4],
			       sizeof(ctx->csa.spu_chnldata_RW[4]));
}
   1104
   1105static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
   1106			size_t len)
   1107{
   1108	if (len < sizeof(ctx->csa.spu_chnldata_RW[4]))
   1109		return -EINVAL;
   1110	if (!ctx->csa.spu_chnlcnt_RW[4])
   1111		return 0;
   1112	if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[4],
   1113			 sizeof(ctx->csa.spu_chnldata_RW[4])))
   1114		return -EFAULT;
   1115	return sizeof(ctx->csa.spu_chnldata_RW[4]);
   1116}
   1117
   1118static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
   1119			size_t len, loff_t *pos)
   1120{
   1121	struct spu_context *ctx = file->private_data;
   1122	int ret;
   1123
   1124	ret = spu_acquire_saved(ctx);
   1125	if (ret)
   1126		return ret;
   1127	ret = __spufs_signal2_read(ctx, buf, len);
   1128	spu_release_saved(ctx);
   1129
   1130	return ret;
   1131}
   1132
   1133static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
   1134			size_t len, loff_t *pos)
   1135{
   1136	struct spu_context *ctx;
   1137	ssize_t ret;
   1138	u32 data;
   1139
   1140	ctx = file->private_data;
   1141
   1142	if (len < 4)
   1143		return -EINVAL;
   1144
   1145	if (copy_from_user(&data, buf, 4))
   1146		return -EFAULT;
   1147
   1148	ret = spu_acquire(ctx);
   1149	if (ret)
   1150		return ret;
   1151	ctx->ops->signal2_write(ctx, data);
   1152	spu_release(ctx);
   1153
   1154	return 4;
   1155}
   1156
#if SPUFS_MMAP_4K
/*
 * Fault handler for signal2 mappings; mirrors spufs_signal1_mmap_fault
 * with the signal2 problem-state offset.
 */
static vm_fault_t
spufs_signal2_mmap_fault(struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.fault = spufs_signal2_mmap_fault,
};

/* mmap of the signal2 area: shared, uncached, PFN-mapped register window. */
static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */
   1191
/* Full signal2 file: readable, writable and mappable. */
static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};
   1200
/* signal2 variant without .read, matching spufs_signal1_nosched_fops. */
static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};
   1208
   1209/*
   1210 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
   1211 * work of acquiring (or not) the SPU context before calling through
   1212 * to the actual get routine. The set routine is called directly.
   1213 */
   1214#define SPU_ATTR_NOACQUIRE	0
   1215#define SPU_ATTR_ACQUIRE	1
   1216#define SPU_ATTR_ACQUIRE_SAVED	2
   1217
   1218#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
   1219static int __##__get(void *data, u64 *val)				\
   1220{									\
   1221	struct spu_context *ctx = data;					\
   1222	int ret = 0;							\
   1223									\
   1224	if (__acquire == SPU_ATTR_ACQUIRE) {				\
   1225		ret = spu_acquire(ctx);					\
   1226		if (ret)						\
   1227			return ret;					\
   1228		*val = __get(ctx);					\
   1229		spu_release(ctx);					\
   1230	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
   1231		ret = spu_acquire_saved(ctx);				\
   1232		if (ret)						\
   1233			return ret;					\
   1234		*val = __get(ctx);					\
   1235		spu_release_saved(ctx);					\
   1236	} else								\
   1237		*val = __get(ctx);					\
   1238									\
   1239	return 0;							\
   1240}									\
   1241DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
   1242
   1243static int spufs_signal1_type_set(void *data, u64 val)
   1244{
   1245	struct spu_context *ctx = data;
   1246	int ret;
   1247
   1248	ret = spu_acquire(ctx);
   1249	if (ret)
   1250		return ret;
   1251	ctx->ops->signal1_type_set(ctx, val);
   1252	spu_release(ctx);
   1253
   1254	return 0;
   1255}
   1256
/* Attribute getter: read back the signal1 notification type. */
static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
   1263
   1264
   1265static int spufs_signal2_type_set(void *data, u64 val)
   1266{
   1267	struct spu_context *ctx = data;
   1268	int ret;
   1269
   1270	ret = spu_acquire(ctx);
   1271	if (ret)
   1272		return ret;
   1273	ctx->ops->signal2_type_set(ctx, val);
   1274	spu_release(ctx);
   1275
   1276	return 0;
   1277}
   1278
/* Attribute getter: read back the signal2 notification type. */
static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
   1285
#if SPUFS_MMAP_4K
/* Fault handler for the mss file: first 4k page of the problem state area. */
static vm_fault_t
spufs_mss_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mss_mmap_vmops = {
	.fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 * NOTE(review): this comment is shared verbatim with spufs_mfc_mmap
 * below; the mss file maps offset 0x0000 while mfc maps 0x3000 — verify
 * which description is meant for which area.
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */
   1314
   1315static int spufs_mss_open(struct inode *inode, struct file *file)
   1316{
   1317	struct spufs_inode_info *i = SPUFS_I(inode);
   1318	struct spu_context *ctx = i->i_ctx;
   1319
   1320	file->private_data = i->i_ctx;
   1321
   1322	mutex_lock(&ctx->mapping_lock);
   1323	if (!i->i_openers++)
   1324		ctx->mss = inode->i_mapping;
   1325	mutex_unlock(&ctx->mapping_lock);
   1326	return nonseekable_open(inode, file);
   1327}
   1328
   1329static int
   1330spufs_mss_release(struct inode *inode, struct file *file)
   1331{
   1332	struct spufs_inode_info *i = SPUFS_I(inode);
   1333	struct spu_context *ctx = i->i_ctx;
   1334
   1335	mutex_lock(&ctx->mapping_lock);
   1336	if (!--i->i_openers)
   1337		ctx->mss = NULL;
   1338	mutex_unlock(&ctx->mapping_lock);
   1339	return 0;
   1340}
   1341
/* mss file: mmap-only access, no read/write. */
static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
	.llseek  = no_llseek,
};
   1348
/* Fault handler for the psmap file: whole problem-state area from 0. */
static vm_fault_t
spufs_psmap_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.fault = spufs_psmap_mmap_fault,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}
   1373
   1374static int spufs_psmap_open(struct inode *inode, struct file *file)
   1375{
   1376	struct spufs_inode_info *i = SPUFS_I(inode);
   1377	struct spu_context *ctx = i->i_ctx;
   1378
   1379	mutex_lock(&ctx->mapping_lock);
   1380	file->private_data = i->i_ctx;
   1381	if (!i->i_openers++)
   1382		ctx->psmap = inode->i_mapping;
   1383	mutex_unlock(&ctx->mapping_lock);
   1384	return nonseekable_open(inode, file);
   1385}
   1386
   1387static int
   1388spufs_psmap_release(struct inode *inode, struct file *file)
   1389{
   1390	struct spufs_inode_info *i = SPUFS_I(inode);
   1391	struct spu_context *ctx = i->i_ctx;
   1392
   1393	mutex_lock(&ctx->mapping_lock);
   1394	if (!--i->i_openers)
   1395		ctx->psmap = NULL;
   1396	mutex_unlock(&ctx->mapping_lock);
   1397	return 0;
   1398}
   1399
/* psmap file: mmap-only access to the full problem-state window. */
static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
	.llseek  = no_llseek,
};
   1406
   1407
#if SPUFS_MMAP_4K
/* Fault handler for the mfc file: problem-state page at offset 0x3000. */
static vm_fault_t
spufs_mfc_mmap_fault(struct vm_fault *vmf)
{
	return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
   1436
/*
 * Open the mfc file.  DMA commands target the caller's address space,
 * so only the context owner may open it; the i_count check rejects any
 * open while other references to the inode exist (presumably to keep
 * the mfc file exclusive — TODO confirm intent).
 */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}
   1456
   1457static int
   1458spufs_mfc_release(struct inode *inode, struct file *file)
   1459{
   1460	struct spufs_inode_info *i = SPUFS_I(inode);
   1461	struct spu_context *ctx = i->i_ctx;
   1462
   1463	mutex_lock(&ctx->mapping_lock);
   1464	if (!--i->i_openers)
   1465		ctx->mfc = NULL;
   1466	mutex_unlock(&ctx->mapping_lock);
   1467	return 0;
   1468}
   1469
/* interrupt-level mfc callback function. */
/*
 * Runs in interrupt context when the MFC raises a tag-group completion
 * interrupt: wake everyone sleeping in spufs_mfc_read()/write()/poll().
 * spu->ctx may be NULL if the SPU is not currently bound to a context.
 */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx)
		wake_up_all(&ctx->mfc_wq);
}
   1478
/*
 * Wait condition for spufs_mfc_read(): returns 1 (and the completed
 * mask in *status) when any tag group the caller armed has finished,
 * clearing those groups from ctx->tagwait.  Otherwise re-arms the
 * completion interrupt and returns 0 so the caller keeps sleeping.
 */
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
   1493
   1494static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
   1495			size_t size, loff_t *pos)
   1496{
   1497	struct spu_context *ctx = file->private_data;
   1498	int ret = -EINVAL;
   1499	u32 status;
   1500
   1501	if (size != 4)
   1502		goto out;
   1503
   1504	ret = spu_acquire(ctx);
   1505	if (ret)
   1506		return ret;
   1507
   1508	ret = -EINVAL;
   1509	if (file->f_flags & O_NONBLOCK) {
   1510		status = ctx->ops->read_mfc_tagstatus(ctx);
   1511		if (!(status & ctx->tagwait))
   1512			ret = -EAGAIN;
   1513		else
   1514			/* XXX(hch): shouldn't we clear ret here? */
   1515			ctx->tagwait &= ~status;
   1516	} else {
   1517		ret = spufs_wait(ctx->mfc_wq,
   1518			   spufs_read_mfc_tagstatus(ctx, &status));
   1519		if (ret)
   1520			goto out;
   1521	}
   1522	spu_release(ctx);
   1523
   1524	ret = 4;
   1525	if (copy_to_user(buffer, &status, 4))
   1526		ret = -EFAULT;
   1527
   1528out:
   1529	return ret;
   1530}
   1531
   1532static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
   1533{
   1534	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
   1535		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);
   1536
   1537	switch (cmd->cmd) {
   1538	case MFC_PUT_CMD:
   1539	case MFC_PUTF_CMD:
   1540	case MFC_PUTB_CMD:
   1541	case MFC_GET_CMD:
   1542	case MFC_GETF_CMD:
   1543	case MFC_GETB_CMD:
   1544		break;
   1545	default:
   1546		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
   1547		return -EIO;
   1548	}
   1549
   1550	if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
   1551		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
   1552				cmd->ea, cmd->lsa);
   1553		return -EIO;
   1554	}
   1555
   1556	switch (cmd->size & 0xf) {
   1557	case 1:
   1558		break;
   1559	case 2:
   1560		if (cmd->lsa & 1)
   1561			goto error;
   1562		break;
   1563	case 4:
   1564		if (cmd->lsa & 3)
   1565			goto error;
   1566		break;
   1567	case 8:
   1568		if (cmd->lsa & 7)
   1569			goto error;
   1570		break;
   1571	case 0:
   1572		if (cmd->lsa & 15)
   1573			goto error;
   1574		break;
   1575	error:
   1576	default:
   1577		pr_debug("invalid DMA alignment %x for size %x\n",
   1578			cmd->lsa & 0xf, cmd->size);
   1579		return -EIO;
   1580	}
   1581
   1582	if (cmd->size > 16 * 1024) {
   1583		pr_debug("invalid DMA size %x\n", cmd->size);
   1584		return -EIO;
   1585	}
   1586
   1587	if (cmd->tag & 0xfff0) {
   1588		/* we reserve the higher tag numbers for kernel use */
   1589		pr_debug("invalid DMA tag\n");
   1590		return -EIO;
   1591	}
   1592
   1593	if (cmd->class) {
   1594		/* not supported in this version */
   1595		pr_debug("invalid DMA class\n");
   1596		return -EIO;
   1597	}
   1598
   1599	return 0;
   1600}
   1601
/*
 * Wait condition for the blocking path of spufs_mfc_write(): try to
 * queue the command, once more after arming the completion interrupt
 * if the queue was full.  Returns 1 to stop waiting (with the queueing
 * result in *error), 0 to keep sleeping until the queue drains.
 * Note the command is passed by value so each retry uses a fresh copy.
 */
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}
   1619
/*
 * Queue one MFC DMA command written as a struct mfc_dma_command.
 * The context must reach the runnable state first; in blocking mode we
 * then sleep until the command fits in the queue.  On success the
 * command's tag group is added to ctx->tagwait so spufs_mfc_read() can
 * wait for its completion.
 */
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	/* NOTE: on failure spufs_wait() has already dropped the context
	 * lock, which is why these paths skip out_unlock */
	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	/* remember the tag group so reads/polls can wait on it */
	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
   1669
/*
 * poll() on the mfc file: writable while the command queue has free
 * slots, readable once an armed tag group has completed.
 */
static __poll_t spufs_mfc_poll(struct file *file,poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	__poll_t mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	/* mode 2 presumably arms the completion query without waiting —
	 * confirm against the ops implementation */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= EPOLLIN | EPOLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}
   1699
/*
 * flush() on the mfc file.  The intended behaviour — draining all
 * outstanding tag groups — is compiled out because it hangs; currently
 * this only acquires and releases the context.
 */
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}
   1725
   1726static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
   1727{
   1728	struct inode *inode = file_inode(file);
   1729	int err = file_write_and_wait_range(file, start, end);
   1730	if (!err) {
   1731		inode_lock(inode);
   1732		err = spufs_mfc_flush(file, NULL);
   1733		inode_unlock(inode);
   1734	}
   1735	return err;
   1736}
   1737
/* mfc file: full command-queue interface plus problem-state mmap. */
static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.mmap	 = spufs_mfc_mmap,
	.llseek  = no_llseek,
};
   1749
   1750static int spufs_npc_set(void *data, u64 val)
   1751{
   1752	struct spu_context *ctx = data;
   1753	int ret;
   1754
   1755	ret = spu_acquire(ctx);
   1756	if (ret)
   1757		return ret;
   1758	ctx->ops->npc_write(ctx, val);
   1759	spu_release(ctx);
   1760
   1761	return 0;
   1762}
   1763
/* Attribute getter: read the SPU next program counter. */
static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);
   1770
/* Set the SPU decrementer; requires the context in saved state so the
 * value can be patched into the local-store save area. */
static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

/* Read the saved SPU decrementer value. */
static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
   1793
/* Start/stop the saved decrementer by toggling the MFC control bit. */
static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

/* Report whether the saved decrementer is running. */
static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
   1821
/* Set the saved SPU event mask in the local-store save area. */
static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

/* Read the saved SPU event mask. */
static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
   1846
/* Read pending event status from saved channel 0; 0 when the channel
 * count shows nothing pending. */
static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
   1858
/* Set the saved SRR0 (interrupt return address) in the save area. */
static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

/* Read the saved SRR0 value. */
static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
   1881
/* Physical SPU number while the context is loaded, else all-ones. */
static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)
   1895
/* Opaque user-assigned object id (used e.g. by debuggers); accessed
 * without taking the context. */
static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
   1912
/* Read the saved local-store limit register. */
static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
   1919
   1920static int spufs_info_open(struct inode *inode, struct file *file)
   1921{
   1922	struct spufs_inode_info *i = SPUFS_I(inode);
   1923	struct spu_context *ctx = i->i_ctx;
   1924	file->private_data = ctx;
   1925	return 0;
   1926}
   1927
   1928static int spufs_caps_show(struct seq_file *s, void *private)
   1929{
   1930	struct spu_context *ctx = s->private;
   1931
   1932	if (!(ctx->flags & SPU_CREATE_NOSCHED))
   1933		seq_puts(s, "sched\n");
   1934	if (!(ctx->flags & SPU_CREATE_ISOLATE))
   1935		seq_puts(s, "step\n");
   1936	return 0;
   1937}
   1938
/* seq_file open for the caps file. */
static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}
   1943
/* caps file: read-only seq_file. */
static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
   1950
/*
 * Coredump helper: emit the saved PU mailbox datum, but only if the
 * low status byte indicates an entry is present.
 */
static ssize_t spufs_mbox_info_dump(struct spu_context *ctx,
		struct coredump_params *cprm)
{
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;
	return spufs_dump_emit(cprm, &ctx->csa.prob.pu_mb_R,
			       sizeof(ctx->csa.prob.pu_mb_R));
}
   1959
/*
 * Read the saved PU mailbox: snapshot status and datum under the
 * register lock with the context saved, then report EOF when the
 * status byte shows no entry.
 */
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 stat, data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	stat = ctx->csa.prob.mb_stat_R;
	data = ctx->csa.prob.pu_mb_R;
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	/* EOF if there's no entry in the mbox */
	if (!(stat & 0x0000ff))
		return 0;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
   1982
/* mbox_info file: read-only snapshot of the saved mailbox. */
static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
   1988
/*
 * Coredump helper: emit the saved interrupting-mailbox datum, guarded
 * by the corresponding status bits (0xff0000).
 */
static ssize_t spufs_ibox_info_dump(struct spu_context *ctx,
		struct coredump_params *cprm)
{
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;
	return spufs_dump_emit(cprm, &ctx->csa.priv2.puint_mb_R,
			       sizeof(ctx->csa.priv2.puint_mb_R));
}
   1997
/*
 * Read the saved interrupting mailbox, analogous to
 * spufs_mbox_info_read() but using the ibox status bits.
 */
static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 stat, data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	stat = ctx->csa.prob.mb_stat_R;
	data = ctx->csa.priv2.puint_mb_R;
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	/* EOF if there's no entry in the ibox */
	if (!(stat & 0xff0000))
		return 0;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
   2020
/* ibox_info file: read-only snapshot of the saved interrupting mailbox. */
static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};
   2026
   2027static size_t spufs_wbox_info_cnt(struct spu_context *ctx)
   2028{
   2029	return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32);
   2030}
   2031
/* Coredump helper: emit only the occupied part of the wbox queue. */
static ssize_t spufs_wbox_info_dump(struct spu_context *ctx,
		struct coredump_params *cprm)
{
	return spufs_dump_emit(cprm, &ctx->csa.spu_mailbox_data,
			spufs_wbox_info_cnt(ctx));
}
   2038
/*
 * Read the saved wbox queue: copy the whole array under the register
 * lock, but expose only the `count` bytes that are actually occupied.
 */
static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
	int ret, count;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	count = spufs_wbox_info_cnt(ctx);
	memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return simple_read_from_buffer(buf, len, pos, &data,
				count * sizeof(u32));
}
   2058
/* wbox_info file: read-only snapshot of the saved outbound mailbox. */
static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};
   2064
/*
 * Fill a struct spu_dma_info from the saved context: tag query state,
 * tag mask, selected saved channels (indices 24/25/27 — presumably the
 * MFC tag status/stall/atomic channels, confirm against the CBEA
 * channel map) and the 16-entry SPU command queue.
 */
static void spufs_get_dma_info(struct spu_context *ctx,
		struct spu_dma_info *info)
{
	int i;

	info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info->dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		struct mfc_cq_sr *qp = &info->dma_info_command_data[i];
		struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}
}
   2085
/* Coredump helper: emit the assembled DMA info structure. */
static ssize_t spufs_dma_info_dump(struct spu_context *ctx,
		struct coredump_params *cprm)
{
	struct spu_dma_info info;

	spufs_get_dma_info(ctx, &info);
	return spufs_dump_emit(cprm, &info, sizeof(info));
}
   2094
/*
 * Read the DMA info snapshot, assembled under the register lock with
 * the context in saved state.
 */
static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_dma_info info;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	spufs_get_dma_info(ctx, &info);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof(info));
}
   2113
/* dma_info file: read-only, non-seekable snapshot. */
static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
	.llseek = no_llseek,
};
   2119
/*
 * Fill a struct spu_proxydma_info from the saved problem-state query
 * registers and the 8-entry proxy (PU-side) command queue.
 */
static void spufs_get_proxydma_info(struct spu_context *ctx,
		struct spu_proxydma_info *info)
{
	int i;

	info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;

	for (i = 0; i < 8; i++) {
		struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
		struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}
}
   2139
   2140static ssize_t spufs_proxydma_info_dump(struct spu_context *ctx,
   2141		struct coredump_params *cprm)
   2142{
   2143	struct spu_proxydma_info info;
   2144
   2145	spufs_get_proxydma_info(ctx, &info);
   2146	return spufs_dump_emit(cprm, &info, sizeof(info));
   2147}
   2148
   2149static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
   2150				   size_t len, loff_t *pos)
   2151{
   2152	struct spu_context *ctx = file->private_data;
   2153	struct spu_proxydma_info info;
   2154	int ret;
   2155
   2156	if (len < sizeof(info))
   2157		return -EINVAL;
   2158
   2159	ret = spu_acquire_saved(ctx);
   2160	if (ret)
   2161		return ret;
   2162	spin_lock(&ctx->csa.register_lock);
   2163	spufs_get_proxydma_info(ctx, &info);
   2164	spin_unlock(&ctx->csa.register_lock);
   2165	spu_release_saved(ctx);
   2166
   2167	return simple_read_from_buffer(buf, len, pos, &info,
   2168				sizeof(info));
   2169}
   2170
/* Read-only "proxydma_info" file: binary snapshot of the proxy DMA state. */
static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
	.llseek = no_llseek,
};
   2176
   2177static int spufs_show_tid(struct seq_file *s, void *private)
   2178{
   2179	struct spu_context *ctx = s->private;
   2180
   2181	seq_printf(s, "%d\n", ctx->tid);
   2182	return 0;
   2183}
   2184
   2185static int spufs_tid_open(struct inode *inode, struct file *file)
   2186{
   2187	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
   2188}
   2189
/* Read-only "tid" file, implemented via the single_open seq_file helpers. */
static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
   2196
/*
 * State names printed by spufs_show_stat(), indexed by
 * ctx->stats.util_state -- presumably matching the order of
 * enum spu_utilization_state; verify against its definition.
 */
static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};
   2200
   2201static unsigned long long spufs_acct_time(struct spu_context *ctx,
   2202		enum spu_utilization_state state)
   2203{
   2204	unsigned long long time = ctx->stats.times[state];
   2205
   2206	/*
   2207	 * In general, utilization statistics are updated by the controlling
   2208	 * thread as the spu context moves through various well defined
   2209	 * state transitions, but if the context is lazily loaded its
   2210	 * utilization statistics are not updated as the controlling thread
   2211	 * is not tightly coupled with the execution of the spu context.  We
   2212	 * calculate and apply the time delta from the last recorded state
   2213	 * of the spu context.
   2214	 */
   2215	if (ctx->spu && ctx->stats.util_state == state) {
   2216		time += ktime_get_ns() - ctx->stats.tstamp;
   2217	}
   2218
   2219	return time / NSEC_PER_MSEC;
   2220}
   2221
   2222static unsigned long long spufs_slb_flts(struct spu_context *ctx)
   2223{
   2224	unsigned long long slb_flts = ctx->stats.slb_flt;
   2225
   2226	if (ctx->state == SPU_STATE_RUNNABLE) {
   2227		slb_flts += (ctx->spu->stats.slb_flt -
   2228			     ctx->stats.slb_flt_base);
   2229	}
   2230
   2231	return slb_flts;
   2232}
   2233
   2234static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
   2235{
   2236	unsigned long long class2_intrs = ctx->stats.class2_intr;
   2237
   2238	if (ctx->state == SPU_STATE_RUNNABLE) {
   2239		class2_intrs += (ctx->spu->stats.class2_intr -
   2240				 ctx->stats.class2_intr_base);
   2241	}
   2242
   2243	return class2_intrs;
   2244}
   2245
   2246
/*
 * seq_file show callback for the per-context "stat" file.  Emits one
 * line: utilization state name, per-state times in milliseconds,
 * voluntary/involuntary context switch counts, SLB/hash/minor/major
 * fault counts, class 2 interrupt count and the libassist counter.
 * The context is acquired so the counters are read consistently.
 */
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}
   2274
   2275static int spufs_stat_open(struct inode *inode, struct file *file)
   2276{
   2277	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
   2278}
   2279
/* Read-only "stat" file, implemented via the single_open seq_file helpers. */
static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
   2286
/*
 * Number of entries currently queued in the switch-log ring buffer.
 * head and tail are always advanced modulo SWITCH_LOG_BUFSIZE (see
 * spufs_switch_log_read() and spu_switch_log_notify()), so the modulo
 * here maps a wrapped head back into range.
 */
static inline int spufs_switch_log_used(struct spu_context *ctx)
{
	return (ctx->switch_log->head - ctx->switch_log->tail) %
		SWITCH_LOG_BUFSIZE;
}
   2292
/* Number of free slots remaining in the switch-log ring buffer. */
static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}
   2297
   2298static int spufs_switch_log_open(struct inode *inode, struct file *file)
   2299{
   2300	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
   2301	int rc;
   2302
   2303	rc = spu_acquire(ctx);
   2304	if (rc)
   2305		return rc;
   2306
   2307	if (ctx->switch_log) {
   2308		rc = -EBUSY;
   2309		goto out;
   2310	}
   2311
   2312	ctx->switch_log = kmalloc(struct_size(ctx->switch_log, log,
   2313				  SWITCH_LOG_BUFSIZE), GFP_KERNEL);
   2314
   2315	if (!ctx->switch_log) {
   2316		rc = -ENOMEM;
   2317		goto out;
   2318	}
   2319
   2320	ctx->switch_log->head = ctx->switch_log->tail = 0;
   2321	init_waitqueue_head(&ctx->switch_log->wait);
   2322	rc = 0;
   2323
   2324out:
   2325	spu_release(ctx);
   2326	return rc;
   2327}
   2328
   2329static int spufs_switch_log_release(struct inode *inode, struct file *file)
   2330{
   2331	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
   2332	int rc;
   2333
   2334	rc = spu_acquire(ctx);
   2335	if (rc)
   2336		return rc;
   2337
   2338	kfree(ctx->switch_log);
   2339	ctx->switch_log = NULL;
   2340	spu_release(ctx);
   2341
   2342	return 0;
   2343}
   2344
   2345static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
   2346{
   2347	struct switch_log_entry *p;
   2348
   2349	p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
   2350
   2351	return snprintf(tbuf, n, "%llu.%09u %d %u %u %llu\n",
   2352			(unsigned long long) p->tstamp.tv_sec,
   2353			(unsigned int) p->tstamp.tv_nsec,
   2354			p->spu_id,
   2355			(unsigned int) p->type,
   2356			(unsigned int) p->val,
   2357			(unsigned long long) p->timebase);
   2358}
   2359
   2360static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
   2361			     size_t len, loff_t *ppos)
   2362{
   2363	struct inode *inode = file_inode(file);
   2364	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
   2365	int error = 0, cnt = 0;
   2366
   2367	if (!buf)
   2368		return -EINVAL;
   2369
   2370	error = spu_acquire(ctx);
   2371	if (error)
   2372		return error;
   2373
   2374	while (cnt < len) {
   2375		char tbuf[128];
   2376		int width;
   2377
   2378		if (spufs_switch_log_used(ctx) == 0) {
   2379			if (cnt > 0) {
   2380				/* If there's data ready to go, we can
   2381				 * just return straight away */
   2382				break;
   2383
   2384			} else if (file->f_flags & O_NONBLOCK) {
   2385				error = -EAGAIN;
   2386				break;
   2387
   2388			} else {
   2389				/* spufs_wait will drop the mutex and
   2390				 * re-acquire, but since we're in read(), the
   2391				 * file cannot be _released (and so
   2392				 * ctx->switch_log is stable).
   2393				 */
   2394				error = spufs_wait(ctx->switch_log->wait,
   2395						spufs_switch_log_used(ctx) > 0);
   2396
   2397				/* On error, spufs_wait returns without the
   2398				 * state mutex held */
   2399				if (error)
   2400					return error;
   2401
   2402				/* We may have had entries read from underneath
   2403				 * us while we dropped the mutex in spufs_wait,
   2404				 * so re-check */
   2405				if (spufs_switch_log_used(ctx) == 0)
   2406					continue;
   2407			}
   2408		}
   2409
   2410		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
   2411		if (width < len)
   2412			ctx->switch_log->tail =
   2413				(ctx->switch_log->tail + 1) %
   2414				 SWITCH_LOG_BUFSIZE;
   2415		else
   2416			/* If the record is greater than space available return
   2417			 * partial buffer (so far) */
   2418			break;
   2419
   2420		error = copy_to_user(buf + cnt, tbuf, width);
   2421		if (error)
   2422			break;
   2423		cnt += width;
   2424	}
   2425
   2426	spu_release(ctx);
   2427
   2428	return cnt == 0 ? error : cnt;
   2429}
   2430
/*
 * poll/select support for the "switch_log" file: report EPOLLIN when
 * at least one log entry is queued.
 */
static __poll_t spufs_switch_log_poll(struct file *file, poll_table *wait)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	__poll_t mask = 0;
	int rc;

	poll_wait(file, &ctx->switch_log->wait, wait);

	rc = spu_acquire(ctx);
	if (rc)
		/* NOTE(review): a negative errno is returned through
		 * __poll_t here, which callers interpret as an event
		 * mask -- confirm this is intentional. */
		return rc;

	if (spufs_switch_log_used(ctx) > 0)
		mask |= EPOLLIN;

	spu_release(ctx);

	return mask;
}
   2451
/* "switch_log" file: blocking text stream of context-switch events. */
static const struct file_operations spufs_switch_log_fops = {
	.open		= spufs_switch_log_open,
	.read		= spufs_switch_log_read,
	.poll		= spufs_switch_log_poll,
	.release	= spufs_switch_log_release,
	.llseek		= no_llseek,
};
   2459
   2460/**
   2461 * Log a context switch event to a switch log reader.
   2462 *
   2463 * Must be called with ctx->state_mutex held.
   2464 */
   2465void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
   2466		u32 type, u32 val)
   2467{
   2468	if (!ctx->switch_log)
   2469		return;
   2470
   2471	if (spufs_switch_log_avail(ctx) > 1) {
   2472		struct switch_log_entry *p;
   2473
   2474		p = ctx->switch_log->log + ctx->switch_log->head;
   2475		ktime_get_ts64(&p->tstamp);
   2476		p->timebase = get_tb();
   2477		p->spu_id = spu ? spu->number : -1;
   2478		p->type = type;
   2479		p->val = val;
   2480
   2481		ctx->switch_log->head =
   2482			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
   2483	}
   2484
   2485	wake_up(&ctx->switch_log->wait);
   2486}
   2487
/*
 * seq_file show callback for the debug ".ctx" file.  Prints a one-line
 * summary of the context: saved/running state, flags, scheduling
 * parameters, SPU number, run-queue membership, pending class 0/1
 * fault information, MFC control register, run control and status.
 */
static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		/* Context is loaded: read MFC_CNTL from the live SPU,
		 * under its register lock. */
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		/* Context is saved: use the copy in the save area. */
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %llx %llx %llx %llx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}
   2527
   2528static int spufs_ctx_open(struct inode *inode, struct file *file)
   2529{
   2530	return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
   2531}
   2532
/* Read-only ".ctx" debug file, via the single_open seq_file helpers. */
static const struct file_operations spufs_ctx_fops = {
	.open           = spufs_ctx_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
   2539
/*
 * Files created in every scheduled spufs context directory:
 * name, file_operations, mode and (optionally) a size hint.
 */
const struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops,  0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
		sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
		sizeof(struct spu_proxydma_info)},
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444 },
	{},
};
   2580
/*
 * Reduced file set for non-scheduled (NOSCHED) contexts; files that
 * require the scheduler or a saved state image are omitted, and the
 * signal notification files use the nosched variants.
 */
const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
   2605
/* Extra files created only when the debug view of a context is enabled. */
const struct spufs_tree_descr spufs_dir_debug_contents[] = {
	{ ".ctx", &spufs_ctx_fops, 0444, },
	{},
};
   2610
/*
 * Per-context notes included in an SPU coredump.  Each entry gives the
 * note name, a dump callback for binary payloads, a getter for single
 * u64 attributes, and the note payload size.  The literal 19 is
 * presumably the length of a u64 formatted as text ("0x" + 16 hex
 * digits + '\n') -- verify against the coredump attribute writer.
 */
const struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", spufs_regs_dump, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", spufs_fpcr_dump, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", spufs_mem_dump, NULL, LS_SIZE, },
	{ "signal1", spufs_signal1_dump, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", spufs_signal2_dump, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", spufs_mbox_info_dump, NULL, sizeof(u32) },
	{ "ibox_info", spufs_ibox_info_dump, NULL, sizeof(u32) },
	{ "wbox_info", spufs_wbox_info_dump, NULL, 4 * sizeof(u32)},
	{ "dma_info", spufs_dma_info_dump, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", spufs_proxydma_info_dump,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};