file.c 64.3 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22
/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

23 24
#undef DEBUG

25 26
#include <linux/fs.h>
#include <linux/ioctl.h>
27
#include <linux/export.h>
28
#include <linux/pagemap.h>
29
#include <linux/poll.h>
30
#include <linux/ptrace.h>
31
#include <linux/seq_file.h>
32
#include <linux/slab.h>
33 34

#include <asm/io.h>
35
#include <asm/time.h>
36
#include <asm/spu.h>
37
#include <asm/spu_info.h>
38 39 40
#include <asm/uaccess.h>

#include "spufs.h"
41
#include "sputrace.h"
42

43 44
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150
/*
 * Simple attribute files: expose a single u64 value through read()/write(),
 * formatted as text via the fmt string (see DEFINE_SPUFS_SIMPLE_ATTRIBUTE).
 * One instance is allocated per open file in spufs_attr_open().
 */
struct spufs_attr {
	int (*get)(void *, u64 *);	/* fetch current value, 0 on success */
	int (*set)(void *, u64);	/* store a new value */
	char get_buf[24];       /* enough to store a u64 and "\n\0" */
	char set_buf[24];	/* scratch buffer for parsing user writes */
	void *data;		/* opaque cookie handed to get/set */
	const char *fmt;        /* format for read operation */
	struct mutex mutex;     /* protects access to these buffers */
};

/*
 * Open helper for simple attribute files: allocate the per-file
 * spufs_attr state, wire in the get/set callbacks and the format
 * string, and attach it to the file.  The attribute's backing data
 * comes from the inode's i_private.
 */
static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *state = kmalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;

	mutex_init(&state->mutex);
	state->get = get;
	state->set = set;
	state->data = inode->i_private;
	state->fmt = fmt;

	file->private_data = state;
	return nonseekable_open(inode, file);
}

/*
 * Release the per-file spufs_attr state allocated in spufs_attr_open().
 * (Fixed: the kfree line was indented with spaces instead of a tab,
 * inconsistent with the rest of the file.)
 */
static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

/*
 * read() for a simple attribute file: on the first read (*ppos == 0)
 * query the value through attr->get and format it into get_buf; on
 * continued reads serve the remainder of the already-formatted buffer
 * so a partial read stays self-consistent even if the value changes.
 */
static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

/*
 * write() for a simple attribute file: copy in at most set_buf-1 bytes,
 * parse them as a number (base auto-detected by simple_strtol, so 0x
 * prefixes work) and pass the result to attr->set.  The full input
 * length is claimed as consumed even when the buffer was truncated,
 * and the set() callback's return value is deliberately ignored.
 */
static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

/*
 * Generate an open() wrapper and a file_operations instance for a
 * simple attribute file backed by __get/__set callbacks.  The
 * __simple_attr_check_format() call only type-checks __fmt against a
 * u64 at compile time; it has no runtime effect.
 */
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek  = generic_file_llseek,					\
};

160

161 162 163 164
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
165
	struct spu_context *ctx = i->i_ctx;
166

167
	mutex_lock(&ctx->mapping_lock);
168
	file->private_data = ctx;
169 170
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
171
	mutex_unlock(&ctx->mapping_lock);
172 173 174 175 176 177 178 179 180
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

181
	mutex_lock(&ctx->mapping_lock);
182 183
	if (!--i->i_openers)
		ctx->local_store = NULL;
184
	mutex_unlock(&ctx->mapping_lock);
185 186 187
	return 0;
}

188 189 190 191 192 193 194 195 196
/*
 * Copy from the context's local store into the user buffer.  Caller
 * must already hold the context (see spufs_mem_read).
 */
static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *ls = ctx->ops->get_ls(ctx);

	return simple_read_from_buffer(buffer, size, pos, ls, LS_SIZE);
}

197 198 199 200
/*
 * read() on the local store file: acquire the context, then hand off
 * to __spufs_mem_read().
 */
static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;
	rc = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);
	return rc;
}

/*
 * write() on the local store file.  simple_write_to_buffer() clamps
 * the copy to the space remaining below LS_SIZE.
 * NOTE(review): the bound check uses '>' rather than '>=', so a write
 * starting exactly at LS_SIZE returns 0 instead of -EFBIG -- confirm
 * whether that is intentional.
 */
static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos > LS_SIZE)
		return -EFBIG;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
	spu_release(ctx);

	return size;
}

Nick Piggin's avatar
Nick Piggin committed
236 237
/*
 * Fault handler for local-store mappings.  Depending on the context
 * state, faults are backed either by the saved local store image in
 * the context save area (cacheable pages) or by the physical local
 * store of the SPU the context is currently loaded on (write-combining
 * pages).
 */
static int
spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct spu_context *ctx	= vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	unsigned long pfn, offset;

#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			address, offset);

	/* Interrupted acquire: report NOPAGE so the fault is retried. */
	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return VM_FAULT_NOPAGE;
}

284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305
/*
 * ->access handler for local-store mappings (used by ptrace and
 * friends): copy between @buf and the local store backing @vma.
 *
 * Fixed: the length clamp previously compared the VMA-relative
 * @offset against the absolute vma->vm_end address, which on any
 * real VMA never limited @len (and would compute a bogus length if
 * it ever fired).  Clamp against the VMA's size instead.
 */
static int spufs_mem_mmap_access(struct vm_area_struct *vma,
				unsigned long address,
				void *buf, int len, int write)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	char *local_store;

	if (write && !(vma->vm_flags & VM_WRITE))
		return -EACCES;
	if (spu_acquire(ctx))
		return -EINTR;
	if ((unsigned long)len > vma_size - offset)
		len = vma_size - offset;
	local_store = ctx->ops->get_ls(ctx);
	if (write)
		memcpy_toio(local_store + offset, buf, len);
	else
		memcpy_fromio(buf, local_store + offset, len);
	spu_release(ctx);
	return len;
}
306

307
/* VM operations for mmap'ed local store: fault-in plus ptrace access. */
static const struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};

312
/*
 * mmap() of the local store file.  Only shared mappings are allowed;
 * the mapping is IO/PFN-backed and defaults to write-combining.  With
 * 64K local-store pages the VMA start and pgoff must additionally be
 * 64K aligned.
 */
static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

340
#ifdef CONFIG_SPU_FS_64K_LS
341 342 343
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
344 345 346 347 348 349 350 351 352 353 354 355 356 357 358
{
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */

359
/* File operations for "mem": direct access to the SPU local store. */
static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};

Nick Piggin's avatar
Nick Piggin committed
371 372
/*
 * Common fault handler for mappings of the problem state area.
 * @ps_offs/@ps_size select the sub-range of problem state that the
 * particular file maps.
 *
 * Pages can only be inserted while the context is loaded on an SPU;
 * for a saved context we sleep until it becomes runnable (dropping
 * mmap_sem across the sleep) and return VM_FAULT_NOPAGE so the fault
 * is simply retried.
 */
static int spufs_ps_fault(struct vm_area_struct *vma,
				    struct vm_fault *vmf,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int ret = 0;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * hanged.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
					(area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	/* On spufs_wait() failure the context is already released. */
	if (!ret)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return VM_FAULT_NOPAGE;
}

427
#if SPUFS_MMAP_4K
/* Fault handler for the problem state control area mapping. */
static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
					   struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

434
/* VM operations for the control-area mapping. */
static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 * Shared mappings only; pages are IO/PFN-backed and uncached.
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
455

456
/* Attribute getter for "cntl": read the SPU status register. */
static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);
	return 0;
}

470
/* Attribute setter for "cntl": write the SPU run control register. */
static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);
	return 0;
}

484
/*
 * Open the "cntl" file: the first opener records the inode mapping in
 * ctx->cntl, then the generic simple_attr machinery handles the
 * text-formatted read/write of the status/runcntl value.
 * NOTE(review): if simple_attr_open() fails, i_openers has already been
 * incremented and ctx->cntl set -- confirm the release path balances
 * this in that case.
 */
static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

498 499 500 501 502 503
static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

504
	simple_attr_release(inode, file);
505

506
	mutex_lock(&ctx->mapping_lock);
507 508
	if (!--i->i_openers)
		ctx->cntl = NULL;
509
	mutex_unlock(&ctx->mapping_lock);
510 511 512
	return 0;
}

513
/* File operations for "cntl": run control / status, plus mmap. */
static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.llseek	= generic_file_llseek,
	.mmap = spufs_cntl_mmap,
};

522 523 524 525 526 527 528 529
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

530 531 532 533 534 535 536 537 538
/*
 * Copy the saved GPR array out to the user buffer.  Caller must hold
 * the context in the saved state.
 */
static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof(lscsa->gprs));
}

539 540 541 542 543
/*
 * read() on the "regs" file: force the context into the saved state
 * and copy the GPRs out of the context save area.
 */
static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	/* pre-check for file position: if we'd return EOF, there's no point
	 * causing a deschedule */
	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
		return 0;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

/*
 * write() on the "regs" file: force the context into the saved state
 * and update the GPRs in the context save area.
 */
static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->gprs))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}

581
/* File operations for "regs": saved general purpose registers. */
static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};

588 589 590 591 592 593 594 595 596
/*
 * Copy the saved FPCR out to the user buffer.  Caller must hold the
 * context in the saved state.
 */
static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

597 598 599 600 601
/*
 * read() on the "fpcr" file: force the context into the saved state
 * and copy the FPCR out of the context save area.
 */
static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	int rc;

	rc = spu_acquire_saved(ctx);
	if (rc)
		return rc;
	rc = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return rc;
}

/*
 * write() on the "fpcr" file: force the context into the saved state
 * and update the FPCR in the context save area.
 */
static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->fpcr))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
					buffer, size);

	spu_release_saved(ctx);
	return size;
}

634
/* File operations for "fpcr": saved floating point status/control. */
static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

641 642 643 644 645 646 647 648 649
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

650 651 652 653 654 655 656 657
/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
658 659 660
/*
 * read() on the mailbox file (see the block comment above): drains up
 * to len/4 entries from the mailbox in one pass.  Returns the byte
 * count actually copied, -EAGAIN if the mailbox was empty, or -EFAULT
 * if even the first element could not be stored.
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

703
/* File operations for "mbox": read-only SPU-to-PPU mailbox. */
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
	.llseek	= no_llseek,
};

/*
 * read() on "mbox_stat": return the low byte of the mailbox status
 * register (the mbox entry count) as a single 4-byte value.
 */
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

733
/* File operations for "mbox_stat". */
static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
	.llseek = no_llseek,
};

/*
 * low-level ibox access function: non-blocking read of one entry from
 * the interrupt mailbox via the backend ops.
 */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}
744

745 746 747
/* Register/unregister @file for SIGIO delivery on ibox data arrival. */
static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

752 753
/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* No context currently attached to this SPU: nothing to wake. */
	if (!ctx)
		return;

	/* Wake blocked readers/pollers and post SIGIO to fasync users. */
	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

764 765 766 767 768 769 770 771 772 773 774 775
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
776 777 778
/*
 * read() on the interrupt mailbox file (see the block comment above):
 * wait for (or, with O_NONBLOCK, test for) the first element, then
 * drain as many further entries as fit in the buffer without blocking.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		/* "out", not "out_unlock": spufs_wait dropped the lock
		 * on failure. */
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

/* poll() on the interrupt mailbox file: readable when data is queued. */
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

852
/* File operations for "ibox": blocking interrupt mailbox reads. */
static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
	.llseek = no_llseek,
};

/*
 * read() on "ibox_stat": return bits 16-23 of the mailbox status
 * register (the ibox entry count) as a single 4-byte value.
 */
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

882
/* File operations for "ibox_stat". */
static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
	.llseek = no_llseek,
};

/*
 * low-level mailbox write: non-blocking attempt to queue one entry
 * into the PPU-to-SPU mailbox via the backend ops.
 */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}
893

894 895 896 897
static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;
898

899
	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
900 901 902 903

	return ret;
}

904 905
/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* No context currently attached to this SPU: nothing to wake. */
	if (!ctx)
		return;

	/* Wake blocked writers/pollers and post SIGIO to fasync users. */
	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

916 917 918 919 920 921 922 923 924 925 926 927
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is availabyl, but return when we have been able to
 * write something.
 */
928 929 930
/*
 * write() on the wbox file (see the block comment above): wait for (or,
 * with O_NONBLOCK, test for) room for the first element, then queue as
 * many further entries as possible without blocking.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		/* "out", not "out_unlock": spufs_wait dropped the lock
		 * on failure. */
		if (count)
			goto out;
	}


	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

/* poll() on the wbox file: writable when the mailbox has free space. */
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

1002
/* File operations for "wbox": blocking PPU-to-SPU mailbox writes. */
static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
	.llseek = no_llseek,
};

/*
 * read() on "wbox_stat": return bits 8-15 of the mailbox status
 * register (the wbox free-slot count) as a single 4-byte value.
 */
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

1032
static const struct file_operations spufs_wbox_stat_fops = {
1033 1034
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
1035
	.llseek = no_llseek,