/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};
static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}
static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
};
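/*
 * Illustration (not part of the original file): a minimal sketch of how
 * the macro above is meant to be instantiated.  The spufs_foo_* names
 * are hypothetical:
 *
 *	static int spufs_foo_get(void *data, u64 *val)
 *	{
 *		*val = 42;
 *		return 0;
 *	}
 *
 *	static int spufs_foo_set(void *data, u64 val)
 *	{
 *		return 0;
 *	}
 *
 *	DEFINE_SPUFS_SIMPLE_ATTRIBUTE(spufs_foo_ops, spufs_foo_get,
 *				      spufs_foo_set, "0x%llx\n");
 *
 * This expands to a spufs_foo_ops_open() wrapper plus a file_operations
 * named spufs_foo_ops whose read and write paths go through
 * spufs_attr_read() and spufs_attr_write() above.
 */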
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}
static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}
static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	struct spu_context *ctx	= vma->vm_file->private_data;
	unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
		 addr0, address, offset);

	if (spu_acquire(ctx))
		return NOPFN_REFAULT;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
							& ~_PAGE_NO_CACHE);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return NOPFN_REFAULT;
}

static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};
static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}
#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */
static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
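/*
 * Illustration (not part of the original file): user space typically maps
 * the local store through this file, along the lines of
 *
 *	void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, mem_fd, 0);
 *
 * where mem_fd is a hypothetical descriptor for a context's "mem" file.
 * MAP_SHARED is mandatory here; spufs_mem_mmap() above rejects mappings
 * without VM_SHARED.
 */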
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;
	int ret = 0;

	spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx);

	offset += vma->vm_pgoff << PAGE_SHIFT;
	if (offset >= ps_size)
		return NOPFN_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * while we're in spu_wait.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return NOPFN_REFAULT because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu);
	}

	if (!ret)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return NOPFN_REFAULT;
}
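/*
 * Note (not part of the original file): returning NOPFN_REFAULT without
 * inserting a PFN makes the caller retry the faulting access, so the
 * saved-state path above (drop mmap_sem, wait for the context to become
 * runnable, retake mmap_sem) resolves the fault on a later retry once
 * the problem state area is backed by real hardware.
 */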
#if SPUFS_MMAP_4K
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}
static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}
static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.mmap = spufs_cntl_mmap,
};
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}
static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}
static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}
static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};
static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}
static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}
static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;

	*pos += size;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}
static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}
/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};
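/*
 * Illustration (not part of the original file): user space consumes the
 * mailbox in four-byte units, e.g.
 *
 *	u32 data;
 *	ssize_t n = read(mbox_fd, &data, 4);
 *
 * where mbox_fd is a hypothetical descriptor for a context's "mbox"
 * file.  A return of -1 with errno == EAGAIN means the mailbox was
 * empty; spufs_mbox_read() never blocks waiting for data.
 */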
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}
/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}
static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}
/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}
static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}
static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}
static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}
static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.nopfn = spufs_signal1_mmap_nopfn,
};
static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}
static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}
static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}
static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}
#if SPUFS_MMAP_4K
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.nopfn = spufs_signal2_mmap_nopfn,
};
static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
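/*
 * Note (not part of the original file): as a concrete example, the
 * DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, ...) instantiation below
 * generates a __spufs_signal1_type_get() wrapper that brackets
 * spufs_signal1_type_get() with spu_acquire()/spu_release(), and then
 * feeds that wrapper to DEFINE_SPUFS_SIMPLE_ATTRIBUTE to produce the
 * file_operations.
 */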
static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);
static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);
#if SPUFS_MMAP_4K
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
};
static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					    unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}
static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
};
#if SPUFS_MMAP_4K
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.nopfn = spufs_mfc_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
		if (ret)
			goto out;
	}
	spu_release(ctx);

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}
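/*
 * Illustration (not part of the original file): a command that would
 * pass the checks above (values are examples only):
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa  = 0x100,		(16-byte aligned LS address)
 *		.ea   = 0x10000100,	(low four bits match .lsa)
 *		.size = 0x80,		(at most 16kB, naturally aligned)
 *		.tag  = 1,		(tags 16 and up are kernel-reserved)
 *		.cmd  = MFC_PUT_CMD,
 *	};
 */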
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}
static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}
static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};
static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);
static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)
static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}
static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");

	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}
static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the mbox */
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;

	data = ctx->csa.prob.pu_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the ibox */
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;

	data = ctx->csa.priv2.puint_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}
static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}
static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}
static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}
static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}
static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};
static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}
static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
struct tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
struct tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};