/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"
#include "sputrace.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
/* Simple attribute files */
struct spufs_attr {
        int (*get)(void *, u64 *);
        int (*set)(void *, u64);
        char get_buf[24];       /* enough to store a u64 and "\n\0" */
        char set_buf[24];
        void *data;
        const char *fmt;        /* format for read operation */
        struct mutex mutex;     /* protects access to these buffers */
};
static int spufs_attr_open(struct inode *inode, struct file *file,
                int (*get)(void *, u64 *), int (*set)(void *, u64),
                const char *fmt)
{
        struct spufs_attr *attr;

        attr = kmalloc(sizeof(*attr), GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        attr->get = get;
        attr->set = set;
        attr->data = inode->i_private;
        attr->fmt = fmt;
        mutex_init(&attr->mutex);
        file->private_data = attr;

        return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
        kfree(file->private_data);
        return 0;
}
static ssize_t spufs_attr_read(struct file *file, char __user *buf,
                size_t len, loff_t *ppos)
{
        struct spufs_attr *attr;
        size_t size;
        ssize_t ret;

        attr = file->private_data;
        if (!attr->get)
                return -EACCES;

        ret = mutex_lock_interruptible(&attr->mutex);
        if (ret)
                return ret;

        if (*ppos) {            /* continued read */
                size = strlen(attr->get_buf);
        } else {                /* first read */
                u64 val;
                ret = attr->get(attr->data, &val);
                if (ret)
                        goto out;

                size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
                                 attr->fmt, (unsigned long long)val);
        }

        ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
        mutex_unlock(&attr->mutex);
        return ret;
}
static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
                size_t len, loff_t *ppos)
{
        struct spufs_attr *attr;
        u64 val;
        size_t size;
        ssize_t ret;

        attr = file->private_data;
        if (!attr->set)
                return -EACCES;

        ret = mutex_lock_interruptible(&attr->mutex);
        if (ret)
                return ret;

        ret = -EFAULT;
        size = min(sizeof(attr->set_buf) - 1, len);
        if (copy_from_user(attr->set_buf, buf, size))
                goto out;

        ret = len; /* claim we got the whole input */
        attr->set_buf[size] = '\0';
        val = simple_strtol(attr->set_buf, NULL, 0);
        attr->set(attr->data, val);
out:
        mutex_unlock(&attr->mutex);
        return ret;
}
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)     \
static int __fops ## _open(struct inode *inode, struct file *file)     \
{                                                                      \
        __simple_attr_check_format(__fmt, 0ull);                       \
        return spufs_attr_open(inode, file, __get, __set, __fmt);      \
}                                                                      \
static const struct file_operations __fops = {                         \
        .open    = __fops ## _open,                                    \
        .release = spufs_attr_release,                                 \
        .read    = spufs_attr_read,                                    \
        .write   = spufs_attr_write,                                   \
        .llseek  = generic_file_llseek,                                \
};
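
/*
 * Illustrative note (not part of the original source): an invocation
 * such as
 *
 *      DEFINE_SPUFS_SIMPLE_ATTRIBUTE(spufs_foo_ops, spufs_foo_get,
 *                                    spufs_foo_set, "0x%llx\n");
 *
 * expands to a spufs_foo_ops_open() helper plus a complete
 * "spufs_foo_ops" file_operations whose read/write paths go through
 * spufs_attr_read()/spufs_attr_write() above.  The spufs_foo_* names
 * are hypothetical and used only for illustration.
 */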
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->local_store = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->local_store = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
                        size_t size, loff_t *pos)
{
        char *local_store = ctx->ops->get_ls(ctx);
        return simple_read_from_buffer(buffer, size, pos, local_store,
                                        LS_SIZE);
}
static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
                                size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        ssize_t ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ret = __spufs_mem_read(ctx, buffer, size, pos);
        spu_release(ctx);

        return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
                                        size_t size, loff_t *ppos)
{
        struct spu_context *ctx = file->private_data;
        char *local_store;
        loff_t pos = *ppos;
        int ret;

        if (pos > LS_SIZE)
                return -EFBIG;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;

        local_store = ctx->ops->get_ls(ctx);
        size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
        spu_release(ctx);

        return size;
}
static int
spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long address = (unsigned long)vmf->virtual_address;
        unsigned long pfn, offset;

        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= LS_SIZE)
                return VM_FAULT_SIGBUS;

        pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
                        address, offset);

        if (spu_acquire(ctx))
                return VM_FAULT_NOPAGE;

        if (ctx->state == SPU_STATE_SAVED) {
                vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
                pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
        } else {
                vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
                pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
        }
        vm_insert_pfn(vma, address, pfn);

        spu_release(ctx);

        return VM_FAULT_NOPAGE;
}

static int spufs_mem_mmap_access(struct vm_area_struct *vma,
                                unsigned long address,
                                void *buf, int len, int write)
{
        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long offset = address - vma->vm_start;
        char *local_store;

        if (write && !(vma->vm_flags & VM_WRITE))
                return -EACCES;
        if (spu_acquire(ctx))
                return -EINTR;
        if ((offset + len) > vma->vm_end)
                len = vma->vm_end - offset;
        local_store = ctx->ops->get_ls(ctx);
        if (write)
                memcpy_toio(local_store + offset, buf, len);
        else
                memcpy_fromio(buf, local_store + offset, len);
        spu_release(ctx);
        return len;
}

static const struct vm_operations_struct spufs_mem_mmap_vmops = {
        .fault = spufs_mem_mmap_fault,
        .access = spufs_mem_mmap_access,
};
static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

        vma->vm_ops = &spufs_mem_mmap_vmops;
        return 0;
}

static const struct file_operations spufs_mem_fops = {
        .open    = spufs_mem_open,
        .release = spufs_mem_release,
        .read    = spufs_mem_read,
        .write   = spufs_mem_write,
        .llseek  = generic_file_llseek,
        .mmap    = spufs_mem_mmap,
};
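
/*
 * Illustrative userspace sketch (not part of the original source): the
 * "mem" file exposes the SPU local store either through read()/write()
 * or by mapping it directly.  The context path is an assumption for
 * the example; it would have been created with spu_create(2).
 *
 *      int fd = open("/spu/myctx/mem", O_RDWR);
 *      void *ls = mmap(NULL, LS_SIZE, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, fd, 0);
 *
 * MAP_SHARED is mandatory here: spufs_mem_mmap() above rejects any
 * mapping without VM_SHARED.
 */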
static int spufs_ps_fault(struct vm_area_struct *vma,
                          struct vm_fault *vmf,
                          unsigned long ps_offs,
                          unsigned long ps_size)
{
        struct spu_context *ctx = vma->vm_file->private_data;
        unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
        int ret = 0;

        spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

        if (offset >= ps_size)
                return VM_FAULT_SIGBUS;

        if (fatal_signal_pending(current))
                return VM_FAULT_SIGBUS;

        /*
         * Because we release the mmap_sem, the context may be destroyed while
         * we're in spu_wait. Grab an extra reference so it isn't destroyed
         * while we're holding the mmap_sem.
         */
        get_spu_context(ctx);

        /*
         * We have to wait for context to be loaded before we have
         * pages to hand out to the user, but we don't want to wait
         * with the mmap_sem held.
         * It is possible to drop the mmap_sem here, but then we need
         * to return VM_FAULT_NOPAGE because the mappings may have
         * changed.
         */
        if (spu_acquire(ctx))
                goto refault;

        if (ctx->state == SPU_STATE_SAVED) {
                up_read(&current->mm->mmap_sem);
                spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
                ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
                spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
                down_read(&current->mm->mmap_sem);
        } else {
                area = ctx->spu->problem_phys + ps_offs;
                vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
                                        (area + offset) >> PAGE_SHIFT);
                spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
        }

        if (!ret)
                spu_release(ctx);

refault:
        put_spu_context(ctx);
        return VM_FAULT_NOPAGE;
}
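
/*
 * Note (added for clarity, not in the original source): the ps_offs
 * arguments used by the callers below select slices of the problem
 * state area: 0x0000 for the MSS and full psmap files, 0x3000 for the
 * MFC, 0x4000 for the control area, and 0x14000/0x1c000 (or 0x10000
 * with 64k pages) for the two signal notification areas.
 */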
#if SPUFS_MMAP_4K
static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
                                           struct vm_fault *vmf)
{
        return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
        .fault = spufs_cntl_mmap_fault,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        vma->vm_ops = &spufs_cntl_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_cntl_get(void *data, u64 *val)
{
        struct spu_context *ctx = data;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        *val = ctx->ops->status_read(ctx);
        spu_release(ctx);

        return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ctx->ops->runcntl_write(ctx, val);
        spu_release(ctx);

        return 0;
}
static int spufs_cntl_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->cntl = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return simple_attr_open(inode, file, spufs_cntl_get,
                                        spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        simple_attr_release(inode, file);

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->cntl = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}

static const struct file_operations spufs_cntl_fops = {
        .open    = spufs_cntl_open,
        .release = spufs_cntl_release,
        .read    = simple_attr_read,
        .write   = simple_attr_write,
        .llseek  = generic_file_llseek,
        .mmap    = spufs_cntl_mmap,
};
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        file->private_data = i->i_ctx;
        return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
                        size_t size, loff_t *pos)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return simple_read_from_buffer(buffer, size, pos,
                                      lscsa->gprs, sizeof lscsa->gprs);
}
static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
                size_t size, loff_t *pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        /* pre-check for file position: if we'd return EOF, there's no point
         * causing a deschedule */
        if (*pos >= sizeof(ctx->csa.lscsa->gprs))
                return 0;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        ret = __spufs_regs_read(ctx, buffer, size, pos);
        spu_release_saved(ctx);
        return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
                size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        if (*pos >= sizeof(lscsa->gprs))
                return -EFBIG;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;

        size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
                                        buffer, size);

        spu_release_saved(ctx);
        return size;
}

static const struct file_operations spufs_regs_fops = {
        .open    = spufs_regs_open,
        .read    = spufs_regs_read,
        .write   = spufs_regs_write,
        .llseek  = generic_file_llseek,
};
static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user *buffer,
                        size_t size, loff_t *pos)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return simple_read_from_buffer(buffer, size, pos,
                                      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user *buffer,
                size_t size, loff_t *pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        ret = __spufs_fpcr_read(ctx, buffer, size, pos);
        spu_release_saved(ctx);
        return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user *buffer,
                size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        if (*pos >= sizeof(lscsa->fpcr))
                return -EFBIG;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;

        size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
                                        buffer, size);

        spu_release_saved(ctx);
        return size;
}

static const struct file_operations spufs_fpcr_fops = {
        .open    = spufs_regs_open,
        .read    = spufs_fpcr_read,
        .write   = spufs_fpcr_write,
        .llseek  = generic_file_llseek,
};
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        file->private_data = i->i_ctx;

        return nonseekable_open(inode, file);
}
/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 mbox_data, __user *udata;
        ssize_t count;

        if (len < 4)
                return -EINVAL;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        udata = (void __user *)buf;

        count = spu_acquire(ctx);
        if (count)
                return count;

        for (count = 0; (count + 4) <= len; count += 4, udata++) {
                int ret;
                ret = ctx->ops->mbox_read(ctx, &mbox_data);
                if (ret == 0)
                        break;

                /*
                 * at the end of the mapped area, we can fault
                 * but still need to return the data we have
                 * read successfully so far.
                 */
                ret = __put_user(mbox_data, udata);
                if (ret) {
                        if (!count)
                                count = -EFAULT;
                        break;
                }
        }
        spu_release(ctx);

        if (!count)
                count = -EAGAIN;

        return count;
}

static const struct file_operations spufs_mbox_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_mbox_read,
        .llseek = no_llseek,
};
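
/*
 * Illustrative userspace sketch (not part of the original source):
 * draining the SPU-to-PPE mailbox through the "mbox" file.  The
 * context path is an assumption for the example; reads are always in
 * 4-byte units, and an empty mailbox fails with EAGAIN.
 *
 *      int fd = open("/spu/myctx/mbox", O_RDONLY);
 *      u32 data;
 *      while (read(fd, &data, 4) == 4)
 *              printf("mbox entry: %#x\n", data);
 */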
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        ssize_t ret;
        u32 mbox_stat;

        if (len < 4)
                return -EINVAL;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;

        mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

        spu_release(ctx);

        if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
                return -EFAULT;

        return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_mbox_stat_read,
        .llseek = no_llseek,
};
/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
        return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;

        return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        if (!ctx)
                return;

        wake_up_all(&ctx->ibox_wq);
        kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 ibox_data, __user *udata;
        ssize_t count;

        if (len < 4)
                return -EINVAL;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        udata = (void __user *)buf;

        count = spu_acquire(ctx);
        if (count)
                goto out;

        /* wait only for the first element */
        count = 0;
        if (file->f_flags & O_NONBLOCK) {
                if (!spu_ibox_read(ctx, &ibox_data)) {
                        count = -EAGAIN;
                        goto out_unlock;
                }
        } else {
                count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
                if (count)
                        goto out;
        }

        /* if we can't write at all, return -EFAULT */
        count = __put_user(ibox_data, udata);
        if (count)
                goto out_unlock;

        for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
                int ret;
                ret = ctx->ops->ibox_read(ctx, &ibox_data);
                if (ret == 0)
                        break;
                /*
                 * at the end of the mapped area, we can fault
                 * but still need to return the data we have
                 * read successfully so far.
                 */
                ret = __put_user(ibox_data, udata);
                if (ret)
                        break;
        }

out_unlock:
        spu_release(ctx);
out:
        return count;
}
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        unsigned int mask;

        poll_wait(file, &ctx->ibox_wq, wait);

        /*
         * For now keep this uninterruptible and also ignore the rule
         * that poll should not sleep.  Will be fixed later.
         */
        mutex_lock(&ctx->state_mutex);
        mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
        spu_release(ctx);

        return mask;
}

static const struct file_operations spufs_ibox_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_ibox_read,
        .poll   = spufs_ibox_poll,
        .fasync = spufs_ibox_fasync,
        .llseek = no_llseek,
};
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        ssize_t ret;
        u32 ibox_stat;

        if (len < 4)
                return -EINVAL;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
        spu_release(ctx);

        if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
                return -EFAULT;

        return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_ibox_stat_read,
        .llseek = no_llseek,
};
/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
        return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

        return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        if (!ctx)
                return;

        wake_up_all(&ctx->wbox_wq);
        kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        u32 wbox_data, __user *udata;
        ssize_t count;

        if (len < 4)
                return -EINVAL;

        udata = (void __user *)buf;
        if (!access_ok(VERIFY_READ, buf, len))
                return -EFAULT;

        if (__get_user(wbox_data, udata))
                return -EFAULT;

        count = spu_acquire(ctx);
        if (count)
                goto out;

        /*
         * make sure we can at least write one element, by waiting
         * in case of !O_NONBLOCK
         */
        count = 0;
        if (file->f_flags & O_NONBLOCK) {
                if (!spu_wbox_write(ctx, wbox_data)) {
                        count = -EAGAIN;
                        goto out_unlock;
                }
        } else {
                count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
                if (count)
                        goto out;
        }

        /* write as much as possible */
        for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
                int ret;
                ret = __get_user(wbox_data, udata);
                if (ret)
                        break;

                ret = spu_wbox_write(ctx, wbox_data);
                if (ret == 0)
                        break;
        }

out_unlock:
        spu_release(ctx);
out:
        return count;
}
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        unsigned int mask;

        poll_wait(file, &ctx->wbox_wq, wait);

        /*
         * For now keep this uninterruptible and also ignore the rule
         * that poll should not sleep.  Will be fixed later.
         */
        mutex_lock(&ctx->state_mutex);
        mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
        spu_release(ctx);

        return mask;
}

static const struct file_operations spufs_wbox_fops = {
        .open   = spufs_pipe_open,
        .write  = spufs_wbox_write,
        .poll   = spufs_wbox_poll,
        .fasync = spufs_wbox_fasync,
        .llseek = no_llseek,
};
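
/*
 * Illustrative userspace sketch (not part of the original source):
 * feeding the PPE-to-SPU mailbox through the "wbox" file.  With
 * O_NONBLOCK a full mailbox fails with EAGAIN instead of sleeping in
 * spufs_wait() as in the blocking path above.  The context path is an
 * assumption for the example.
 *
 *      int fd = open("/spu/myctx/wbox", O_WRONLY | O_NONBLOCK);
 *      u32 data = 0x1234;
 *      if (write(fd, &data, 4) < 0 && errno == EAGAIN)
 *              ;  // mailbox full: retry later or poll() for POLLOUT
 */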
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        ssize_t ret;
        u32 wbox_stat;

        if (len < 4)
                return -EINVAL;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
        spu_release(ctx);

        if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
                return -EFAULT;

        return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
        .open   = spufs_pipe_open,
        .read   = spufs_wbox_stat_read,
        .llseek = no_llseek,
};
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->signal1 = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->signal1 = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}
static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
                        size_t len, loff_t *pos)
{
        int ret = 0;
        u32 data;

        if (len < 4)
                return -EINVAL;

        if (ctx->csa.spu_chnlcnt_RW[3]) {
                data = ctx->csa.spu_chnldata_RW[3];
                ret = 4;
        }

        if (!ret)
                goto out;

        if (copy_to_user(buf, &data, 4))
                return -EFAULT;

out:
        return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        ret = __spufs_signal1_read(ctx, buf, len, pos);
        spu_release_saved(ctx);

        return ret;
}

static ssize_t spufs_signal1_write(struct file *file,
                        const char __user *buf, size_t len, loff_t *pos)
{
        struct spu_context *ctx;
        ssize_t ret;
        u32 data;

        ctx = file->private_data;

        if (len < 4)
                return -EINVAL;

        if (copy_from_user(&data, buf, 4))
                return -EFAULT;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ctx->ops->signal1_write(ctx, data);
        spu_release(ctx);

        return 4;
}
static int
spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
        return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
        return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
        .fault = spufs_signal1_mmap_fault,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        vma->vm_ops = &spufs_signal1_mmap_vmops;
        return 0;
}

static const struct file_operations spufs_signal1_fops = {
        .open    = spufs_signal1_open,
        .release = spufs_signal1_release,
        .read    = spufs_signal1_read,
        .write   = spufs_signal1_write,
        .mmap    = spufs_signal1_mmap,
        .llseek  = no_llseek,
};

static const struct file_operations spufs_signal1_nosched_fops = {
        .open    = spufs_signal1_open,
        .release = spufs_signal1_release,
        .write   = spufs_signal1_write,
        .mmap    = spufs_signal1_mmap,
        .llseek  = no_llseek,
};
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->signal2 = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->signal2 = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}
static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
                        size_t len, loff_t *pos)
{
        int ret = 0;
        u32 data;

        if (len < 4)
                return -EINVAL;

        if (ctx->csa.spu_chnlcnt_RW[4]) {
                data = ctx->csa.spu_chnldata_RW[4];
                ret = 4;
        }

        if (!ret)
                goto out;

        if (copy_to_user(buf, &data, 4))
                return -EFAULT;

out:
        return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
                        size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        ret = __spufs_signal2_read(ctx, buf, len, pos);
        spu_release_saved(ctx);

        return ret;
}

static ssize_t spufs_signal2_write(struct file *file,
                        const char __user *buf, size_t len, loff_t *pos)
{
        struct spu_context *ctx;
        ssize_t ret;
        u32 data;

        ctx = file->private_data;

        if (len < 4)
                return -EINVAL;

        if (copy_from_user(&data, buf, 4))
                return -EFAULT;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ctx->ops->signal2_write(ctx, data);
        spu_release(ctx);

        return 4;
}
#if SPUFS_MMAP_4K
static int
spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
        return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
        /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
         * signal 1 and 2 area
         */
        return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
        .fault = spufs_signal2_mmap_fault,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        vma->vm_ops = &spufs_signal2_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
        .open    = spufs_signal2_open,
        .release = spufs_signal2_release,
        .read    = spufs_signal2_read,
        .write   = spufs_signal2_write,
        .mmap    = spufs_signal2_mmap,
        .llseek  = no_llseek,
};

static const struct file_operations spufs_signal2_nosched_fops = {
        .open    = spufs_signal2_open,
        .release = spufs_signal2_release,
        .write   = spufs_signal2_write,
        .mmap    = spufs_signal2_mmap,
        .llseek  = no_llseek,
};
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE      0
#define SPU_ATTR_ACQUIRE        1
#define SPU_ATTR_ACQUIRE_SAVED  2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \
static int __##__get(void *data, u64 *val)                             \
{                                                                      \
        struct spu_context *ctx = data;                                \
        int ret = 0;                                                   \
                                                                       \
        if (__acquire == SPU_ATTR_ACQUIRE) {                           \
                ret = spu_acquire(ctx);                                \
                if (ret)                                               \
                        return ret;                                    \
                *val = __get(ctx);                                     \
                spu_release(ctx);                                      \
        } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) {              \
                ret = spu_acquire_saved(ctx);                          \
                if (ret)                                               \
                        return ret;                                    \
                *val = __get(ctx);                                     \
                spu_release_saved(ctx);                                \
        } else                                                         \
                *val = __get(ctx);                                     \
                                                                       \
        return 0;                                                      \
}                                                                      \
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
static int spufs_signal1_type_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ctx->ops->signal1_type_set(ctx, val);
        spu_release(ctx);

        return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
        return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
                       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);

static int spufs_signal2_type_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ctx->ops->signal2_type_set(ctx, val);
        spu_release(ctx);

        return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
        return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
                       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
#if SPUFS_MMAP_4K
static int
spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mss_mmap_vmops = {
        .fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        vma->vm_ops = &spufs_mss_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_mss_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        file->private_data = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!i->i_openers++)
                ctx->mss = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->mss = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}

static const struct file_operations spufs_mss_fops = {
        .open    = spufs_mss_open,
        .release = spufs_mss_release,
        .mmap    = spufs_mss_mmap,
        .llseek  = no_llseek,
};
static int
spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
        .fault = spufs_psmap_mmap_fault,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        vma->vm_ops = &spufs_psmap_mmap_vmops;
        return 0;
}
static int spufs_psmap_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = i->i_ctx;
        if (!i->i_openers++)
                ctx->psmap = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->psmap = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}

static const struct file_operations spufs_psmap_fops = {
        .open    = spufs_psmap_open,
        .release = spufs_psmap_release,
        .mmap    = spufs_psmap_mmap,
        .llseek  = no_llseek,
};
#if SPUFS_MMAP_4K
static int
spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
        .fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        vma->vm_ops = &spufs_mfc_mmap_vmops;
        return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        /* we don't want to deal with DMA into other processes */
        if (ctx->owner != current->mm)
                return -EINVAL;

        if (atomic_read(&inode->i_count) != 1)
                return -EBUSY;

        mutex_lock(&ctx->mapping_lock);
        file->private_data = ctx;
        if (!i->i_openers++)
                ctx->mfc = inode->i_mapping;
        mutex_unlock(&ctx->mapping_lock);
        return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;

        mutex_lock(&ctx->mapping_lock);
        if (!--i->i_openers)
                ctx->mfc = NULL;
        mutex_unlock(&ctx->mapping_lock);
        return 0;
}
/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        if (!ctx)
                return;

        wake_up_all(&ctx->mfc_wq);

        pr_debug("%s %s\n", __func__, spu->name);
        if (ctx->mfc_fasync) {
                u32 free_elements, tagstatus;
                unsigned int mask;

                /* no need for spu_acquire in interrupt context */
                free_elements = ctx->ops->get_mfc_free_elements(ctx);
                tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

                mask = 0;
                if (free_elements & 0xffff)
                        mask |= POLLOUT;
                if (tagstatus & ctx->tagwait)
                        mask |= POLLIN;

                kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
        }
}
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
        /* See if at least one tag group is complete */
        /* FIXME we need locking around tagwait */
        *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
        ctx->tagwait &= ~*status;
        if (*status)
                return 1;

        /* enable interrupt waiting for any tag group,
           may silently fail if interrupts are already enabled */
        ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
        return 0;
}
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
                        size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret = -EINVAL;
        u32 status;

        if (size != 4)
                goto out;

        ret = spu_acquire(ctx);
        if (ret)
                goto out;

        ret = -EINVAL;
        if (file->f_flags & O_NONBLOCK) {
                status = ctx->ops->read_mfc_tagstatus(ctx);
                if (!(status & ctx->tagwait))
                        ret = -EAGAIN;
                else
                        /* XXX(hch): shouldn't we clear ret here? */
                        ctx->tagwait &= ~status;
        } else {
                ret = spufs_wait(ctx->mfc_wq,
                           spufs_read_mfc_tagstatus(ctx, &status));
                if (ret)
                        goto out;
        }
        spu_release(ctx);

        if (ret)
                goto out;

        ret = 4;
        if (copy_to_user(buffer, &status, 4))
                ret = -EFAULT;

out:
        return ret;
}
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
        pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
                 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

        switch (cmd->cmd) {
        case MFC_PUT_CMD:
        case MFC_PUTF_CMD:
        case MFC_PUTB_CMD:
        case MFC_GET_CMD:
        case MFC_GETF_CMD:
        case MFC_GETB_CMD:
                break;
        default:
                pr_debug("invalid DMA opcode %x\n", cmd->cmd);
                return -EIO;
        }

        if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
                pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
                                cmd->ea, cmd->lsa);
                return -EIO;
        }

        switch (cmd->size & 0xf) {
        case 1:
                break;
        case 2:
                if (cmd->lsa & 1)
                        goto error;
                break;
        case 4:
                if (cmd->lsa & 3)
                        goto error;
                break;
        case 8:
                if (cmd->lsa & 7)
                        goto error;
                break;
        case 0:
                if (cmd->lsa & 15)
                        goto error;
                break;
        error:
        default:
                pr_debug("invalid DMA alignment %x for size %x\n",
                        cmd->lsa & 0xf, cmd->size);
                return -EIO;
        }

        if (cmd->size > 16 * 1024) {
                pr_debug("invalid DMA size %x\n", cmd->size);
                return -EIO;
        }

        if (cmd->tag & 0xfff0) {
                /* we reserve the higher tag numbers for kernel use */
                pr_debug("invalid DMA tag\n");
                return -EIO;
        }

        if (cmd->class) {
                /* not supported in this version */
                pr_debug("invalid DMA class\n");
                return -EIO;
        }

        return 0;
}
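
/*
 * Illustrative sketch (not part of the original source): a userspace
 * MFC request that passes the checks above would look roughly like
 * this, using the field names referenced in spufs_check_valid_dma()
 * and assuming a context whose "mfc" file is open as mfc_fd:
 *
 *      struct mfc_dma_command cmd = {
 *              .lsa  = 0x0,          // local store address, 16-byte aligned
 *              .ea   = (u64)buffer,  // effective address, same low 4 bits
 *              .size = 16384,        // at most 16 kB per command
 *              .tag  = 1,            // tags 0-15; higher tags are kernel use
 *              .cmd  = MFC_GET_CMD,  // one of the PUT/GET variants above
 *      };
 *      write(mfc_fd, &cmd, sizeof(cmd));
 */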
static int spu_send_mfc_command(struct spu_context *ctx,
                                struct mfc_dma_command cmd,
                                int *error)
{
        *error = ctx->ops->send_mfc_command(ctx, &cmd);
        if (*error == -EAGAIN) {
                /* wait for any tag group to complete
                   so we have space for the new command */
                ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
                /* try again, because the queue might be
                   empty again */
                *error = ctx->ops->send_mfc_command(ctx, &cmd);
                if (*error == -EAGAIN)
                        return 0;
        }
        return 1;
}
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
                        size_t size, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        struct mfc_dma_command cmd;
        int ret = -EINVAL;

        if (size != sizeof cmd)
                goto out;

        ret = -EFAULT;
        if (copy_from_user(&cmd, buffer, sizeof cmd))
                goto out;

        ret = spufs_check_valid_dma(&cmd);
        if (ret)
                goto out;

        ret = spu_acquire(ctx);
        if (ret)
                goto out;

        ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
        if (ret)
                goto out;

        if (file->f_flags & O_NONBLOCK) {
                ret = ctx->ops->send_mfc_command(ctx, &cmd);
        } else {
                int status;
                ret = spufs_wait(ctx->mfc_wq,
                                 spu_send_mfc_command(ctx, cmd, &status));
                if (ret)
                        goto out;
                if (status)
                        ret = status;
        }

        if (ret)
                goto out_unlock;

        ctx->tagwait |= 1 << cmd.tag;
        ret = size;

out_unlock:
        spu_release(ctx);
out:
        return ret;
}
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
        struct spu_context *ctx = file->private_data;
        u32 free_elements, tagstatus;
        unsigned int mask;

        poll_wait(file, &ctx->mfc_wq, wait);

        /*
         * For now keep this uninterruptible and also ignore the rule
         * that poll should not sleep.  Will be fixed later.
         */
        mutex_lock(&ctx->state_mutex);
        ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
        free_elements = ctx->ops->get_mfc_free_elements(ctx);
        tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
        spu_release(ctx);

        mask = 0;
        if (free_elements & 0xffff)
                mask |= POLLOUT | POLLWRNORM;
        if (tagstatus & ctx->tagwait)
                mask |= POLLIN | POLLRDNORM;

        pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
                free_elements, tagstatus, ctx->tagwait);

        return mask;
}
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                goto out;
#if 0
/* this currently hangs */
        ret = spufs_wait(ctx->mfc_wq,
                         ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
        if (ret)
                goto out;
        ret = spufs_wait(ctx->mfc_wq,
                         ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
        if (ret)
                goto out;
#else
        ret = 0;
#endif
        spu_release(ctx);
out:
        return ret;
}

static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file_inode(file);
        int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (!err) {
                inode_lock(inode);
                err = spufs_mfc_flush(file, NULL);
                inode_unlock(inode);
        }
        return err;
}
static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
        struct spu_context *ctx = file->private_data;

        return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
        .open    = spufs_mfc_open,
        .release = spufs_mfc_release,
        .read    = spufs_mfc_read,
        .write   = spufs_mfc_write,
        .poll    = spufs_mfc_poll,
        .flush   = spufs_mfc_flush,
        .fsync   = spufs_mfc_fsync,
        .fasync  = spufs_mfc_fasync,
        .mmap    = spufs_mfc_mmap,
        .llseek  = no_llseek,
};
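
/*
 * Illustrative usage flow (not part of the original source), assuming
 * a context whose "mfc" file has been opened read-write as mfc_fd:
 *
 *      write(mfc_fd, &cmd, sizeof(cmd));   // queue a DMA command
 *      poll(&(struct pollfd){ .fd = mfc_fd, .events = POLLIN }, 1, -1);
 *      read(mfc_fd, &tagstatus, 4);        // collect completed tag groups
 *
 * read() blocks (unless O_NONBLOCK) until at least one tag group that
 * was queued through this file completes.
 */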
static int spufs_npc_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;
        ctx->ops->npc_write(ctx, val);
        spu_release(ctx);

        return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
        return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
                       "0x%llx\n", SPU_ATTR_ACQUIRE);
static int spufs_decr_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        lscsa->decr.slot[0] = (u32) val;
        spu_release_saved(ctx);

        return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
                       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);

static int spufs_decr_status_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        if (val)
                ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
        else
                ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
        spu_release_saved(ctx);

        return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
        if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
                return SPU_DECR_STATUS_RUNNING;
        else
                return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
                       spufs_decr_status_set, "0x%llx\n",
                       SPU_ATTR_ACQUIRE_SAVED);
static int spufs_event_mask_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        lscsa->event_mask.slot[0] = (u32) val;
        spu_release_saved(ctx);

        return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
                       spufs_event_mask_set, "0x%llx\n",
                       SPU_ATTR_ACQUIRE_SAVED);

static u64 spufs_event_status_get(struct spu_context *ctx)
{
        struct spu_state *state = &ctx->csa;
        u64 stat;

        stat = state->spu_chnlcnt_RW[0];
        if (stat)
                return state->spu_chnldata_RW[0];
        return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
                       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
static int spufs_srr0_set(void *data, u64 val)
{
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        lscsa->srr0.slot[0] = (u32) val;
        spu_release_saved(ctx);

        return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
                       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
static u64 spufs_id_get(struct spu_context *ctx)
{
        u64 num;

        if (ctx->state == SPU_STATE_RUNNABLE)
                num = ctx->spu->number;
        else
                num = (unsigned int)-1;

        return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
                       SPU_ATTR_ACQUIRE)

static u64 spufs_object_id_get(struct spu_context *ctx)
{
        /* FIXME: Should there really be no locking here? */
        return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
        struct spu_context *ctx = data;
        ctx->object_id = id;

        return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
                       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);

static u64 spufs_lslr_get(struct spu_context *ctx)
{
        return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
                       SPU_ATTR_ACQUIRE_SAVED);
static int spufs_info_open(struct inode *inode, struct file *file)
{
        struct spufs_inode_info *i = SPUFS_I(inode);
        struct spu_context *ctx = i->i_ctx;
        file->private_data = ctx;
        return 0;
}

static int spufs_caps_show(struct seq_file *s, void *private)
{
        struct spu_context *ctx = s->private;

        if (!(ctx->flags & SPU_CREATE_NOSCHED))
                seq_puts(s, "sched\n");
        if (!(ctx->flags & SPU_CREATE_ISOLATE))
                seq_puts(s, "step\n");
        return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
        return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
        .open           = spufs_caps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        u32 data;

        /* EOF if there's no entry in the mbox */
        if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
                return 0;

        data = ctx->csa.prob.pu_mb_R;

        return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        int ret;
        struct spu_context *ctx = file->private_data;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_mbox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
        .open   = spufs_info_open,
        .read   = spufs_mbox_info_read,
        .llseek = generic_file_llseek,
};
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        u32 data;

        /* EOF if there's no entry in the ibox */
        if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
                return 0;

        data = ctx->csa.priv2.puint_mb_R;

        return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_ibox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
        .open   = spufs_info_open,
        .read   = spufs_ibox_info_read,
        .llseek = generic_file_llseek,
};
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        int i, cnt;
        u32 data[4];
        u32 wbox_stat;

        wbox_stat = ctx->csa.prob.mb_stat_R;
        cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
        for (i = 0; i < cnt; i++) {
                data[i] = ctx->csa.spu_mailbox_data[i];
        }

        return simple_read_from_buffer(buf, len, pos, &data,
                                cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_wbox_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
        .open   = spufs_info_open,
        .read   = spufs_wbox_info_read,
        .llseek = generic_file_llseek,
};
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        struct spu_dma_info info;
        struct mfc_cq_sr *qp, *spuqp;
        int i;

        info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
        info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
        info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
        info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
        info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
        for (i = 0; i < 16; i++) {
                qp = &info.dma_info_command_data[i];
                spuqp = &ctx->csa.priv2.spuq[i];

                qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
                qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
                qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
                qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
        }

        return simple_read_from_buffer(buf, len, pos, &info,
                                sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
                              size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_dma_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}

static const struct file_operations spufs_dma_info_fops = {
        .open   = spufs_info_open,
        .read   = spufs_dma_info_read,
        .llseek = no_llseek,
};
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
                        char __user *buf, size_t len, loff_t *pos)
{
        struct spu_proxydma_info info;
        struct mfc_cq_sr *qp, *puqp;
        int ret = sizeof info;
        int i;

        if (len < ret)
                return -EINVAL;

        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
        info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
        info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
        for (i = 0; i < 8; i++) {
                qp = &info.proxydma_info_command_data[i];
                puqp = &ctx->csa.priv2.puq[i];

                qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
                qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
                qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
                qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
        }

        return simple_read_from_buffer(buf, len, pos, &info,
                                sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
                                   size_t len, loff_t *pos)
{
        struct spu_context *ctx = file->private_data;
        int ret;

        ret = spu_acquire_saved(ctx);
        if (ret)
                return ret;
        spin_lock(&ctx->csa.register_lock);
        ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
        spin_unlock(&ctx->csa.register_lock);
        spu_release_saved(ctx);

        return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
        .open   = spufs_info_open,
        .read   = spufs_proxydma_info_read,
        .llseek = no_llseek,
};
static int spufs_show_tid(struct seq_file *s, void *private)
{
        struct spu_context *ctx = s->private;

        seq_printf(s, "%d\n", ctx->tid);
        return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
        return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
        .open           = spufs_tid_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static const char *ctx_state_names[] = {
        "user", "system", "iowait", "loaded"
};
static unsigned long long spufs_acct_time(struct spu_context *ctx,
                enum spu_utilization_state state)
{
        unsigned long long time = ctx->stats.times[state];

        /*
         * In general, utilization statistics are updated by the controlling
         * thread as the spu context moves through various well defined
         * state transitions, but if the context is lazily loaded its
         * utilization statistics are not updated as the controlling thread
         * is not tightly coupled with the execution of the spu context.  We
         * calculate and apply the time delta from the last recorded state
         * of the spu context.
         */
        if (ctx->spu && ctx->stats.util_state == state) {
                time += ktime_get_ns() - ctx->stats.tstamp;
        }

        return time / NSEC_PER_MSEC;
}
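
/*
 * Worked example (added for clarity, not in the original source): if a
 * context is currently loaded and has been in "user" state since
 * ctx->stats.tstamp, the value reported for SPU_UTIL_USER is
 *
 *      (ctx->stats.times[SPU_UTIL_USER]
 *              + ktime_get_ns() - ctx->stats.tstamp) / NSEC_PER_MSEC
 *
 * i.e. accumulated time plus the still-open interval, in milliseconds.
 */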
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
        unsigned long long slb_flts = ctx->stats.slb_flt;

        if (ctx->state == SPU_STATE_RUNNABLE) {
                slb_flts += (ctx->spu->stats.slb_flt -
                             ctx->stats.slb_flt_base);
        }

        return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
        unsigned long long class2_intrs = ctx->stats.class2_intr;

        if (ctx->state == SPU_STATE_RUNNABLE) {
                class2_intrs += (ctx->spu->stats.class2_intr -
                                 ctx->stats.class2_intr_base);
        }

        return class2_intrs;
}
static int spufs_show_stat(struct seq_file *s, void *private)
{
        struct spu_context *ctx = s->private;
        int ret;

        ret = spu_acquire(ctx);
        if (ret)
                return ret;

        seq_printf(s, "%s %llu %llu %llu %llu "
                      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
                ctx_state_names[ctx->stats.util_state],
                spufs_acct_time(ctx, SPU_UTIL_USER),
                spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
                spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
                spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
                ctx->stats.vol_ctx_switch,
                ctx->stats.invol_ctx_switch,
                spufs_slb_flts(ctx),
                ctx->stats.hash_flt,
                ctx->stats.min_flt,
                ctx->stats.maj_flt,
                spufs_class2_intrs(ctx),
                ctx->stats.libassist);
        spu_release(ctx);
        return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
        return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
        .open           = spufs_stat_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static inline int spufs_switch_log_used(struct spu_context *ctx)
{
        return (ctx->switch_log->head - ctx->switch_log->tail) %
                SWITCH_LOG_BUFSIZE;
}

static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
        return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}

static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
        struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
        int rc;

        rc = spu_acquire(ctx);
        if (rc)
                return rc;

        if (ctx->switch_log) {
                rc = -EBUSY;
                goto out;
        }

        ctx->switch_log = kmalloc(sizeof(struct switch_log) +
                SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
                GFP_KERNEL);

        if (!ctx->switch_log) {
                rc = -ENOMEM;
                goto out;
        }

        ctx->switch_log->head = ctx->switch_log->tail = 0;
        init_waitqueue_head(&ctx->switch_log->wait);
        rc = 0;

out:
        spu_release(ctx);
        return rc;
}

static int spufs_switch_log_release(struct inode *inode, struct file *file)
{
        struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
        int rc;

        rc = spu_acquire(ctx);
        if (rc)
                return rc;

        kfree(ctx->switch_log);
        ctx->switch_log = NULL;
        spu_release(ctx);

        return 0;
}
static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
{
        struct switch_log_entry *p;

        p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;

        return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
                        (unsigned int) p->tstamp.tv_sec,
                        (unsigned int) p->tstamp.tv_nsec,
                        p->spu_id,
                        (unsigned int) p->type,
                        (unsigned int) p->val,
                        (unsigned long long) p->timebase);
}
static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
                             size_t len, loff_t *ppos)
{
        struct inode *inode = file_inode(file);
        struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
        int error = 0, cnt = 0;

        if (!buf)
                return -EINVAL;

        error = spu_acquire(ctx);
        if (error)
                return error;

        while (cnt < len) {
                char tbuf[128];
                int width;

                if (spufs_switch_log_used(ctx) == 0) {
                        if (cnt > 0) {
                                /* If there's data ready to go, we can
                                 * just return straight away */
                                break;

                        } else if (file->f_flags & O_NONBLOCK) {
                                error = -EAGAIN;
                                break;

                        } else {
                                /* spufs_wait will drop the mutex and
                                 * re-acquire, but since we're in read(), the
                                 * file cannot be _released (and so
                                 * ctx->switch_log is stable).
                                 */
                                error = spufs_wait(ctx->switch_log->wait,
                                                spufs_switch_log_used(ctx) > 0);

                                /* On error, spufs_wait returns without the
                                 * state mutex held */
                                if (error)
                                        return error;

                                /* We may have had entries read from underneath
                                 * us while we dropped the mutex in spufs_wait,
                                 * so re-check */
                                if (spufs_switch_log_used(ctx) == 0)
                                        continue;
                        }
                }

                width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
                if (width < len)
                        ctx->switch_log->tail =
                                (ctx->switch_log->tail + 1) %
                                 SWITCH_LOG_BUFSIZE;
                else
                        /* If the record is greater than space available return
                         * partial buffer (so far) */
                        break;

                error = copy_to_user(buf + cnt, tbuf, width);
                if (error)
                        break;
                cnt += width;
        }

        spu_release(ctx);

        return cnt == 0 ? error : cnt;
}
static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
        struct inode *inode = file_inode(file);
        struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
        unsigned int mask = 0;
        int rc;

        poll_wait(file, &ctx->switch_log->wait, wait);

        rc = spu_acquire(ctx);
        if (rc)
                return rc;

        if (spufs_switch_log_used(ctx) > 0)
                mask |= POLLIN;

        spu_release(ctx);

        return mask;
}

static const struct file_operations spufs_switch_log_fops = {
        .open           = spufs_switch_log_open,
        .read           = spufs_switch_log_read,
        .poll           = spufs_switch_log_poll,
        .release        = spufs_switch_log_release,
        .llseek         = no_llseek,
};
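
/*
 * Note (added for clarity, not in the original source): each record
 * produced by switch_log_sprint() is a single text line of the form
 *
 *      <tstamp.sec>.<tstamp.nsec> <spu_id> <type> <val> <timebase>
 *
 * matching the "%u.%09u %d %u %u %llu\n" format above.
 */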
/**
 * Log a context switch event to a switch log reader.
 *
 * Must be called with ctx->state_mutex held.
 */
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
                u32 type, u32 val)
{
        if (!ctx->switch_log)
                return;

        if (spufs_switch_log_avail(ctx) > 1) {
                struct switch_log_entry *p;

                p = ctx->switch_log->log + ctx->switch_log->head;
                ktime_get_ts(&p->tstamp);
                p->timebase = get_tb();
                p->spu_id = spu ? spu->number : -1;
                p->type = type;
                p->val = val;

                ctx->switch_log->head =
                        (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
        }

        wake_up(&ctx->switch_log->wait);
}
static int spufs_show_ctx(struct seq_file *s, void *private)
{
        struct spu_context *ctx = s->private;
        u64 mfc_control_RW;

        mutex_lock(&ctx->state_mutex);
        if (ctx->spu) {
                struct spu *spu = ctx->spu;
                struct spu_priv2 __iomem *priv2 = spu->priv2;

                spin_lock_irq(&spu->register_lock);
                mfc_control_RW = in_be64(&priv2->mfc_control_RW);
                spin_unlock_irq(&spu->register_lock);
        } else {
                struct spu_state *csa = &ctx->csa;

                mfc_control_RW = csa->priv2.mfc_control_RW;
        }

        seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
                " %c %llx %llx %llx %llx %x %x\n",
                ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
                ctx->flags,
                ctx->sched_flags,
                ctx->prio,
                ctx->time_slice,
                ctx->spu ? ctx->spu->number : -1,
                !list_empty(&ctx->rq) ? 'q' : ' ',
                ctx->csa.class_0_pending,
                ctx->csa.class_0_dar,
                ctx->csa.class_1_dsisr,
                mfc_control_RW,
                ctx->ops->runcntl_read(ctx),
                ctx->ops->status_read(ctx));

        mutex_unlock(&ctx->state_mutex);

        return 0;
}

static int spufs_ctx_open(struct inode *inode, struct file *file)
{
        return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_ctx_fops = {
        .open           = spufs_ctx_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
const struct spufs_tree_descr spufs_dir_contents[] = {
        { "capabilities", &spufs_caps_fops, 0444, },
        { "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
        { "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), },
        { "mbox", &spufs_mbox_fops, 0444, },
        { "ibox", &spufs_ibox_fops, 0444, },
        { "wbox", &spufs_wbox_fops, 0222, },
        { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
        { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
        { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
        { "signal1", &spufs_signal1_fops, 0666, },
        { "signal2", &spufs_signal2_fops, 0666, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
        { "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
        { "lslr", &spufs_lslr_ops, 0444, },
        { "mfc", &spufs_mfc_fops, 0666, },
        { "mss", &spufs_mss_fops, 0666, },
        { "npc", &spufs_npc_ops, 0666, },
        { "srr0", &spufs_srr0_ops, 0666, },
        { "decr", &spufs_decr_ops, 0666, },
        { "decr_status", &spufs_decr_status_ops, 0666, },
        { "event_mask", &spufs_event_mask_ops, 0666, },
        { "event_status", &spufs_event_status_ops, 0444, },
        { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
        { "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
        { "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
        { "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
        { "dma_info", &spufs_dma_info_fops, 0444,
                sizeof(struct spu_dma_info), },
        { "proxydma_info", &spufs_proxydma_info_fops, 0444,
                sizeof(struct spu_proxydma_info)},
        { "tid", &spufs_tid_fops, 0444, },
        { "stat", &spufs_stat_fops, 0444, },
        { "switch_log", &spufs_switch_log_fops, 0444 },
        {},
};
[] = {
2665 { "capabilities", &spufs_caps_fops
, 0444, },
2666 { "mem", &spufs_mem_fops
, 0666, LS_SIZE
, },
2667 { "mbox", &spufs_mbox_fops
, 0444, },
2668 { "ibox", &spufs_ibox_fops
, 0444, },
2669 { "wbox", &spufs_wbox_fops
, 0222, },
2670 { "mbox_stat", &spufs_mbox_stat_fops
, 0444, sizeof(u32
), },
2671 { "ibox_stat", &spufs_ibox_stat_fops
, 0444, sizeof(u32
), },
2672 { "wbox_stat", &spufs_wbox_stat_fops
, 0444, sizeof(u32
), },
2673 { "signal1", &spufs_signal1_nosched_fops
, 0222, },
2674 { "signal2", &spufs_signal2_nosched_fops
, 0222, },
2675 { "signal1_type", &spufs_signal1_type
, 0666, },
2676 { "signal2_type", &spufs_signal2_type
, 0666, },
2677 { "mss", &spufs_mss_fops
, 0666, },
2678 { "mfc", &spufs_mfc_fops
, 0666, },
2679 { "cntl", &spufs_cntl_fops
, 0666, },
2680 { "npc", &spufs_npc_ops
, 0666, },
2681 { "psmap", &spufs_psmap_fops
, 0666, SPUFS_PS_MAP_SIZE
, },
2682 { "phys-id", &spufs_id_ops
, 0666, },
2683 { "object-id", &spufs_object_id_ops
, 0666, },
2684 { "tid", &spufs_tid_fops
, 0444, },
2685 { "stat", &spufs_stat_fops
, 0444, },
2689 const struct spufs_tree_descr spufs_dir_debug_contents
[] = {
2690 { ".ctx", &spufs_ctx_fops
, 0444, },
2694 const struct spufs_coredump_reader spufs_coredump_read
[] = {
2695 { "regs", __spufs_regs_read
, NULL
, sizeof(struct spu_reg128
[128])},
2696 { "fpcr", __spufs_fpcr_read
, NULL
, sizeof(struct spu_reg128
) },
2697 { "lslr", NULL
, spufs_lslr_get
, 19 },
2698 { "decr", NULL
, spufs_decr_get
, 19 },
2699 { "decr_status", NULL
, spufs_decr_status_get
, 19 },
2700 { "mem", __spufs_mem_read
, NULL
, LS_SIZE
, },
2701 { "signal1", __spufs_signal1_read
, NULL
, sizeof(u32
) },
2702 { "signal1_type", NULL
, spufs_signal1_type_get
, 19 },
2703 { "signal2", __spufs_signal2_read
, NULL
, sizeof(u32
) },
2704 { "signal2_type", NULL
, spufs_signal2_type_get
, 19 },
2705 { "event_mask", NULL
, spufs_event_mask_get
, 19 },
2706 { "event_status", NULL
, spufs_event_status_get
, 19 },
2707 { "mbox_info", __spufs_mbox_info_read
, NULL
, sizeof(u32
) },
2708 { "ibox_info", __spufs_ibox_info_read
, NULL
, sizeof(u32
) },
2709 { "wbox_info", __spufs_wbox_info_read
, NULL
, 4 * sizeof(u32
)},
2710 { "dma_info", __spufs_dma_info_read
, NULL
, sizeof(struct spu_dma_info
)},
2711 { "proxydma_info", __spufs_proxydma_info_read
,
2712 NULL
, sizeof(struct spu_proxydma_info
)},
2713 { "object-id", NULL
, spufs_object_id_get
, 19 },
2714 { "npc", NULL
, spufs_npc_get
, 19 },