/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"
#include "sputrace.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

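/*
 * Most of the problem-state mmap code below only works with a 4k base
 * page size; with a 64k PAGE_SIZE those mmap implementations are
 * compiled out and the corresponding fops entries are stubbed to NULL.
 */
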
/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];	/* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;	/* format for read operation */
	struct mutex mutex;	/* protects access to these buffers */
};

static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->get = get;
	attr->set = set;
	attr->data = inode->i_private;
	attr->fmt = fmt;
	mutex_init(&attr->mutex);
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static const struct file_operations __fops = {				\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
	.llseek  = generic_file_llseek,					\
};

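/*
 * Usage sketch for the macro above (the attribute name and callback
 * here are illustrative only, not part of this file):
 *
 *	static int foo_get(void *data, u64 *val)
 *	{
 *		*val = 42;
 *		return 0;
 *	}
 *	DEFINE_SPUFS_SIMPLE_ATTRIBUTE(spufs_foo_ops, foo_get, NULL, "0x%llx\n");
 *
 * This expands to a spufs_foo_ops_open() helper plus a complete
 * file_operations that routes read() and write() through the get/set
 * callbacks, with the buffers serialized by attr->mutex.
 */
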
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos > LS_SIZE)
		return -EFBIG;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
	spu_release(ctx);

	return size;
}

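/*
 * Fault handler for the mmap()ed local store.  Two cases: while the
 * context is saved, faults are backed by cacheable pages of the
 * kernel-side save area; while it is loaded, they map the
 * write-combining physical local store of the SPU itself.
 */
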
static int
spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct spu_context *ctx	= vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	unsigned long pfn, offset;

#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= LS_SIZE)
		return VM_FAULT_SIGBUS;

	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
			address, offset);

	if (spu_acquire(ctx))
		return VM_FAULT_NOPAGE;

	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	}
	vm_insert_pfn(vma, address, pfn);

	spu_release(ctx);

	return VM_FAULT_NOPAGE;
}

static int spufs_mem_mmap_access(struct vm_area_struct *vma,
				unsigned long address,
				void *buf, int len, int write)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long offset = address - vma->vm_start;
	char *local_store;

	if (write && !(vma->vm_flags & VM_WRITE))
		return -EACCES;
	if (spu_acquire(ctx))
		return -EINTR;
	if ((offset + len) > vma->vm_end)
		len = vma->vm_end - offset;
	local_store = ctx->ops->get_ls(ctx);
	if (write)
		memcpy_toio(local_store + offset, buf, len);
	else
		memcpy_fromio(buf, local_store + offset, len);
	spu_release(ctx);
	return len;
}

static const struct vm_operations_struct spufs_mem_mmap_vmops = {
	.fault = spufs_mem_mmap_fault,
	.access = spufs_mem_mmap_access,
};

static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	vma->vm_ops = &spufs_mem_mmap_vmops;
	return 0;
}

#ifdef CONFIG_SPU_FS_64K_LS
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1);
}
#endif /* CONFIG_SPU_FS_64K_LS */

static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};

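/*
 * Common fault handler for the problem-state register files below.
 * Callers pass in the offset and size of their register window within
 * the problem-state area; the handler inserts the matching physical
 * pages once the context is actually loaded onto an SPU.
 */
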
static int spufs_ps_fault(struct vm_area_struct *vma,
				    struct vm_fault *vmf,
				    unsigned long ps_offs,
				    unsigned long ps_size)
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
	int ret = 0;

	spu_context_nospu_trace(spufs_ps_fault__enter, ctx);

	if (offset >= ps_size)
		return VM_FAULT_SIGBUS;

	if (fatal_signal_pending(current))
		return VM_FAULT_SIGBUS;

	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * while we fault in the page.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return VM_FAULT_NOPAGE because the mappings may have
	 * changed.
	 */
	if (spu_acquire(ctx))
		goto refault;

	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
					(area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
	}

	if (!ret)
		spu_release(ctx);

refault:
	put_spu_context(ctx);
	return VM_FAULT_NOPAGE;
}

#if SPUFS_MMAP_4K
static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
					   struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
}

static const struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.fault = spufs_cntl_mmap_fault,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static int spufs_cntl_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
}

static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	simple_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
	.llseek	= generic_file_llseek,
	.mmap = spufs_cntl_mmap,
};

static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	/* pre-check for file position: if we'd return EOF, there's no point
	 * causing a deschedule */
	if (*pos >= sizeof(ctx->csa.lscsa->gprs))
		return 0;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->gprs))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
					buffer, size);
	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};

static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	if (*pos >= sizeof(lscsa->fpcr))
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
					buffer, size);
	spu_release_saved(ctx);
	return size;
}

static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

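/*
 * The following file families expose the SPU mailboxes: "mbox" is the
 * SPU-to-PPE mailbox, "ibox" the SPU-to-PPE interrupt mailbox (which
 * can block and raise SIGIO), and "wbox" the PPE-to-SPU mailbox.  All
 * transfers are done in 32-bit units.
 */
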
/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	if (!count)
		count = -EAGAIN;

	return count;
}

static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
	.llseek	= no_llseek,
};

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
	.llseek = no_llseek,
};

/* low-level ibox access function */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}

static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}

/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}

/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
	.llseek = no_llseek,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
	.llseek = no_llseek,
};

/* low-level mailbox write */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}

static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}

/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}

/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
	.llseek = no_llseek,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

static const struct file_operations spufs_wbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
	.llseek = no_llseek,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal1 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal1_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);

	return 4;
}

static int
spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal1_mmap_vmops = {
	.fault = spufs_signal1_mmap_fault,
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

static const struct file_operations spufs_signal1_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};

static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
	.llseek = no_llseek,
};

static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->signal2 = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret = 0;
	u32 data;

	if (len < 4)
		return -EINVAL;

	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data = ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}

	if (!ret)
		goto out;

	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

out:
	return ret;
}

static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	ssize_t ret;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
static int
spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
	return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
#else
#error unsupported page size
#endif
}

static const struct vm_operations_struct spufs_signal2_mmap_vmops = {
	.fault = spufs_signal2_mmap_fault,
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static const struct file_operations spufs_signal2_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};

static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
	.llseek = no_llseek,
};

/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
	int ret = 0;							\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		ret = spu_acquire(ctx);					\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		ret = spu_acquire_saved(ctx);				\
		if (ret)						\
			return ret;					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);

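/*
 * An illustrative use (names hypothetical): DEFINE_SPUFS_ATTRIBUTE(
 * spufs_foo_ops, spufs_foo_get, NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
 * generates a __spufs_foo_get() wrapper that forces the context into the
 * saved state around the read, then passes that wrapper on to
 * DEFINE_SPUFS_SIMPLE_ATTRIBUTE to build the file_operations.
 */
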
static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE);

static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);

#if SPUFS_MMAP_4K
static int
spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mss_mmap_vmops = {
	.fault = spufs_mss_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
	.llseek  = no_llseek,
};

static int
spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
}

static const struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.fault = spufs_psmap_mmap_fault,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

*inode
, struct file
*file
)
1482 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1483 struct spu_context
*ctx
= i
->i_ctx
;
1485 mutex_lock(&ctx
->mapping_lock
);
1486 file
->private_data
= i
->i_ctx
;
1487 if (!i
->i_openers
++)
1488 ctx
->psmap
= inode
->i_mapping
;
1489 mutex_unlock(&ctx
->mapping_lock
);
1490 return nonseekable_open(inode
, file
);
1494 spufs_psmap_release(struct inode
*inode
, struct file
*file
)
1496 struct spufs_inode_info
*i
= SPUFS_I(inode
);
1497 struct spu_context
*ctx
= i
->i_ctx
;
1499 mutex_lock(&ctx
->mapping_lock
);
1500 if (!--i
->i_openers
)
1502 mutex_unlock(&ctx
->mapping_lock
);
1506 static const struct file_operations spufs_psmap_fops
= {
1507 .open
= spufs_psmap_open
,
1508 .release
= spufs_psmap_release
,
1509 .mmap
= spufs_psmap_mmap
,
1510 .llseek
= no_llseek
,
#if SPUFS_MMAP_4K
static int
spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
}

static const struct vm_operations_struct spufs_mfc_mmap_vmops = {
	.fault = spufs_mfc_mmap_fault,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */

static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	if (!ctx)
		return;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __func__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* XXX(hch): shouldn't we clear ret here? */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
		if (ret)
			goto out;
	}
	spu_release(ctx);

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	if ((cmd->lsa & 0xf) != (cmd->ea & 0xf)) {
		pr_debug("invalid DMA alignment, ea %llx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

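/*
 * Worked example for the checks above (values purely illustrative):
 * lsa = 0x100, ea = 0x10000008 is rejected because (lsa & 0xf) does not
 * match (ea & 0xf); lsa = 0x100, ea = 0x10000100, size = 0x4000 (16k),
 * tag = 1, cmd = MFC_GET_CMD passes all of the checks.
 */
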
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;

	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (ret)
			goto out;
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}

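/*
 * A userspace sketch of driving this interface (hypothetical snippet,
 * not part of the kernel): fill a struct mfc_dma_command, write() it to
 * the mfc file to queue the transfer, then read() the same file to wait
 * for the tag group to complete:
 *
 *	struct mfc_dma_command cmd = {
 *		.lsa = ls_offset, .ea = buf_addr,
 *		.size = 16384, .tag = 1, .cmd = MFC_GETB_CMD,
 *	};
 *	write(mfc_fd, &cmd, sizeof(cmd));
 *	read(mfc_fd, &tagstatus, 4);    (blocks until tag 1 completes)
 */
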
static unsigned int spufs_mfc_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		goto out;
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
	if (ret)
		goto out;
#else
	ret = 0;
#endif
	spu_release(ctx);
out:
	return ret;
}

static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (!err) {
		mutex_lock(&inode->i_mutex);
		err = spufs_mfc_flush(file, NULL);
		mutex_unlock(&inode->i_mutex);
	}

	return err;
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
	.llseek  = no_llseek,
};

static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);

static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);

static int spufs_decr_status_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_decr_status_get(struct spu_context *ctx)
{
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static u64 spufs_event_status_get(struct spu_context *ctx)
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
		return state->spu_chnldata_RW[0];
	return 0;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)

static u64 spufs_id_get(struct spu_context *ctx)
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)

static u64 spufs_object_id_get(struct spu_context *ctx)
{
	/* FIXME: Should there really be no locking here? */
	return ctx->object_id;
}

static int spufs_object_id_set(void *data, u64 id)
{
	struct spu_context *ctx = data;
	ctx->object_id = id;

	return 0;
}

DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);

static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);

static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the mbox */
	if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
		return 0;

	data = ctx->csa.prob.pu_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 data;

	/* EOF if there's no entry in the ibox */
	if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
		return 0;

	data = ctx->csa.priv2.puint_mb_R;

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};

static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
	.llseek = no_llseek,
};

static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
	.llseek = no_llseek,
};

static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long slb_flts = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		slb_flts += (ctx->spu->stats.slb_flt -
			     ctx->stats.slb_flt_base);
	}

	return slb_flts;
}

static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long class2_intrs = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		class2_intrs += (ctx->spu->stats.class2_intr -
				 ctx->stats.class2_intr_base);
	}

	return class2_intrs;
}

static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	seq_printf(s, "%s %llu %llu %llu %llu "
		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static inline int spufs_switch_log_used(struct spu_context *ctx)
{
	return (ctx->switch_log->head - ctx->switch_log->tail) %
		SWITCH_LOG_BUFSIZE;
}

static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
	return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}

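/*
 * head and tail index a fixed-size ring of log entries; their
 * difference modulo SWITCH_LOG_BUFSIZE is the number of unread entries.
 * The producer (spu_switch_log_notify below) only writes while more
 * than one slot is free, so head == tail unambiguously means "empty".
 */
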
static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (ctx->switch_log) {
		rc = -EBUSY;
		goto out;
	}

	ctx->switch_log = kmalloc(sizeof(struct switch_log) +
		SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
		GFP_KERNEL);

	if (!ctx->switch_log) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->switch_log->head = ctx->switch_log->tail = 0;
	init_waitqueue_head(&ctx->switch_log->wait);
	rc = 0;

out:
	spu_release(ctx);
	return rc;
}

static int spufs_switch_log_release(struct inode *inode, struct file *file)
{
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int rc;

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	kfree(ctx->switch_log);
	ctx->switch_log = NULL;
	spu_release(ctx);

	return 0;
}

static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
{
	struct switch_log_entry *p;

	p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;

	return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
			(unsigned int) p->tstamp.tv_sec,
			(unsigned int) p->tstamp.tv_nsec,
			p->spu_id,
			(unsigned int) p->type,
			(unsigned int) p->val,
			(unsigned long long) p->timebase);
}

static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
			     size_t len, loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	int error = 0, cnt = 0;

	if (!buf)
		return -EINVAL;

	error = spu_acquire(ctx);
	if (error)
		return error;

	while (cnt < len) {
		char tbuf[128];
		int width;

		if (spufs_switch_log_used(ctx) == 0) {
			if (cnt > 0) {
				/* If there's data ready to go, we can
				 * just return straight away */
				break;

			} else if (file->f_flags & O_NONBLOCK) {
				error = -EAGAIN;
				break;

			} else {
				/* spufs_wait will drop the mutex and
				 * re-acquire, but since we're in read(), the
				 * file cannot be _released (and so
				 * ctx->switch_log is stable).
				 */
				error = spufs_wait(ctx->switch_log->wait,
						spufs_switch_log_used(ctx) > 0);

				/* On error, spufs_wait returns without the
				 * state mutex held */
				if (error)
					return error;

				/* We may have had entries read from underneath
				 * us while we dropped the mutex in spufs_wait,
				 * so re-check */
				if (spufs_switch_log_used(ctx) == 0)
					continue;
			}
		}

		width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
		if (width < len)
			ctx->switch_log->tail =
				(ctx->switch_log->tail + 1) %
				 SWITCH_LOG_BUFSIZE;
		else
			/* If the record is greater than space available return
			 * partial buffer (so far) */
			break;

		error = copy_to_user(buf + cnt, tbuf, width);
		if (error)
			break;
		cnt += width;
	}

	spu_release(ctx);

	return cnt == 0 ? error : cnt;
}

static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
	struct inode *inode = file_inode(file);
	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
	unsigned int mask = 0;
	int rc;

	poll_wait(file, &ctx->switch_log->wait, wait);

	rc = spu_acquire(ctx);
	if (rc)
		return rc;

	if (spufs_switch_log_used(ctx) > 0)
		mask |= POLLIN;

	spu_release(ctx);

	return mask;
}

= {
2593 .open
= spufs_switch_log_open
,
2594 .read
= spufs_switch_log_read
,
2595 .poll
= spufs_switch_log_poll
,
2596 .release
= spufs_switch_log_release
,
2597 .llseek
= no_llseek
,
/**
 * Log a context switch event to a switch log reader.
 *
 * Must be called with ctx->state_mutex held.
 */
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
		u32 type, u32 val)
{
	if (!ctx->switch_log)
		return;

	if (spufs_switch_log_avail(ctx) > 1) {
		struct switch_log_entry *p;

		p = ctx->switch_log->log + ctx->switch_log->head;
		ktime_get_ts(&p->tstamp);
		p->timebase = get_tb();
		p->spu_id = spu ? spu->number : -1;
		p->type = type;
		p->val = val;

		ctx->switch_log->head =
			(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
	}

	wake_up(&ctx->switch_log->wait);
}

static int spufs_show_ctx(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;
	u64 mfc_control_RW;

	mutex_lock(&ctx->state_mutex);
	if (ctx->spu) {
		struct spu *spu = ctx->spu;
		struct spu_priv2 __iomem *priv2 = spu->priv2;

		spin_lock_irq(&spu->register_lock);
		mfc_control_RW = in_be64(&priv2->mfc_control_RW);
		spin_unlock_irq(&spu->register_lock);
	} else {
		struct spu_state *csa = &ctx->csa;

		mfc_control_RW = csa->priv2.mfc_control_RW;
	}

	seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
		" %c %llx %llx %llx %llx %x %x\n",
		ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
		ctx->flags,
		ctx->sched_flags,
		ctx->prio,
		ctx->time_slice,
		ctx->spu ? ctx->spu->number : -1,
		!list_empty(&ctx->rq) ? 'q' : ' ',
		ctx->csa.class_0_pending,
		ctx->csa.class_0_dar,
		ctx->csa.class_1_dsisr,
		mfc_control_RW,
		ctx->ops->runcntl_read(ctx),
		ctx->ops->status_read(ctx));

	mutex_unlock(&ctx->state_mutex);

	return 0;
}

static int spufs_ctx_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_ctx_fops = {
	.open           = spufs_ctx_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

const struct spufs_tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "regs", &spufs_regs_fops,  0666, sizeof(struct spu_reg128[128]), },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
	{ "dma_info", &spufs_dma_info_fops, 0444,
		sizeof(struct spu_dma_info), },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444,
		sizeof(struct spu_proxydma_info)},
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{ "switch_log", &spufs_switch_log_fops, 0444 },
	{},
};

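/*
 * The nosched variant below is used for contexts created with
 * SPU_CREATE_NOSCHED: files whose implementation would require
 * scheduling the context out (regs, fpcr, the *_info files, switch_log
 * and friends) are omitted, and the signal files become write-only.
 */
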
const struct spufs_tree_descr spufs_dir_nosched_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, LS_SIZE, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};

const struct spufs_tree_descr spufs_dir_debug_contents[] = {
	{ ".ctx", &spufs_ctx_fops, 0444, },
	{},
};

const struct spufs_coredump_reader spufs_coredump_read[] = {
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
	{ NULL },
};