/*
 * SPU file system
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/parser.h>

#include <asm/prom.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/uaccess.h>

#include "spufs.h"
static struct kmem_cache *spufs_inode_cache;
char *isolated_loader;
static int isolated_loader_size;
static struct inode *
spufs_alloc_inode(struct super_block *sb)
{
        struct spufs_inode_info *ei;

        ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
        if (!ei)
                return NULL;

        ei->i_gang = NULL;
        ei->i_ctx = NULL;

        return &ei->vfs_inode;
}
static void
spufs_destroy_inode(struct inode *inode)
{
        kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}

static void
spufs_init_once(struct kmem_cache *cachep, void *p)
{
        struct spufs_inode_info *ei = p;

        inode_init_once(&ei->vfs_inode);
}
static struct inode *
spufs_new_inode(struct super_block *sb, int mode)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (!inode)
                goto out;

        inode->i_mode = mode;
        inode->i_uid = current->fsuid;
        inode->i_gid = current->fsgid;
        inode->i_blocks = 0;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
out:
        return inode;
}
static int
spufs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;

        if ((attr->ia_valid & ATTR_SIZE) &&
            (attr->ia_size != inode->i_size))
                return -EINVAL;
        return inode_setattr(inode, attr);
}
static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
                const struct file_operations *fops, int mode,
                struct spu_context *ctx)
{
        static struct inode_operations spufs_file_iops = {
                .setattr = spufs_setattr,
        };
        struct inode *inode;
        int ret;

        ret = -ENOSPC;
        inode = spufs_new_inode(sb, S_IFREG | mode);
        if (!inode)
                goto out;

        ret = 0;
        inode->i_op = &spufs_file_iops;
        inode->i_fop = fops;
        inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
        d_add(dentry, inode);
out:
        return ret;
}
static void
spufs_delete_inode(struct inode *inode)
{
        struct spufs_inode_info *ei = SPUFS_I(inode);

        if (ei->i_ctx)
                put_spu_context(ei->i_ctx);
        if (ei->i_gang)
                put_spu_gang(ei->i_gang);
        clear_inode(inode);
}
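
/*
 * Remove every entry of a context directory: unhash and unlink each
 * dentry under dir while holding dir->d_inode->i_mutex, then shrink
 * the dcache for the parent.
 */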
static void spufs_prune_dir(struct dentry *dir)
{
        struct dentry *dentry, *tmp;

        mutex_lock(&dir->d_inode->i_mutex);
        list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
                spin_lock(&dcache_lock);
                spin_lock(&dentry->d_lock);
                if (!(d_unhashed(dentry)) && dentry->d_inode) {
                        dget_locked(dentry);
                        __d_drop(dentry);
                        spin_unlock(&dentry->d_lock);
                        simple_unlink(dir->d_inode, dentry);
                        spin_unlock(&dcache_lock);
                        dput(dentry);
                } else {
                        spin_unlock(&dentry->d_lock);
                        spin_unlock(&dcache_lock);
                }
        }
        shrink_dcache_parent(dir);
        mutex_unlock(&dir->d_inode->i_mutex);
}
/* Caller must hold parent->i_mutex */
static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
        /* remove all entries */
        spufs_prune_dir(dir);

        return simple_rmdir(parent, dir);
}
static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
                          int mode, struct spu_context *ctx)
{
        struct dentry *dentry, *tmp;
        int ret;

        while (files->name && files->name[0]) {
                ret = -ENOMEM;
                dentry = d_alloc_name(dir, files->name);
                if (!dentry)
                        goto out;
                ret = spufs_new_file(dir->d_sb, dentry, files->ops,
                                        files->mode & mode, ctx);
                if (ret)
                        goto out;
                files++;
        }
        return 0;
out:
        /*
         * remove all children from dir. dir->inode is not set so don't
         * just simply use spufs_prune_dir() and panic afterwards :)
         * dput() looks like it will do the right thing:
         * - dec parent's ref counter
         * - remove child from parent's child list
         * - free child's inode if possible
         * - free child
         */
        list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
                dput(dentry);
        }

        shrink_dcache_parent(dir);
        return ret;
}
static int spufs_dir_close(struct inode *inode, struct file *file)
{
        struct spu_context *ctx;
        struct inode *parent;
        struct dentry *dir;
        int ret;

        dir = file->f_path.dentry;
        parent = dir->d_parent->d_inode;
        ctx = SPUFS_I(dir->d_inode)->i_ctx;

        mutex_lock(&parent->i_mutex);
        ret = spufs_rmdir(parent, dir);
        mutex_unlock(&parent->i_mutex);
        WARN_ON(ret);

        /* We have to give up the mm_struct */
        spu_forget(ctx);

        return dcache_dir_close(inode, file);
}
const struct file_operations spufs_context_fops = {
        .open           = dcache_dir_open,
        .release        = spufs_dir_close,
        .llseek         = dcache_dir_lseek,
        .read           = generic_read_dir,
        .readdir        = dcache_readdir,
        .fsync          = simple_sync_file,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);
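
/*
 * Create a context directory: allocate a new inode and spu_context,
 * then populate the directory with the spufs files (the "nosched"
 * set when SPU_CREATE_NOSCHED is requested).
 */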
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
                int mode)
{
        int ret;
        struct inode *inode;
        struct spu_context *ctx;

        ret = -ENOSPC;
        inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
        if (!inode)
                goto out;

        if (dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;
                inode->i_mode &= S_ISGID;
        }
        ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
        SPUFS_I(inode)->i_ctx = ctx;
        if (!ctx)
                goto out_iput;

        ctx->flags = flags;
        inode->i_op = &simple_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;
        if (flags & SPU_CREATE_NOSCHED)
                ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
                                        mode, ctx);
        else
                ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);

        if (ret)
                goto out_free_ctx;

        d_instantiate(dentry, inode);
        dget(dentry);
        dir->i_nlink++;
        dentry->d_inode->i_nlink++;
        goto out;

out_free_ctx:
        put_spu_context(ctx);
out_iput:
        iput(inode);
out:
        return ret;
}
static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt)
{
        int ret;
        struct file *filp;

        ret = get_unused_fd();
        if (ret < 0) {
                dput(dentry);
                mntput(mnt);
                goto out;
        }

        filp = dentry_open(dentry, mnt, O_RDONLY);
        if (IS_ERR(filp)) {
                put_unused_fd(ret);
                ret = PTR_ERR(filp);
                goto out;
        }

        filp->f_op = &spufs_context_fops;
        fd_install(ret, filp);
out:
        return ret;
}
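
/*
 * Check whether an affinity request can be satisfied: affinity must be
 * supported by the hardware, the gang's reference context must not
 * conflict, and enough SPUs must be available on a single node.
 * Returns the neighbor context for SPU_CREATE_AFFINITY_SPU, NULL when
 * no neighbor is involved, or an ERR_PTR on failure.
 */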
static struct spu_context *
spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
                struct file *filp)
{
        struct spu_context *tmp, *neighbor;
        int count, node;
        int aff_supp;

        aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next,
                                        struct spu, cbe_list))->aff_list);

        if (!aff_supp)
                return ERR_PTR(-EINVAL);

        if (flags & SPU_CREATE_GANG)
                return ERR_PTR(-EINVAL);

        if (flags & SPU_CREATE_AFFINITY_MEM &&
            gang->aff_ref_ctx &&
            gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM)
                return ERR_PTR(-EEXIST);

        if (gang->aff_flags & AFF_MERGED)
                return ERR_PTR(-EBUSY);

        neighbor = NULL;
        if (flags & SPU_CREATE_AFFINITY_SPU) {
                if (!filp || filp->f_op != &spufs_context_fops)
                        return ERR_PTR(-EINVAL);

                neighbor = get_spu_context(
                                SPUFS_I(filp->f_dentry->d_inode)->i_ctx);

                if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
                    !list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
                    !list_entry(neighbor->aff_list.next, struct spu_context,
                    aff_list)->aff_head)
                        return ERR_PTR(-EEXIST);

                if (gang != neighbor->gang)
                        return ERR_PTR(-EINVAL);

                count = 1;
                list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
                        count++;
                if (list_empty(&neighbor->aff_list))
                        count++;

                for (node = 0; node < MAX_NUMNODES; node++) {
                        if ((cbe_spu_info[node].n_spus - atomic_read(
                                &cbe_spu_info[node].reserved_spus)) >= count)
                                break;
                }

                if (node == MAX_NUMNODES)
                        return ERR_PTR(-EEXIST);
        }

        return neighbor;
}
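
/*
 * Link the new context into the gang's affinity list next to its
 * neighbor and/or mark it as the gang's memory-affinity reference
 * context, depending on the requested flags.
 */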
static void
spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
                struct spu_context *neighbor)
{
        if (flags & SPU_CREATE_AFFINITY_MEM)
                ctx->gang->aff_ref_ctx = ctx;

        if (flags & SPU_CREATE_AFFINITY_SPU) {
                if (list_empty(&neighbor->aff_list)) {
                        list_add_tail(&neighbor->aff_list,
                                &ctx->gang->aff_list_head);
                        neighbor->aff_head = 1;
                }

                if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head)
                    || list_entry(neighbor->aff_list.next, struct spu_context,
                                                aff_list)->aff_head) {
                        list_add(&ctx->aff_list, &neighbor->aff_list);
                } else {
                        list_add_tail(&ctx->aff_list, &neighbor->aff_list);
                        if (neighbor->aff_head) {
                                neighbor->aff_head = 0;
                                ctx->aff_head = 1;
                        }
                }

                if (!ctx->gang->aff_ref_ctx)
                        ctx->gang->aff_ref_ctx = ctx;
        }
}
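
/*
 * Create an SPU context directory and open it, validating the create
 * flags (NOSCHED requires CAP_SYS_NICE, ISOLATE requires NOSCHED and a
 * loaded isolation loader) and applying any requested affinity.
 */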
static int
spufs_create_context(struct inode *inode, struct dentry *dentry,
                        struct vfsmount *mnt, int flags, int mode,
                        struct file *aff_filp)
{
        int ret;
        int affinity;
        struct spu_gang *gang;
        struct spu_context *neighbor;

        ret = -EPERM;
        if ((flags & SPU_CREATE_NOSCHED) &&
            !capable(CAP_SYS_NICE))
                goto out_unlock;

        ret = -EINVAL;
        if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
            == SPU_CREATE_ISOLATE)
                goto out_unlock;

        ret = -ENODEV;
        if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
                goto out_unlock;

        gang = NULL;
        neighbor = NULL;
        affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
        if (affinity) {
                gang = SPUFS_I(inode)->i_gang;
                ret = -EINVAL;
                if (!gang)
                        goto out_unlock;
                mutex_lock(&gang->aff_mutex);
                neighbor = spufs_assert_affinity(flags, gang, aff_filp);
                if (IS_ERR(neighbor)) {
                        ret = PTR_ERR(neighbor);
                        goto out_aff_unlock;
                }
        }

        ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
        if (ret)
                goto out_aff_unlock;

        if (affinity)
                spufs_set_affinity(flags, SPUFS_I(dentry->d_inode)->i_ctx,
                                                                neighbor);

        /*
         * get references for dget and mntget, will be released
         * in error path of *_open().
         */
        ret = spufs_context_open(dget(dentry), mntget(mnt));
        if (ret < 0) {
                WARN_ON(spufs_rmdir(inode, dentry));
                mutex_unlock(&inode->i_mutex);
                spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
                goto out;
        }

out_aff_unlock:
        if (affinity)
                mutex_unlock(&gang->aff_mutex);
out_unlock:
        mutex_unlock(&inode->i_mutex);
out:
        dput(dentry);
        return ret;
}
static int
spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
{
        int ret;
        struct inode *inode;
        struct spu_gang *gang;

        ret = -ENOSPC;
        inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
        if (!inode)
                goto out;

        ret = 0;
        if (dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;
                inode->i_mode &= S_ISGID;
        }
        gang = alloc_spu_gang();
        SPUFS_I(inode)->i_ctx = NULL;
        SPUFS_I(inode)->i_gang = gang;
        if (!gang)
                goto out_iput;

        inode->i_op = &simple_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;

        d_instantiate(dentry, inode);
        dget(dentry);
        dir->i_nlink++;
        dentry->d_inode->i_nlink++;
        return ret;

out_iput:
        iput(inode);
out:
        return ret;
}
static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
{
        int ret;
        struct file *filp;

        ret = get_unused_fd();
        if (ret < 0) {
                dput(dentry);
                mntput(mnt);
                goto out;
        }

        filp = dentry_open(dentry, mnt, O_RDONLY);
        if (IS_ERR(filp)) {
                put_unused_fd(ret);
                ret = PTR_ERR(filp);
                goto out;
        }

        filp->f_op = &simple_dir_operations;
        fd_install(ret, filp);
out:
        return ret;
}
static int spufs_create_gang(struct inode *inode,
                        struct dentry *dentry,
                        struct vfsmount *mnt, int mode)
{
        int ret;

        ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO);
        if (ret)
                goto out;

        /*
         * get references for dget and mntget, will be released
         * in error path of *_open().
         */
        ret = spufs_gang_open(dget(dentry), mntget(mnt));
        if (ret < 0) {
                int err = simple_rmdir(inode, dentry);
                WARN_ON(err);
        }

out:
        mutex_unlock(&inode->i_mutex);
        dput(dentry);
        return ret;
}

static struct file_system_type spufs_type;
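
/*
 * Entry point for the spu_create system call: verify that the target
 * is on spufs, create either a gang or a context directory, and return
 * an open file descriptor for it.
 */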
long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
                                                        struct file *filp)
{
        struct dentry *dentry;
        int ret;

        ret = -EINVAL;
        /* check if we are on spufs */
        if (nd->dentry->d_sb->s_type != &spufs_type)
                goto out;

        /* don't accept undefined flags */
        if (flags & (~SPU_CREATE_FLAG_ALL))
                goto out;

        /* only threads can be underneath a gang */
        if (nd->dentry != nd->dentry->d_sb->s_root) {
                if ((flags & SPU_CREATE_GANG) ||
                    !SPUFS_I(nd->dentry->d_inode)->i_gang)
                        goto out;
        }

        dentry = lookup_create(nd, 1);
        ret = PTR_ERR(dentry);
        if (IS_ERR(dentry))
                goto out_dir;

        ret = -EEXIST;
        if (dentry->d_inode)
                goto out_dput;

        mode &= ~current->fs->umask;

        if (flags & SPU_CREATE_GANG)
                return spufs_create_gang(nd->dentry->d_inode,
                                        dentry, nd->mnt, mode);
        else
                return spufs_create_context(nd->dentry->d_inode,
                                        dentry, nd->mnt, flags, mode, filp);

out_dput:
        dput(dentry);
out_dir:
        mutex_unlock(&nd->dentry->d_inode->i_mutex);
out:
        return ret;
}
/* File system initialization */
enum {
        Opt_uid, Opt_gid, Opt_mode, Opt_err,
};

static match_table_t spufs_tokens = {
        { Opt_uid,  "uid=%d" },
        { Opt_gid,  "gid=%d" },
        { Opt_mode, "mode=%o" },
        { Opt_err,   NULL    },
};
static int
spufs_parse_options(char *options, struct inode *root)
{
        char *p;
        substring_t args[MAX_OPT_ARGS];

        while ((p = strsep(&options, ",")) != NULL) {
                int token, option;

                if (!*p)
                        continue;

                token = match_token(p, spufs_tokens, args);
                switch (token) {
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                return 0;
                        root->i_uid = option;
                        break;
                case Opt_gid:
                        if (match_int(&args[0], &option))
                                return 0;
                        root->i_gid = option;
                        break;
                case Opt_mode:
                        if (match_octal(&args[0], &option))
                                return 0;
                        root->i_mode = option | S_IFDIR;
                        break;
                default:
                        return 0;
                }
        }
        return 1;
}
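
/*
 * The isolated-mode loader is read from the device tree at module init
 * and kept in page-aligned memory for the lifetime of the module.
 */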
static void spufs_exit_isolated_loader(void)
{
        free_pages((unsigned long) isolated_loader,
                        get_order(isolated_loader_size));
}
static void
spufs_init_isolated_loader(void)
{
        struct device_node *dn;
        const char *loader;
        int size;

        dn = of_find_node_by_path("/spu-isolation");
        if (!dn)
                return;

        loader = of_get_property(dn, "loader", &size);
        if (!loader)
                return;

        /* the loader must be aligned on a 16 byte boundary */
        isolated_loader = (char *)__get_free_pages(GFP_KERNEL, get_order(size));
        if (!isolated_loader)
                return;

        isolated_loader_size = size;
        memcpy(isolated_loader, loader, size);
        printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
}
static int
spufs_create_root(struct super_block *sb, void *data)
{
        struct inode *inode;
        int ret;

        ret = -ENODEV;
        if (!spu_management_ops)
                goto out;

        ret = -ENOMEM;
        inode = spufs_new_inode(sb, S_IFDIR | 0775);
        if (!inode)
                goto out;

        inode->i_op = &simple_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;
        SPUFS_I(inode)->i_ctx = NULL;

        ret = -EINVAL;
        if (!spufs_parse_options(data, inode))
                goto out_iput;

        ret = -ENOMEM;
        sb->s_root = d_alloc_root(inode);
        if (!sb->s_root)
                goto out_iput;

        return 0;
out_iput:
        iput(inode);
out:
        return ret;
}
static int
spufs_fill_super(struct super_block *sb, void *data, int silent)
{
        static struct super_operations s_ops = {
                .alloc_inode = spufs_alloc_inode,
                .destroy_inode = spufs_destroy_inode,
                .statfs = simple_statfs,
                .delete_inode = spufs_delete_inode,
                .drop_inode = generic_delete_inode,
        };

        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = SPUFS_MAGIC;
        sb->s_op = &s_ops;

        return spufs_create_root(sb, data);
}
static int
spufs_get_sb(struct file_system_type *fstype, int flags,
                const char *name, void *data, struct vfsmount *mnt)
{
        return get_sb_single(fstype, flags, data, spufs_fill_super, mnt);
}
static struct file_system_type spufs_type = {
        .owner = THIS_MODULE,
        .name = "spufs",
        .get_sb = spufs_get_sb,
        .kill_sb = kill_litter_super,
};
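
/*
 * Module init: create the inode cache, start the SPU scheduler,
 * register the filesystem and the SPU syscalls, and load the
 * isolation loader if present.
 */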
static int __init spufs_init(void)
{
        int ret;

        ret = -ENODEV;
        if (!spu_management_ops)
                goto out;

        ret = -ENOMEM;
        spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
                        sizeof(struct spufs_inode_info), 0,
                        SLAB_HWCACHE_ALIGN, spufs_init_once);

        if (!spufs_inode_cache)
                goto out;
        ret = spu_sched_init();
        if (ret)
                goto out_cache;
        ret = register_filesystem(&spufs_type);
        if (ret)
                goto out_sched;
        ret = register_spu_syscalls(&spufs_calls);
        if (ret)
                goto out_fs;

        spufs_init_isolated_loader();

        return 0;

out_fs:
        unregister_filesystem(&spufs_type);
out_sched:
        spu_sched_exit();
out_cache:
        kmem_cache_destroy(spufs_inode_cache);
out:
        return ret;
}
module_init(spufs_init);
static void __exit spufs_exit(void)
{
        spu_sched_exit();
        spufs_exit_isolated_loader();
        unregister_spu_syscalls(&spufs_calls);
        unregister_filesystem(&spufs_type);
        kmem_cache_destroy(spufs_inode_cache);
}
module_exit(spufs_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");