// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SPU file system
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 */

#include <linux/file.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/fsnotify.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/slab.h>

#include <asm/spu_priv1.h>
#include <linux/uaccess.h>

#include "spufs.h"

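/*
 * Per-superblock state: currently only the "debug" mount option, which
 * makes spufs_mkdir() also populate the debug files for each context.
 */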
struct spufs_sb_info {
	bool debug;
};

static struct kmem_cache *spufs_inode_cache;
char *isolated_loader;
static int isolated_loader_size;

static struct spufs_sb_info *spufs_get_sb_info(struct super_block *sb)
{
	return sb->s_fs_info;
}

static struct inode *
spufs_alloc_inode(struct super_block *sb)
{
	struct spufs_inode_info *ei;

	ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->i_gang = NULL;
	ei->i_ctx = NULL;
	ei->i_openers = 0;

	return &ei->vfs_inode;
}

static void spufs_free_inode(struct inode *inode)
{
	kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}

static void
spufs_init_once(void *p)
{
	struct spufs_inode_info *ei = p;

	inode_init_once(&ei->vfs_inode);
}

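/*
 * Allocate a fresh inode on @sb, owned by the current fsuid/fsgid and
 * stamped with the current time. Returns NULL if new_inode() fails.
 */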
static struct inode *
spufs_new_inode(struct super_block *sb, umode_t mode)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		return NULL;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
	return inode;
}

static int
spufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);

	if ((attr->ia_valid & ATTR_SIZE) &&
	    (attr->ia_size != inode->i_size))
		return -EINVAL;
	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

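/*
 * Create one regular file below a context directory. The file's
 * i_private and i_ctx both point at the spu_context, which is pinned
 * with get_spu_context() until the inode is evicted.
 */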
static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
		const struct file_operations *fops, umode_t mode,
		size_t size, struct spu_context *ctx)
{
	static const struct inode_operations spufs_file_iops = {
		.setattr = spufs_setattr,
	};
	struct inode *inode;

	inode = spufs_new_inode(sb, S_IFREG | mode);
	if (!inode)
		return -ENOSPC;

	inode->i_op = &spufs_file_iops;
	inode->i_fop = fops;
	inode->i_size = size;
	inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
	d_add(dentry, inode);
	return 0;
}

static void
spufs_evict_inode(struct inode *inode)
{
	struct spufs_inode_info *ei = SPUFS_I(inode);

	clear_inode(inode);
	if (ei->i_ctx)
		put_spu_context(ei->i_ctx);
	if (ei->i_gang)
		put_spu_gang(ei->i_gang);
}

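/*
 * Unlink every entry below a context directory so that the directory
 * itself can be removed in spufs_rmdir().
 */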
static void spufs_prune_dir(struct dentry *dir)
{
	struct dentry *dentry, *tmp;

	inode_lock(d_inode(dir));
	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
		spin_lock(&dentry->d_lock);
		if (simple_positive(dentry)) {
			dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			simple_unlink(d_inode(dir), dentry);
			/* XXX: what was dcache_lock protecting here? Other
			 * filesystems (IB, configfs) release dcache_lock
			 * before unlink */
			dput(dentry);
		} else {
			spin_unlock(&dentry->d_lock);
		}
	}
	shrink_dcache_parent(dir);
	inode_unlock(d_inode(dir));
}

/* Caller must hold parent->i_mutex */
static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
	/* remove all entries */
	int res;

	spufs_prune_dir(dir);
	res = simple_rmdir(parent, dir);
	/* We have to give up the mm_struct */
	spu_forget(SPUFS_I(d_inode(dir))->i_ctx);
	return res;
}

static int spufs_fill_dir(struct dentry *dir,
		const struct spufs_tree_descr *files, umode_t mode,
		struct spu_context *ctx)
{
	while (files->name && files->name[0]) {
		int ret;
		struct dentry *dentry = d_alloc_name(dir, files->name);
		if (!dentry)
			return -ENOMEM;
		ret = spufs_new_file(dir->d_sb, dentry, files->ops,
					files->mode & mode, files->size, ctx);
		if (ret)
			return ret;
		files++;
	}
	return 0;
}

static int spufs_dir_close(struct inode *inode, struct file *file)
{
	struct inode *parent;
	struct dentry *dir;
	int ret;

	dir = file->f_path.dentry;
	parent = d_inode(dir->d_parent);

	inode_lock_nested(parent, I_MUTEX_PARENT);
	ret = spufs_rmdir(parent, dir);
	inode_unlock(parent);
	WARN_ON(ret);

	return dcache_dir_close(inode, file);
}

const struct file_operations spufs_context_fops = {
	.open		= dcache_dir_open,
	.release	= spufs_dir_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.iterate_shared	= dcache_readdir,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);

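/*
 * Create a context directory: allocate the spu_context, then populate
 * the directory from spufs_dir_contents (or the nosched/debug variants,
 * depending on the create flags and the "debug" mount option).
 */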
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
		umode_t mode)
{
	int ret;
	struct inode *inode;
	struct spu_context *ctx;

	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		return -ENOSPC;

	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		inode->i_mode &= S_ISGID;
	}
	ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
	SPUFS_I(inode)->i_ctx = ctx;
	if (!ctx) {
		iput(inode);
		return -ENOSPC;
	}

	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;

	d_instantiate(dentry, inode);

	if (flags & SPU_CREATE_NOSCHED)
		ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
					mode, ctx);
	else
		ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);

	if (!ret && spufs_get_sb_info(dir->i_sb)->debug)
		ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
				mode, ctx);

	if (ret)
		spufs_rmdir(dir, dentry);

	return ret;
}

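/*
 * Open the freshly created context directory and return the new file
 * descriptor; this is the value that the spu_create syscall ultimately
 * hands back to user space.
 */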
static int spufs_context_open(struct path *path)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd_flags(0);
	if (ret < 0)
		return ret;

	filp = dentry_open(path, O_RDONLY, current_cred());
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		return PTR_ERR(filp);
	}

	filp->f_op = &spufs_context_fops;
	fd_install(ret, filp);
	return ret;
}

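/*
 * Validate the SPU_CREATE_AFFINITY_* flags against the gang and, for
 * SPU_CREATE_AFFINITY_SPU, resolve the neighbor context from the file
 * descriptor passed in by the caller. Returns the neighbor (or NULL)
 * on success, an ERR_PTR() otherwise.
 */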
static struct spu_context *
spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
						struct file *filp)
{
	struct spu_context *tmp, *neighbor, *err;
	int count, node;
	int aff_supp;

	aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next,
					struct spu, cbe_list))->aff_list);

	if (!aff_supp)
		return ERR_PTR(-EINVAL);

	if (flags & SPU_CREATE_GANG)
		return ERR_PTR(-EINVAL);

	if (flags & SPU_CREATE_AFFINITY_MEM &&
	    gang->aff_ref_ctx &&
	    gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM)
		return ERR_PTR(-EEXIST);

	if (gang->aff_flags & AFF_MERGED)
		return ERR_PTR(-EBUSY);

	neighbor = NULL;
	if (flags & SPU_CREATE_AFFINITY_SPU) {
		if (!filp || filp->f_op != &spufs_context_fops)
			return ERR_PTR(-EINVAL);

		neighbor = get_spu_context(
				SPUFS_I(file_inode(filp))->i_ctx);

		if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
		    !list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
		    !list_entry(neighbor->aff_list.next, struct spu_context,
		    aff_list)->aff_head) {
			err = ERR_PTR(-EEXIST);
			goto out_put_neighbor;
		}

		if (gang != neighbor->gang) {
			err = ERR_PTR(-EINVAL);
			goto out_put_neighbor;
		}

		count = 1;
		list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
			count++;
		if (list_empty(&neighbor->aff_list))
			count++;

		for (node = 0; node < MAX_NUMNODES; node++) {
			if ((cbe_spu_info[node].n_spus - atomic_read(
				&cbe_spu_info[node].reserved_spus)) >= count)
				break;
		}

		if (node == MAX_NUMNODES) {
			err = ERR_PTR(-EEXIST);
			goto out_put_neighbor;
		}
	}

	return neighbor;

out_put_neighbor:
	put_spu_context(neighbor);
	return err;
}

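/*
 * Record the requested affinity: either mark this context as the gang's
 * memory-affinity reference, or chain it into the neighbor's aff_list.
 */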
static void
spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
		struct spu_context *neighbor)
{
	if (flags & SPU_CREATE_AFFINITY_MEM)
		ctx->gang->aff_ref_ctx = ctx;

	if (flags & SPU_CREATE_AFFINITY_SPU) {
		if (list_empty(&neighbor->aff_list)) {
			list_add_tail(&neighbor->aff_list,
				&ctx->gang->aff_list_head);
			neighbor->aff_head = 1;
		}

		if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head)
		    || list_entry(neighbor->aff_list.next, struct spu_context,
							aff_list)->aff_head) {
			list_add(&ctx->aff_list, &neighbor->aff_list);
		} else {
			list_add_tail(&ctx->aff_list, &neighbor->aff_list);
			if (neighbor->aff_head) {
				neighbor->aff_head = 0;
				ctx->aff_head = 1;
			}
		}

		if (!ctx->gang->aff_ref_ctx)
			ctx->gang->aff_ref_ctx = ctx;
	}
}

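/*
 * Back end of spu_create() for a plain context: check the flags, take the
 * gang's affinity mutex if needed, build the directory and return an open
 * file descriptor for it.
 */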
static int
spufs_create_context(struct inode *inode, struct dentry *dentry,
			struct vfsmount *mnt, int flags, umode_t mode,
			struct file *aff_filp)
{
	int ret;
	int affinity;
	struct spu_gang *gang;
	struct spu_context *neighbor;
	struct path path = {.mnt = mnt, .dentry = dentry};

	if ((flags & SPU_CREATE_NOSCHED) &&
	    !capable(CAP_SYS_NICE))
		return -EPERM;

	if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
	    == SPU_CREATE_ISOLATE)
		return -EINVAL;

	if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
		return -ENODEV;

	gang = NULL;
	neighbor = NULL;
	affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
	if (affinity) {
		gang = SPUFS_I(inode)->i_gang;
		if (!gang)
			return -EINVAL;
		mutex_lock(&gang->aff_mutex);
		neighbor = spufs_assert_affinity(flags, gang, aff_filp);
		if (IS_ERR(neighbor)) {
			ret = PTR_ERR(neighbor);
			goto out_aff_unlock;
		}
	}

	ret = spufs_mkdir(inode, dentry, flags, mode & 0777);
	if (ret)
		goto out_aff_unlock;

	if (affinity) {
		spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,
								neighbor);
		if (neighbor)
			put_spu_context(neighbor);
	}

	ret = spufs_context_open(&path);
	if (ret < 0)
		WARN_ON(spufs_rmdir(inode, dentry));

out_aff_unlock:
	if (affinity)
		mutex_unlock(&gang->aff_mutex);
	return ret;
}

static int
spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;
	struct spu_gang *gang;

	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		return -ENOSPC;

	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		inode->i_mode &= S_ISGID;
	}
	gang = alloc_spu_gang();
	SPUFS_I(inode)->i_ctx = NULL;
	SPUFS_I(inode)->i_gang = gang;
	if (!gang) {
		iput(inode);
		return -ENOMEM;
	}

	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;

	d_instantiate(dentry, inode);
	inc_nlink(d_inode(dentry));
	return 0;
}

static int spufs_gang_open(struct path *path)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd_flags(0);
	if (ret < 0)
		return ret;

	/*
	 * get references for dget and mntget, will be released
	 * in error path of *_open().
	 */
	filp = dentry_open(path, O_RDONLY, current_cred());
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		return PTR_ERR(filp);
	}

	filp->f_op = &simple_dir_operations;
	fd_install(ret, filp);
	return ret;
}

static int spufs_create_gang(struct inode *inode,
			struct dentry *dentry,
			struct vfsmount *mnt, umode_t mode)
{
	struct path path = {.mnt = mnt, .dentry = dentry};
	int ret;

	ret = spufs_mkgang(inode, dentry, mode & 0777);
	if (!ret) {
		ret = spufs_gang_open(&path);
		if (ret < 0) {
			int err = simple_rmdir(inode, dentry);
			WARN_ON(err);
		}
	}

	return ret;
}

static struct file_system_type spufs_type;

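/*
 * Entry point called from the spu_create() system call. @path/@dentry name
 * a not-yet-existing directory below a mounted spufs. From user space the
 * call looks roughly like this (the 4-argument form passes the neighbor
 * file descriptor used for SPU_CREATE_AFFINITY_SPU):
 *
 *	int ctx = spu_create("/spu/myctx", 0, 0755);
 *	int gang = spu_create("/spu/mygang", SPU_CREATE_GANG, 0755);
 */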
long spufs_create(struct path *path, struct dentry *dentry,
		unsigned int flags, umode_t mode, struct file *filp)
{
	struct inode *dir = d_inode(path->dentry);
	int ret;

	/* check if we are on spufs */
	if (path->dentry->d_sb->s_type != &spufs_type)
		return -EINVAL;

	/* don't accept undefined flags */
	if (flags & (~SPU_CREATE_FLAG_ALL))
		return -EINVAL;

	/* only threads can be underneath a gang */
	if (path->dentry != path->dentry->d_sb->s_root)
		if ((flags & SPU_CREATE_GANG) || !SPUFS_I(dir)->i_gang)
			return -EINVAL;

	mode &= ~current_umask();

	if (flags & SPU_CREATE_GANG)
		ret = spufs_create_gang(dir, dentry, path->mnt, mode);
	else
		ret = spufs_create_context(dir, dentry, path->mnt, flags, mode,
					   filp);
	if (ret >= 0)
		fsnotify_mkdir(dir, dentry);

	return ret;
}

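/*
 * Mount-time options. spufs accepts uid=, gid=, mode= and debug, parsed
 * through the fs_context API below; a typical mount looks roughly like:
 *
 *	mount -t spufs -o uid=0,gid=0,mode=0775,debug none /spu
 */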
/* File system initialization */
struct spufs_fs_context {
	kuid_t	uid;
	kgid_t	gid;
	umode_t	mode;
};

enum {
	Opt_uid, Opt_gid, Opt_mode, Opt_debug,
};

static const struct fs_parameter_spec spufs_fs_parameters[] = {
	fsparam_u32 ("gid", Opt_gid),
	fsparam_u32oct ("mode", Opt_mode),
	fsparam_u32 ("uid", Opt_uid),
	fsparam_flag ("debug", Opt_debug),
	{}
};

static int spufs_show_options(struct seq_file *m, struct dentry *root)
{
	struct spufs_sb_info *sbi = spufs_get_sb_info(root->d_sb);
	struct inode *inode = root->d_inode;

	if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, inode->i_uid));
	if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, inode->i_gid));
	if ((inode->i_mode & S_IALLUGO) != 0775)
		seq_printf(m, ",mode=%o", inode->i_mode);
	if (sbi->debug)
		seq_puts(m, ",debug");
	return 0;
}

static int spufs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct spufs_fs_context *ctx = fc->fs_private;
	struct spufs_sb_info *sbi = fc->s_fs_info;
	struct fs_parse_result result;
	kuid_t uid;
	kgid_t gid;
	int opt;

	opt = fs_parse(fc, spufs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(uid))
			return invalf(fc, "Unknown uid");
		ctx->uid = uid;
		break;
	case Opt_gid:
		gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(gid))
			return invalf(fc, "Unknown gid");
		ctx->gid = gid;
		break;
	case Opt_mode:
		ctx->mode = result.uint_32 & S_IALLUGO;
		break;
	case Opt_debug:
		sbi->debug = true;
		break;
	}

	return 0;
}

static void spufs_exit_isolated_loader(void)
{
	free_pages((unsigned long) isolated_loader,
			get_order(isolated_loader_size));
}

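/*
 * Copy the isolated-mode loader out of the device tree ("/spu-isolation",
 * property "loader") into page-aligned kernel memory. Without it,
 * SPU_CREATE_ISOLATE requests are rejected in spufs_create_context().
 */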
static void __init
spufs_init_isolated_loader(void)
{
	struct device_node *dn;
	const char *loader;
	int size;

	dn = of_find_node_by_path("/spu-isolation");
	if (!dn)
		return;

	loader = of_get_property(dn, "loader", &size);
	of_node_put(dn);
	if (!loader)
		return;

	/* the loader must be aligned on a 16 byte boundary */
	isolated_loader = (char *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!isolated_loader)
		return;

	isolated_loader_size = size;
	memcpy(isolated_loader, loader, size);
	printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
}

static int spufs_create_root(struct super_block *sb, struct fs_context *fc)
{
	struct spufs_fs_context *ctx = fc->fs_private;
	struct inode *inode;

	if (!spu_management_ops)
		return -ENODEV;

	inode = spufs_new_inode(sb, S_IFDIR | ctx->mode);
	if (!inode)
		return -ENOMEM;

	inode->i_uid = ctx->uid;
	inode->i_gid = ctx->gid;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	SPUFS_I(inode)->i_ctx = NULL;
	inc_nlink(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static const struct super_operations spufs_ops = {
	.alloc_inode	= spufs_alloc_inode,
	.free_inode	= spufs_free_inode,
	.statfs		= simple_statfs,
	.evict_inode	= spufs_evict_inode,
	.show_options	= spufs_show_options,
};

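/*
 * Fill in the superblock and create the root directory; the uid/gid/mode
 * collected while parsing the mount options are applied to the root inode
 * in spufs_create_root().
 */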
static int spufs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = SPUFS_MAGIC;
	sb->s_op = &spufs_ops;

	return spufs_create_root(sb, fc);
}

static int spufs_get_tree(struct fs_context *fc)
{
	return get_tree_single(fc, spufs_fill_super);
}

static void spufs_free_fc(struct fs_context *fc)
{
	kfree(fc->s_fs_info);
}

static const struct fs_context_operations spufs_context_ops = {
	.free		= spufs_free_fc,
	.parse_param	= spufs_parse_param,
	.get_tree	= spufs_get_tree,
};

static int spufs_init_fs_context(struct fs_context *fc)
{
	struct spufs_fs_context *ctx;
	struct spufs_sb_info *sbi;

	ctx = kzalloc(sizeof(struct spufs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	sbi = kzalloc(sizeof(struct spufs_sb_info), GFP_KERNEL);
	if (!sbi) {
		kfree(ctx);
		return -ENOMEM;
	}

	ctx->uid = current_uid();
	ctx->gid = current_gid();
	ctx->mode = 0755;

	fc->fs_private = ctx;
	fc->s_fs_info = sbi;
	fc->ops = &spufs_context_ops;
	return 0;
}

static struct file_system_type spufs_type = {
	.owner = THIS_MODULE,
	.name = "spufs",
	.init_fs_context = spufs_init_fs_context,
	.parameters = spufs_fs_parameters,
	.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("spufs");

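/*
 * Module init: set up the inode cache and scheduler, register the
 * spu_create/spu_run syscalls and the "spufs" filesystem, then try to
 * load the isolation loader. Each step is unwound on failure.
 */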
static int __init spufs_init(void)
{
	int ret;

	ret = -ENODEV;
	if (!spu_management_ops)
		goto out;

	ret = -ENOMEM;
	spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
			sizeof(struct spufs_inode_info), 0,
			SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, spufs_init_once);

	if (!spufs_inode_cache)
		goto out;
	ret = spu_sched_init();
	if (ret)
		goto out_cache;
	ret = register_spu_syscalls(&spufs_calls);
	if (ret)
		goto out_sched;
	ret = register_filesystem(&spufs_type);
	if (ret)
		goto out_syscalls;

	spufs_init_isolated_loader();

	return 0;

out_syscalls:
	unregister_spu_syscalls(&spufs_calls);
out_sched:
	spu_sched_exit();
out_cache:
	kmem_cache_destroy(spufs_inode_cache);
out:
	return ret;
}
module_init(spufs_init);

static void __exit spufs_exit(void)
{
	spu_sched_exit();
	spufs_exit_isolated_loader();
	unregister_spu_syscalls(&spufs_calls);
	unregister_filesystem(&spufs_type);
	kmem_cache_destroy(spufs_inode_cache);
}
module_exit(spufs_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");