/*
 * SPU file system
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/parser.h>

#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/uaccess.h>

#include "spufs.h"
struct spufs_sb_info {
	int debug;
};

static struct kmem_cache *spufs_inode_cache;
char *isolated_loader;
static int isolated_loader_size;

static struct spufs_sb_info *spufs_get_sb_info(struct super_block *sb)
{
	return sb->s_fs_info;
}
static struct inode *
spufs_alloc_inode(struct super_block *sb)
{
	struct spufs_inode_info *ei;

	ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->i_gang = NULL;
	ei->i_ctx = NULL;
	ei->i_openers = 0;

	return &ei->vfs_inode;
}

static void spufs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}

static void spufs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, spufs_i_callback);
}

static void
spufs_init_once(void *p)
{
	struct spufs_inode_info *ei = p;

	inode_init_once(&ei->vfs_inode);
}
static struct inode *
spufs_new_inode(struct super_block *sb, int mode)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		goto out;

	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
out:
	return inode;
}

static int
spufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    (attr->ia_size != inode->i_size))
		return -EINVAL;
	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
		const struct file_operations *fops, int mode,
		size_t size, struct spu_context *ctx)
{
	static const struct inode_operations spufs_file_iops = {
		.setattr = spufs_setattr,
	};
	struct inode *inode;
	int ret;

	ret = -ENOSPC;
	inode = spufs_new_inode(sb, S_IFREG | mode);
	if (!inode)
		goto out;

	ret = 0;
	inode->i_op = &spufs_file_iops;
	inode->i_fop = fops;
	inode->i_size = size;
	inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
	d_add(dentry, inode);
out:
	return ret;
}

static void
spufs_evict_inode(struct inode *inode)
{
	struct spufs_inode_info *ei = SPUFS_I(inode);
	end_writeback(inode);
	if (ei->i_ctx)
		put_spu_context(ei->i_ctx);
	if (ei->i_gang)
		put_spu_gang(ei->i_gang);
}
static void spufs_prune_dir(struct dentry *dir)
{
	struct dentry *dentry, *tmp;

	mutex_lock(&dir->d_inode->i_mutex);
	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
		spin_lock(&dentry->d_lock);
		if (!(d_unhashed(dentry)) && dentry->d_inode) {
			dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			simple_unlink(dir->d_inode, dentry);
			/* XXX: what was dcache_lock protecting here? Other
			 * filesystems (IB, configfs) release dcache_lock
			 * before unlink */
			dput(dentry);
		} else {
			spin_unlock(&dentry->d_lock);
		}
	}
	shrink_dcache_parent(dir);
	mutex_unlock(&dir->d_inode->i_mutex);
}

/* Caller must hold parent->i_mutex */
static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
	/* remove all entries */
	spufs_prune_dir(dir);
	d_drop(dir);

	return simple_rmdir(parent, dir);
}
static int spufs_fill_dir(struct dentry *dir,
		const struct spufs_tree_descr *files, int mode,
		struct spu_context *ctx)
{
	struct dentry *dentry, *tmp;
	int ret;

	while (files->name && files->name[0]) {
		ret = -ENOMEM;
		dentry = d_alloc_name(dir, files->name);
		if (!dentry)
			goto out;
		ret = spufs_new_file(dir->d_sb, dentry, files->ops,
					files->mode & mode, files->size, ctx);
		if (ret)
			goto out;
		files++;
	}
	return 0;
out:
	/*
	 * remove all children from dir. dir->inode is not set so don't
	 * just simply use spufs_prune_dir() and panic afterwards :)
	 * dput() looks like it will do the right thing:
	 * - dec parent's ref counter
	 * - remove child from parent's child list
	 * - free child's inode if possible
	 * - free child
	 */
	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child)
		dput(dentry);

	shrink_dcache_parent(dir);
	return ret;
}
static int spufs_dir_close(struct inode *inode, struct file *file)
{
	struct spu_context *ctx;
	struct inode *parent;
	struct dentry *dir;
	int ret;

	dir = file->f_path.dentry;
	parent = dir->d_parent->d_inode;
	ctx = SPUFS_I(dir->d_inode)->i_ctx;

	mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
	ret = spufs_rmdir(parent, dir);
	mutex_unlock(&parent->i_mutex);
	WARN_ON(ret);

	/* We have to give up the mm_struct */
	spu_forget(ctx);

	return dcache_dir_close(inode, file);
}

const struct file_operations spufs_context_fops = {
	.open		= dcache_dir_open,
	.release	= spufs_dir_close,
	.llseek		= dcache_dir_lseek,
	.read		= generic_read_dir,
	.readdir	= dcache_readdir,
	.fsync		= noop_fsync,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);
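
/*
 * The file descriptor handed out for a context refers to the context
 * directory itself: spufs_context_open() below installs
 * spufs_context_fops on it.  Releasing that descriptor ends up in
 * spufs_dir_close(), which removes the directory tree again and gives
 * up the reference on the creator's mm via spu_forget().
 */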
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
		int mode)
{
	int ret;
	struct inode *inode;
	struct spu_context *ctx;

	ret = -ENOSPC;
	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		goto out;

	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		inode->i_mode &= S_ISGID;
	}
	ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
	SPUFS_I(inode)->i_ctx = ctx;
	if (!ctx)
		goto out_iput;

	ctx->flags = flags;
	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	if (flags & SPU_CREATE_NOSCHED)
		ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
					 mode, ctx);
	else
		ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);

	if (ret)
		goto out_free_ctx;

	if (spufs_get_sb_info(dir->i_sb)->debug)
		ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
				mode, ctx);

	if (ret)
		goto out_free_ctx;

	d_instantiate(dentry, inode);
	dget(dentry);
	inc_nlink(dir);
	inc_nlink(dentry->d_inode);
	goto out;

out_free_ctx:
	spu_forget(ctx);
	put_spu_context(ctx);
out_iput:
	iput(inode);
out:
	return ret;
}
static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd();
	if (ret < 0) {
		dput(dentry);
		mntput(mnt);
		goto out;
	}

	filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filp);
		goto out;
	}

	filp->f_op = &spufs_context_fops;
	fd_install(ret, filp);
out:
	return ret;
}
static struct spu_context *
spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
						struct file *filp)
{
	struct spu_context *tmp, *neighbor, *err;
	int count, node;
	int aff_supp;

	aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next,
					struct spu, cbe_list))->aff_list);

	if (!aff_supp)
		return ERR_PTR(-EINVAL);

	if (flags & SPU_CREATE_GANG)
		return ERR_PTR(-EINVAL);

	if (flags & SPU_CREATE_AFFINITY_MEM &&
	    gang->aff_ref_ctx &&
	    gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM)
		return ERR_PTR(-EEXIST);

	if (gang->aff_flags & AFF_MERGED)
		return ERR_PTR(-EBUSY);

	neighbor = NULL;
	if (flags & SPU_CREATE_AFFINITY_SPU) {
		if (!filp || filp->f_op != &spufs_context_fops)
			return ERR_PTR(-EINVAL);

		neighbor = get_spu_context(
				SPUFS_I(filp->f_dentry->d_inode)->i_ctx);

		if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
		    !list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
		    !list_entry(neighbor->aff_list.next, struct spu_context,
		    aff_list)->aff_head) {
			err = ERR_PTR(-EEXIST);
			goto out_put_neighbor;
		}

		if (gang != neighbor->gang) {
			err = ERR_PTR(-EINVAL);
			goto out_put_neighbor;
		}

		count = 1;
		list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
			count++;
		if (list_empty(&neighbor->aff_list))
			count++;

		for (node = 0; node < MAX_NUMNODES; node++) {
			if ((cbe_spu_info[node].n_spus - atomic_read(
				&cbe_spu_info[node].reserved_spus)) >= count)
				break;
		}

		if (node == MAX_NUMNODES) {
			err = ERR_PTR(-EEXIST);
			goto out_put_neighbor;
		}
	}

	return neighbor;

out_put_neighbor:
	put_spu_context(neighbor);
	return err;
}
static void
spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
					struct spu_context *neighbor)
{
	if (flags & SPU_CREATE_AFFINITY_MEM)
		ctx->gang->aff_ref_ctx = ctx;

	if (flags & SPU_CREATE_AFFINITY_SPU) {
		if (list_empty(&neighbor->aff_list)) {
			list_add_tail(&neighbor->aff_list,
				&ctx->gang->aff_list_head);
			neighbor->aff_head = 1;
		}

		if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head)
		    || list_entry(neighbor->aff_list.next, struct spu_context,
							aff_list)->aff_head) {
			list_add(&ctx->aff_list, &neighbor->aff_list);
		} else {
			list_add_tail(&ctx->aff_list, &neighbor->aff_list);
			if (neighbor->aff_head) {
				neighbor->aff_head = 0;
				ctx->aff_head = 1;
			}
		}

		if (!ctx->gang->aff_ref_ctx)
			ctx->gang->aff_ref_ctx = ctx;
	}
}
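
/*
 * Roughly, the invariant maintained above: a gang keeps its affinity
 * chain on gang->aff_list_head, each new context is linked directly
 * next to the neighbor handed to spu_create(), exactly one context in
 * the chain carries aff_head, and gang->aff_ref_ctx points at the
 * context that requested memory affinity (or, lacking one, the first
 * SPU-affinity context that was placed).
 */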
static int
spufs_create_context(struct inode *inode, struct dentry *dentry,
			struct vfsmount *mnt, int flags, int mode,
			struct file *aff_filp)
{
	int ret;
	int affinity;
	struct spu_gang *gang;
	struct spu_context *neighbor;

	ret = -EPERM;
	if ((flags & SPU_CREATE_NOSCHED) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;

	ret = -EINVAL;
	if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
	    == SPU_CREATE_ISOLATE)
		goto out_unlock;

	ret = -ENODEV;
	if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
		goto out_unlock;

	gang = NULL;
	neighbor = NULL;
	affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
	if (affinity) {
		gang = SPUFS_I(inode)->i_gang;
		ret = -EINVAL;
		if (!gang)
			goto out_unlock;
		mutex_lock(&gang->aff_mutex);
		neighbor = spufs_assert_affinity(flags, gang, aff_filp);
		if (IS_ERR(neighbor)) {
			ret = PTR_ERR(neighbor);
			goto out_aff_unlock;
		}
	}

	ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
	if (ret)
		goto out_aff_unlock;

	if (affinity) {
		spufs_set_affinity(flags, SPUFS_I(dentry->d_inode)->i_ctx,
								neighbor);
		if (neighbor)
			put_spu_context(neighbor);
	}

	/*
	 * get references for dget and mntget, will be released
	 * in error path of *_open().
	 */
	ret = spufs_context_open(dget(dentry), mntget(mnt));
	if (ret < 0) {
		WARN_ON(spufs_rmdir(inode, dentry));
		if (affinity)
			mutex_unlock(&gang->aff_mutex);
		mutex_unlock(&inode->i_mutex);
		spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
		goto out;
	}

out_aff_unlock:
	if (affinity)
		mutex_unlock(&gang->aff_mutex);
out_unlock:
	mutex_unlock(&inode->i_mutex);
out:
	dput(dentry);
	return ret;
}
static int
spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
{
	int ret;
	struct inode *inode;
	struct spu_gang *gang;

	ret = -ENOSPC;
	inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
	if (!inode)
		goto out;

	ret = 0;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		inode->i_mode &= S_ISGID;
	}
	gang = alloc_spu_gang();
	SPUFS_I(inode)->i_ctx = NULL;
	SPUFS_I(inode)->i_gang = gang;
	if (!gang)
		goto out_iput;

	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;

	d_instantiate(dentry, inode);
	inc_nlink(dir);
	inc_nlink(dentry->d_inode);
	return ret;

out_iput:
	iput(inode);
out:
	return ret;
}
static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
{
	int ret;
	struct file *filp;

	ret = get_unused_fd();
	if (ret < 0) {
		dput(dentry);
		mntput(mnt);
		goto out;
	}

	filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
	if (IS_ERR(filp)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filp);
		goto out;
	}

	filp->f_op = &simple_dir_operations;
	fd_install(ret, filp);
out:
	return ret;
}
static int spufs_create_gang(struct inode *inode,
			struct dentry *dentry,
			struct vfsmount *mnt, int mode)
{
	int ret;

	ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO);
	if (ret)
		goto out;

	/*
	 * get references for dget and mntget, will be released
	 * in error path of *_open().
	 */
	ret = spufs_gang_open(dget(dentry), mntget(mnt));
	if (ret < 0) {
		int err = simple_rmdir(inode, dentry);
		WARN_ON(err);
	}

out:
	mutex_unlock(&inode->i_mutex);
	dput(dentry);
	return ret;
}
static struct file_system_type spufs_type;

long spufs_create(struct path *path, struct dentry *dentry,
		unsigned int flags, mode_t mode, struct file *filp)
{
	int ret;

	ret = -EINVAL;
	/* check if we are on spufs */
	if (path->dentry->d_sb->s_type != &spufs_type)
		goto out;

	/* don't accept undefined flags */
	if (flags & (~SPU_CREATE_FLAG_ALL))
		goto out;

	/* only threads can be underneath a gang */
	if (path->dentry != path->dentry->d_sb->s_root) {
		if ((flags & SPU_CREATE_GANG) ||
		    !SPUFS_I(path->dentry->d_inode)->i_gang)
			goto out;
	}

	mode &= ~current_umask();

	if (flags & SPU_CREATE_GANG)
		ret = spufs_create_gang(path->dentry->d_inode,
					dentry, path->mnt, mode);
	else
		ret = spufs_create_context(path->dentry->d_inode,
					dentry, path->mnt, flags, mode,
					filp);
	if (ret >= 0)
		fsnotify_mkdir(path->dentry->d_inode, dentry);
	return ret;

out:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	return ret;
}
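
/*
 * spufs_create() is the spufs side of the spu_create(2) system call
 * (hooked up through register_spu_syscalls() below).  A minimal,
 * illustrative userspace sketch, assuming spufs is mounted on /spu:
 *
 *	int gang = spu_create("/spu/mygang", SPU_CREATE_GANG, 0755);
 *	int ctx  = spu_create("/spu/mygang/myctx", 0, 0755);
 *
 * Each call returns a file descriptor on the directory just created,
 * via spufs_gang_open() or spufs_context_open() above.
 */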
/* File system initialization */
enum {
	Opt_uid, Opt_gid, Opt_mode, Opt_debug, Opt_err,
};

static const match_table_t spufs_tokens = {
	{ Opt_uid,   "uid=%d" },
	{ Opt_gid,   "gid=%d" },
	{ Opt_mode,  "mode=%o" },
	{ Opt_debug, "debug" },
	{ Opt_err,    NULL },
};

static int
spufs_parse_options(struct super_block *sb, char *options, struct inode *root)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];

	while ((p = strsep(&options, ",")) != NULL) {
		int token, option;

		if (!*p)
			continue;

		token = match_token(p, spufs_tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				return 0;
			root->i_uid = option;
			break;
		case Opt_gid:
			if (match_int(&args[0], &option))
				return 0;
			root->i_gid = option;
			break;
		case Opt_mode:
			if (match_octal(&args[0], &option))
				return 0;
			root->i_mode = option | S_IFDIR;
			break;
		case Opt_debug:
			spufs_get_sb_info(sb)->debug = 1;
			break;
		default:
			return 0;
		}
	}
	return 1;
}
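
/*
 * The tokens above are the complete set of spufs mount options.  An
 * illustrative invocation (mount point and values are examples only):
 *
 *	mount -t spufs -o uid=0,gid=0,mode=0775,debug none /spu
 *
 * "debug" additionally populates each context directory with the
 * spufs_dir_debug_contents files (see spufs_mkdir()).
 */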
static void spufs_exit_isolated_loader(void)
{
	free_pages((unsigned long) isolated_loader,
			get_order(isolated_loader_size));
}

static void
spufs_init_isolated_loader(void)
{
	struct device_node *dn;
	const char *loader;
	int size;

	dn = of_find_node_by_path("/spu-isolation");
	if (!dn)
		return;

	loader = of_get_property(dn, "loader", &size);
	if (!loader)
		return;

	/* the loader must be aligned on a 16 byte boundary */
	isolated_loader = (char *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!isolated_loader)
		return;

	isolated_loader_size = size;
	memcpy(isolated_loader, loader, size);
	printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
}
static int
spufs_create_root(struct super_block *sb, void *data)
{
	struct inode *inode;
	int ret;

	ret = -ENODEV;
	if (!spu_management_ops)
		goto out;

	ret = -ENOMEM;
	inode = spufs_new_inode(sb, S_IFDIR | 0775);
	if (!inode)
		goto out;

	inode->i_op = &simple_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	SPUFS_I(inode)->i_ctx = NULL;
	inc_nlink(inode);

	ret = -EINVAL;
	if (!spufs_parse_options(sb, data, inode))
		goto out_iput;

	ret = -ENOMEM;
	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root)
		goto out_iput;

	return 0;
out_iput:
	iput(inode);
out:
	return ret;
}
static int
spufs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct spufs_sb_info *info;
	static const struct super_operations s_ops = {
		.alloc_inode = spufs_alloc_inode,
		.destroy_inode = spufs_destroy_inode,
		.statfs = simple_statfs,
		.evict_inode = spufs_evict_inode,
		.show_options = generic_show_options,
	};

	save_mount_options(sb, data);

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = SPUFS_MAGIC;
	sb->s_op = &s_ops;
	sb->s_fs_info = info;

	return spufs_create_root(sb, data);
}
static struct dentry *
spufs_mount(struct file_system_type *fstype, int flags,
		const char *name, void *data)
{
	return mount_single(fstype, flags, data, spufs_fill_super);
}

static struct file_system_type spufs_type = {
	.owner = THIS_MODULE,
	.name = "spufs",
	.mount = spufs_mount,
	.kill_sb = kill_litter_super,
};
static int __init spufs_init(void)
{
	int ret;

	ret = -ENODEV;
	if (!spu_management_ops)
		goto out;

	ret = -ENOMEM;
	spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
			sizeof(struct spufs_inode_info), 0,
			SLAB_HWCACHE_ALIGN, spufs_init_once);

	if (!spufs_inode_cache)
		goto out;
	ret = spu_sched_init();
	if (ret)
		goto out_cache;
	ret = register_filesystem(&spufs_type);
	if (ret)
		goto out_sched;
	ret = register_spu_syscalls(&spufs_calls);
	if (ret)
		goto out_fs;

	spufs_init_isolated_loader();

	return 0;

out_fs:
	unregister_filesystem(&spufs_type);
out_sched:
	spu_sched_exit();
out_cache:
	kmem_cache_destroy(spufs_inode_cache);
out:
	return ret;
}
module_init(spufs_init);
static void __exit spufs_exit(void)
{
	spu_sched_exit();
	spufs_exit_isolated_loader();
	unregister_spu_syscalls(&spufs_calls);
	unregister_filesystem(&spufs_type);
	kmem_cache_destroy(spufs_inode_cache);
}
module_exit(spufs_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");