/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/completion.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mount.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/crc32.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>

#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_inode.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_acl.h"
#include "jfs_debug.h"

MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");

static struct kmem_cache *jfs_inode_cachep;

static const struct super_operations jfs_super_operations;
static const struct export_operations jfs_export_operations;
static struct file_system_type jfs_fs_type;

#define MAX_COMMIT_THREADS 64
static int commit_threads = 0;
module_param(commit_threads, int, 0);
MODULE_PARM_DESC(commit_threads, "Number of commit threads");

static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
struct task_struct *jfsIOthread;
struct task_struct *jfsSyncThread;

#ifdef CONFIG_JFS_DEBUG
int jfsloglevel = JFS_LOGLEVEL_WARN;
module_param(jfsloglevel, int, 0644);
MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
#endif
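
/*
 * Enforce the errors= mount policy after a filesystem error has been
 * reported: mark the on-disk superblock dirty, then either continue,
 * remount read-only, or panic, depending on sbi->flag.
 */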
static void jfs_handle_error(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	if (sb->s_flags & MS_RDONLY)
		return;

	updateSuper(sb, FM_DIRTY);

	if (sbi->flag & JFS_ERR_PANIC)
		panic("JFS (device %s): panic forced after error\n",
			sb->s_id);
	else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
		jfs_err("ERROR: (device %s): remounting filesystem "
			"as read-only\n",
			sb->s_id);
		sb->s_flags |= MS_RDONLY;
	}

	/* nothing is done for continue beyond marking the superblock dirty */
}
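
/*
 * printf-style error reporting entry point; note that the "function"
 * argument is used as the format string for the variable arguments.
 */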
void jfs_error(struct super_block *sb, const char * function, ...)
{
	static char error_buf[256];
	va_list args;

	va_start(args, function);
	vsnprintf(error_buf, sizeof(error_buf), function, args);
	va_end(args);

	printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf);

	jfs_handle_error(sb);
}

static struct inode *jfs_alloc_inode(struct super_block *sb)
{
	struct jfs_inode_info *jfs_inode;

	jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
	if (!jfs_inode)
		return NULL;
	return &jfs_inode->vfs_inode;
}
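
/*
 * Free the in-core inode, dropping the reference it may still hold on
 * its active allocation group.
 */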
static void jfs_destroy_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);
	kmem_cache_free(jfs_inode_cachep, ji);
}

static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
	s64 maxinodes;
	struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;

	jfs_info("In jfs_statfs");
	buf->f_type = JFS_SUPER_MAGIC;
	buf->f_bsize = sbi->bsize;
	buf->f_blocks = sbi->bmap->db_mapsize;
	buf->f_bfree = sbi->bmap->db_nfree;
	buf->f_bavail = sbi->bmap->db_nfree;
	/*
	 * If we really return the number of allocated & free inodes, some
	 * applications will fail because they won't see enough free inodes.
	 * We'll try to calculate some guess as to how many inodes we can
	 * really allocate
	 *
	 * buf->f_files = atomic_read(&imap->im_numinos);
	 * buf->f_ffree = atomic_read(&imap->im_numfree);
	 */
	maxinodes = min((s64) atomic_read(&imap->im_numinos) +
			((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
			 << L2INOSPEREXT), (s64) 0xffffffffLL);
	buf->f_files = maxinodes;
	buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
				    atomic_read(&imap->im_numfree));
	buf->f_fsid.val[0] = (u32)crc32_le(0, sbi->uuid, sizeof(sbi->uuid)/2);
	buf->f_fsid.val[1] = (u32)crc32_le(0, sbi->uuid + sizeof(sbi->uuid)/2,
				sizeof(sbi->uuid)/2);

	buf->f_namelen = JFS_NAME_MAX;
	return 0;
}

static void jfs_put_super(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int rc;

	jfs_info("In jfs_put_super");

	lock_kernel();

	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);

	unload_nls(sbi->nls_tab);

	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	iput(sbi->direct_inode);

	kfree(sbi);

	unlock_kernel();
}

enum {
	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask
};

static const match_table_t tokens = {
	{Opt_integrity, "integrity"},
	{Opt_nointegrity, "nointegrity"},
	{Opt_iocharset, "iocharset=%s"},
	{Opt_resize, "resize=%u"},
	{Opt_resize_nosize, "resize"},
	{Opt_errors, "errors=%s"},
	{Opt_ignore, "noquota"},
	{Opt_ignore, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_uid, "uid=%u"},
	{Opt_gid, "gid=%u"},
	{Opt_umask, "umask=%u"},
	{Opt_err, NULL}
};
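
/*
 * Parse the comma-separated mount option string, updating *flag and
 * *newLVSize as well as the uid/gid/umask/nls fields of the jfs_sb_info.
 * Returns 1 on success and 0 on failure.
 */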
static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
			 int *flag)
{
	void *nls_map = (void *)-1;	/* -1: no change;  NULL: none */
	char *p;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	*newLVSize = 0;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_integrity:
			*flag &= ~JFS_NOINTEGRITY;
			break;
		case Opt_nointegrity:
			*flag |= JFS_NOINTEGRITY;
			break;
		case Opt_ignore:
			/* Silently ignore the quota options */
			/* Don't do anything ;-) */
			break;
		case Opt_iocharset:
			if (nls_map && nls_map != (void *) -1)
				unload_nls(nls_map);
			if (!strcmp(args[0].from, "none"))
				nls_map = NULL;
			else {
				nls_map = load_nls(args[0].from);
				if (!nls_map) {
					printk(KERN_ERR
					       "JFS: charset not found\n");
					goto cleanup;
				}
			}
			break;
		case Opt_resize:
		{
			char *resize = args[0].from;
			*newLVSize = simple_strtoull(resize, &resize, 0);
			break;
		}
		case Opt_resize_nosize:
		{
			*newLVSize = sb->s_bdev->bd_inode->i_size >>
				sb->s_blocksize_bits;
			if (*newLVSize == 0)
				printk(KERN_ERR
				       "JFS: Cannot determine volume size\n");
			break;
		}
		case Opt_errors:
		{
			char *errors = args[0].from;
			if (!errors || !*errors)
				goto cleanup;
			if (!strcmp(errors, "continue")) {
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_CONTINUE;
			} else if (!strcmp(errors, "remount-ro")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_REMOUNT_RO;
			} else if (!strcmp(errors, "panic")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag |= JFS_ERR_PANIC;
			} else {
				printk(KERN_ERR
				       "JFS: %s is an invalid error handler\n",
				       errors);
				goto cleanup;
			}
			break;
		}

#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			*flag |= JFS_USRQUOTA;
			break;
		case Opt_grpquota:
			*flag |= JFS_GRPQUOTA;
			break;
#else
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_quota:
			printk(KERN_ERR
			       "JFS: quota operations not supported\n");
			break;
#endif
		case Opt_uid:
		{
			char *uid = args[0].from;
			sbi->uid = simple_strtoul(uid, &uid, 0);
			break;
		}
		case Opt_gid:
		{
			char *gid = args[0].from;
			sbi->gid = simple_strtoul(gid, &gid, 0);
			break;
		}
		case Opt_umask:
		{
			char *umask = args[0].from;
			sbi->umask = simple_strtoul(umask, &umask, 8);
			if (sbi->umask & ~0777) {
				printk(KERN_ERR
				       "JFS: Invalid value of umask\n");
				goto cleanup;
			}
			break;
		}
		default:
			printk("jfs: Unrecognized mount option \"%s\" "
					" or missing value\n", p);
			goto cleanup;
		}
	}

	if (nls_map != (void *) -1) {
		/* Discard old (if remount) */
		unload_nls(sbi->nls_tab);
		sbi->nls_tab = nls_map;
	}
	return 1;

cleanup:
	if (nls_map && nls_map != (void *) -1)
		unload_nls(nls_map);
	return 0;
}
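
/*
 * Handle "mount -o remount": optional online volume resize, transitions
 * between read-only and read-write, and changes to the integrity mode
 * (the latter requires quiescing and restarting the log).
 */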
static int jfs_remount(struct super_block *sb, int *flags, char *data)
{
	s64 newLVSize = 0;
	int rc = 0;
	int flag = JFS_SBI(sb)->flag;
	int ret;

	if (!parse_options(data, sb, &newLVSize, &flag)) {
		return -EINVAL;
	}
	lock_kernel();
	if (newLVSize) {
		if (sb->s_flags & MS_RDONLY) {
			printk(KERN_ERR
		  "JFS: resize requires volume to be mounted read-write\n");
			unlock_kernel();
			return -EROFS;
		}
		rc = jfs_extendfs(sb, newLVSize, 0);
		if (rc) {
			unlock_kernel();
			return rc;
		}
	}

	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
		/*
		 * Invalidate any previously read metadata.  fsck may have
		 * changed the on-disk data since we mounted r/o
		 */
		truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);

		JFS_SBI(sb)->flag = flag;
		ret = jfs_mount_rw(sb, 1);
		unlock_kernel();
		return ret;
	}
	if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
		rc = jfs_umount_rw(sb);
		JFS_SBI(sb)->flag = flag;
		unlock_kernel();
		return rc;
	}
	if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
		if (!(sb->s_flags & MS_RDONLY)) {
			rc = jfs_umount_rw(sb);
			if (rc) {
				unlock_kernel();
				return rc;
			}
			JFS_SBI(sb)->flag = flag;
			ret = jfs_mount_rw(sb, 1);
			unlock_kernel();
			return ret;
		}
	JFS_SBI(sb)->flag = flag;

	unlock_kernel();
	return 0;
}
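
/*
 * Read the on-disk superblock and set up the in-core mount: the
 * direct-mapped metadata inode, the journal (for read-write mounts)
 * and the root dentry.
 */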
static int jfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jfs_sb_info *sbi;
	struct inode *inode;
	int rc;
	s64 newLVSize = 0;
	int flag, ret = -EINVAL;

	jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);

	if (!new_valid_dev(sb->s_bdev->bd_dev))
		return -EOVERFLOW;

	sbi = kzalloc(sizeof (struct jfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;
	sbi->sb = sb;
	sbi->uid = sbi->gid = sbi->umask = -1;

	/* initialize the mount flag and determine the default error handler */
	flag = JFS_ERR_REMOUNT_RO;

	if (!parse_options((char *) data, sb, &newLVSize, &flag)) {
		kfree(sbi);
		return -EINVAL;
	}
	sbi->flag = flag;

#ifdef CONFIG_JFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	if (newLVSize) {
		printk(KERN_ERR "resize option for remount only\n");
		/* ret is already -EINVAL; don't leak sbi on this path */
		goto out_kfree;
	}

	/*
	 * Initialize blocksize to 4K.
	 */
	sb_set_blocksize(sb, PSIZE);

	/*
	 * Set method vectors.
	 */
	sb->s_op = &jfs_super_operations;
	sb->s_export_op = &jfs_export_operations;

	/*
	 * Initialize direct-mapping inode/address-space
	 */
	inode = new_inode(sb);
	if (inode == NULL) {
		ret = -ENOMEM;
		goto out_kfree;
	}
	inode->i_ino = 0;
	inode->i_nlink = 1;
	inode->i_size = sb->s_bdev->bd_inode->i_size;
	inode->i_mapping->a_ops = &jfs_metapage_aops;
	insert_inode_hash(inode);
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

	sbi->direct_inode = inode;

	rc = jfs_mount(sb);
	if (rc) {
		if (!silent) {
			jfs_err("jfs_mount failed w/return code = %d", rc);
		}
		goto out_mount_failed;
	}
	if (sb->s_flags & MS_RDONLY)
		sbi->log = NULL;
	else {
		rc = jfs_mount_rw(sb, 0);
		if (rc) {
			if (!silent) {
				jfs_err("jfs_mount_rw failed, return code = %d",
					rc);
			}
			goto out_no_rw;
		}
	}

	sb->s_magic = JFS_SUPER_MAGIC;

	inode = jfs_iget(sb, ROOT_I);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto out_no_rw;
	}
	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root)
		goto out_no_root;

	if (sbi->mntflag & JFS_OS2)
		sb->s_root->d_op = &jfs_ci_dentry_operations;

	/* logical blocks are represented by 40 bits in pxd_t, etc. */
	sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
#if BITS_PER_LONG == 32
	/*
	 * Page cache is indexed by long.
	 * I would use MAX_LFS_FILESIZE, but it's only half as big
	 */
	sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, sb->s_maxbytes);
#endif
	sb->s_time_gran = 1;
	return 0;

out_no_root:
	jfs_err("jfs_read_super: get root dentry failed");
	iput(inode);

out_no_rw:
	rc = jfs_umount(sb);
	if (rc) {
		jfs_err("jfs_umount failed with return code %d", rc);
	}
out_mount_failed:
	filemap_write_and_wait(sbi->direct_inode->i_mapping);
	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	make_bad_inode(sbi->direct_inode);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;
out_kfree:
	if (sbi->nls_tab)
		unload_nls(sbi->nls_tab);
	kfree(sbi);
	return ret;
}
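
/*
 * freeze_fs: quiesce transactions, shut down the log and mark the
 * superblock clean so the frozen image is self-consistent.
 */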
static int jfs_freeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;

	if (!(sb->s_flags & MS_RDONLY)) {
		txQuiesce(sb);
		lmLogShutdown(log);
		updateSuper(sb, FM_CLEAN);
	}
	return 0;
}
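
/*
 * unfreeze_fs: mark the superblock mounted again, reinitialize the log
 * and resume transaction processing.
 */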
static int jfs_unfreeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!(sb->s_flags & MS_RDONLY)) {
		updateSuper(sb, FM_MOUNT);
		if ((rc = lmLogInit(log)))
			jfs_err("jfs_unlock failed with return code %d", rc);
		else
			txResume(sb);
	}
	return 0;
}

static int jfs_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, jfs_fill_super,
			   mnt);
}
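
/*
 * sync_fs: flush the journal; a NULL log means the filesystem is
 * mounted read-only and there is nothing to write back.
 */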
static int jfs_sync_fs(struct super_block *sb, int wait)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	/* log == NULL indicates read-only mount */
	if (log) {
		jfs_flush_journal(log, wait);
		jfs_syncpt(log, 0);
	}

	return 0;
}

static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb);

	if (sbi->uid != -1)
		seq_printf(seq, ",uid=%d", sbi->uid);
	if (sbi->gid != -1)
		seq_printf(seq, ",gid=%d", sbi->gid);
	if (sbi->umask != -1)
		seq_printf(seq, ",umask=%03o", sbi->umask);
	if (sbi->flag & JFS_NOINTEGRITY)
		seq_puts(seq, ",nointegrity");
	if (sbi->nls_tab)
		seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset);
	if (sbi->flag & JFS_ERR_CONTINUE)
		seq_printf(seq, ",errors=continue");
	if (sbi->flag & JFS_ERR_PANIC)
		seq_printf(seq, ",errors=panic");

#ifdef CONFIG_QUOTA
	if (sbi->flag & JFS_USRQUOTA)
		seq_puts(seq, ",usrquota");

	if (sbi->flag & JFS_GRPQUOTA)
		seq_puts(seq, ",grpquota");
#endif

	return 0;
}

#ifdef CONFIG_QUOTA

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
			      size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 0);
		if (err)
			return err;
		if (!buffer_mapped(&tmp_bh))	/* A hole? */
			memset(data, 0, tocopy);
		else {
			bh = sb_bread(sb, tmp_bh.b_blocknr);
			if (!bh)
				return -EIO;
			memcpy(data, bh->b_data+offset, tocopy);
			brelse(bh);
		}
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t jfs_quota_write(struct super_block *sb, int type,
			       const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	mutex_lock(&inode->i_mutex);
	while (towrite > 0) {
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 1);
		if (err)
			goto out;
		if (offset || tocopy != sb->s_blocksize)
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	if (len == towrite) {
		mutex_unlock(&inode->i_mutex);
		return err;
	}
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	mutex_unlock(&inode->i_mutex);
	return len - towrite;
}

#endif

static const struct super_operations jfs_super_operations = {
	.alloc_inode	= jfs_alloc_inode,
	.destroy_inode	= jfs_destroy_inode,
	.dirty_inode	= jfs_dirty_inode,
	.write_inode	= jfs_write_inode,
	.delete_inode	= jfs_delete_inode,
	.put_super	= jfs_put_super,
	.sync_fs	= jfs_sync_fs,
	.freeze_fs	= jfs_freeze,
	.unfreeze_fs	= jfs_unfreeze,
	.statfs		= jfs_statfs,
	.remount_fs	= jfs_remount,
	.show_options	= jfs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= jfs_quota_read,
	.quota_write	= jfs_quota_write,
#endif
};

static const struct export_operations jfs_export_operations = {
	.fh_to_dentry	= jfs_fh_to_dentry,
	.fh_to_parent	= jfs_fh_to_parent,
	.get_parent	= jfs_get_parent,
};

static struct file_system_type jfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "jfs",
	.get_sb		= jfs_get_sb,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
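
/*
 * Slab constructor: called when an object is first added to the jfs_ip
 * cache, so only state that stays valid across inode reuse is set up here.
 */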
static void init_once(void *foo)
{
	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;

	memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
	INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
	init_rwsem(&jfs_ip->rdwrlock);
	mutex_init(&jfs_ip->commit_mutex);
	init_rwsem(&jfs_ip->xattr_sem);
	spin_lock_init(&jfs_ip->ag_lock);
	jfs_ip->active_ag = -1;
	inode_init_once(&jfs_ip->vfs_inode);
}
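
/*
 * Module init: create the inode cache, initialize the metapage and
 * transaction managers, start the I/O, commit and sync kernel threads,
 * then register the filesystem type.
 */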
static int __init init_jfs_fs(void)
{
	int i;
	int rc;

	jfs_inode_cachep =
	    kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
			    SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
			    init_once);
	if (jfs_inode_cachep == NULL)
		return -ENOMEM;

	/*
	 * Metapage initialization
	 */
	rc = metapage_init();
	if (rc) {
		jfs_err("metapage_init failed w/rc = %d", rc);
		goto free_slab;
	}

	/*
	 * Transaction Manager initialization
	 */
	rc = txInit();
	if (rc) {
		jfs_err("txInit failed w/rc = %d", rc);
		goto free_metapage;
	}

	/*
	 * I/O completion thread (endio)
	 */
	jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
	if (IS_ERR(jfsIOthread)) {
		rc = PTR_ERR(jfsIOthread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto end_txmngr;
	}

	if (commit_threads < 1)
		commit_threads = num_online_cpus();
	if (commit_threads > MAX_COMMIT_THREADS)
		commit_threads = MAX_COMMIT_THREADS;

	for (i = 0; i < commit_threads; i++) {
		jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL, "jfsCommit");
		if (IS_ERR(jfsCommitThread[i])) {
			rc = PTR_ERR(jfsCommitThread[i]);
			jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
			commit_threads = i;
			goto kill_committask;
		}
	}

	jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
	if (IS_ERR(jfsSyncThread)) {
		rc = PTR_ERR(jfsSyncThread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto kill_committask;
	}

#ifdef PROC_FS_JFS
	jfs_proc_init();
#endif

	return register_filesystem(&jfs_fs_type);

kill_committask:
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsIOthread);
end_txmngr:
	txExit();
free_metapage:
	metapage_exit();
free_slab:
	kmem_cache_destroy(jfs_inode_cachep);
	return rc;
}

static void __exit exit_jfs_fs(void)
{
	int i;

	jfs_info("exit_jfs_fs called");

	txExit();
	metapage_exit();

	kthread_stop(jfsIOthread);
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsSyncThread);
#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	unregister_filesystem(&jfs_fs_type);
	kmem_cache_destroy(jfs_inode_cachep);
}

module_init(init_jfs_fs)
module_exit(exit_jfs_fs)