/*
 *  linux/fs/ufs/inode.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * from
 *
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>

#include "ufs.h"
#include "swab.h"
#include "util.h"

static u64 ufs_frag_map(struct inode *inode, sector_t frag);

static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4])
{
        struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
        int ptrs = uspi->s_apb;
        int ptrs_bits = uspi->s_apbshift;
        const long direct_blocks = UFS_NDADDR,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;

        UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n", ptrs, double_blocks);
        if (i_block < 0) {
                ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0");
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = UFS_IND_BLOCK;
                offsets[n++] = i_block;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = UFS_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = UFS_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
        } else {
                ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
        }
        return n;
}
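
/*
 * Worked example (illustrative values, not from the original source):
 * with uspi->s_apb == 2048 pointers per block (ptrs_bits == 11) and
 * UFS_NDADDR == 12 direct blocks, i_block == 5000 falls past the
 * direct (< 12) and single indirect (< 12 + 2048) ranges; after the
 * subtractions i_block == 2940 < 2048^2, so the double indirect
 * branch fills in
 *
 *      offsets[] = { UFS_DIND_BLOCK, 2940 >> 11, 2940 & 2047 }
 *                = { UFS_DIND_BLOCK, 1, 892 },  and n == 3.
 */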

/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

static u64 ufs_frag_map(struct inode *inode, sector_t frag)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        u64 mask = (u64) uspi->s_apbmask >> uspi->s_fpbshift;
        int shift = uspi->s_apbshift - uspi->s_fpbshift;
        sector_t offsets[4], *p;
        int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
        u64 ret = 0L;
        __fs32 block;
        __fs64 u2_block = 0L;
        unsigned flags = UFS_SB(sb)->s_flags;
        u64 temp = 0L;

        UFSD(": frag = %llu  depth = %d\n", (unsigned long long)frag, depth);
        UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
             uspi->s_fpbshift, uspi->s_apbmask,
             (unsigned long long)mask);

        if (depth == 0)
                return 0;

        p = offsets;

        lock_kernel();
        if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
                goto ufs2;

        block = ufsi->i_u1.i_data[*p++];
        if (!block)
                goto out;

        while (--depth) {
                struct buffer_head *bh;
                sector_t n = *p++;

                bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block) + (n >> shift));
                if (!bh)
                        goto out;
                block = ((__fs32 *) bh->b_data)[n & mask];
                brelse(bh);
                if (!block)
                        goto out;
        }
        ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask));
        goto out;
ufs2:
        u2_block = ufsi->i_u1.u2_i_data[*p++];
        if (!u2_block)
                goto out;

        while (--depth) {
                struct buffer_head *bh;
                sector_t n = *p++;

                temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block);
                bh = sb_bread(sb, temp + (u64) (n >> shift));
                if (!bh)
                        goto out;
                u2_block = ((__fs64 *)bh->b_data)[n & mask];
                brelse(bh);
                if (!u2_block)
                        goto out;
        }
        temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block);
        ret = temp + (u64) (frag & uspi->s_fpbmask);

out:
        unlock_kernel();
        return ret;
}
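
/*
 * Worked example for the mask/shift arithmetic above (illustrative
 * values, not from the original source): with 8 fragments per block
 * (s_fpbshift == 3) and 2048 address entries per block
 * (s_apbshift == 11), shift == 8 and mask == 2047 >> 3 == 0xff.
 * Because sb_bread() reads fragment-sized buffers, a path index n is
 * split into n >> shift (which of the 8 fragments of the indirect
 * block to read) and n & mask (the entry within that fragment's 256
 * pointers).
 */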

/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode - pointer to inode
 * @fragment - number of the `fragment' that holds the pointer
 *   to the newly allocated fragment(s)
 * @new_fragment - number of the newly allocated fragment(s)
 * @required - how many fragments we require
 * @err - set if something goes wrong
 * @phys - where we store the physical number of the newly allocated
 *   fragments; NULL if we are not allocating data (e.g. indirect blocks)
 * @new - set if we allocate a new block
 * @locked_page - for ufs_new_fragments()
 */
static struct buffer_head *
ufs_inode_getfrag(struct inode *inode, u64 fragment,
                  sector_t new_fragment, unsigned int required, int *err,
                  long *phys, int *new, struct page *locked_page)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head * result;
        unsigned blockoff, lastblockoff;
        u64 tmp, goal, lastfrag, block, lastblock;
        void *p, *p2;

        UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
             "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
             (unsigned long long)new_fragment, required, !phys);

        /* TODO : to be done for write support
        if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
             goto ufs2;
         */

        block = ufs_fragstoblks (fragment);
        blockoff = ufs_fragnum (fragment);
        p = ufs_get_direct_data_ptr(uspi, ufsi, block);

        goal = 0;

repeat:
        tmp = ufs_data_ptr_to_cpu(sb, p);

        lastfrag = ufsi->i_lastfrag;
        if (tmp && fragment < lastfrag) {
                if (!phys) {
                        result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
                        if (tmp == ufs_data_ptr_to_cpu(sb, p)) {
                                UFSD("EXIT, result %llu\n",
                                     (unsigned long long)tmp + blockoff);
                                return result;
                        }
                        brelse (result);
                        goto repeat;
                } else {
                        *phys = uspi->s_sbbase + tmp + blockoff;
                        return NULL;
                }
        }

        lastblock = ufs_fragstoblks (lastfrag);
        lastblockoff = ufs_fragnum (lastfrag);
        /*
         * We will extend file into new block beyond last allocated block
         */
        if (lastblock < block) {
                /*
                 * We must reallocate last allocated block
                 */
                if (lastblockoff) {
                        p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
                        tmp = ufs_new_fragments(inode, p2, lastfrag,
                                                ufs_data_ptr_to_cpu(sb, p2),
                                                uspi->s_fpb - lastblockoff,
                                                err, locked_page);
                        if (!tmp) {
                                if (lastfrag != ufsi->i_lastfrag)
                                        goto repeat;
                                else
                                        return NULL;
                        }
                        lastfrag = ufsi->i_lastfrag;
                }
                tmp = ufs_data_ptr_to_cpu(sb,
                                          ufs_get_direct_data_ptr(uspi, ufsi,
                                                                  lastblock));
                if (tmp)
                        goal = tmp + uspi->s_fpb;
                tmp = ufs_new_fragments (inode, p, fragment - blockoff,
                                         goal, required + blockoff,
                                         err,
                                         phys != NULL ? locked_page : NULL);
        } else if (lastblock == block) {
                /*
                 * We will extend last allocated block
                 */
                tmp = ufs_new_fragments(inode, p, fragment -
                                        (blockoff - lastblockoff),
                                        ufs_data_ptr_to_cpu(sb, p),
                                        required + (blockoff - lastblockoff),
                                        err, phys != NULL ? locked_page : NULL);
        } else /* (lastblock > block) */ {
                /*
                 * We will allocate new block before last allocated block
                 */
                if (block) {
                        tmp = ufs_data_ptr_to_cpu(sb,
                                                  ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
                        if (tmp)
                                goal = tmp + uspi->s_fpb;
                }
                tmp = ufs_new_fragments(inode, p, fragment - blockoff,
                                        goal, uspi->s_fpb, err,
                                        phys != NULL ? locked_page : NULL);
        }
        if (!tmp) {
                if ((!blockoff && ufs_data_ptr_to_cpu(sb, p)) ||
                    (blockoff && lastfrag != ufsi->i_lastfrag))
                        goto repeat;
                *err = -ENOSPC;
                return NULL;
        }

        if (!phys) {
                result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
        } else {
                *phys = uspi->s_sbbase + tmp + blockoff;
                result = NULL;
                *err = 0;
                *new = 1;
        }

        inode->i_ctime = CURRENT_TIME_SEC;
        if (IS_SYNC(inode))
                ufs_sync_inode (inode);
        mark_inode_dirty(inode);
        UFSD("EXIT, result %llu\n", (unsigned long long)tmp + blockoff);
        return result;

        /* This part : To be implemented ....
           Required only for writing, not required for READ-ONLY.
ufs2:

        u2_block = ufs_fragstoblks(fragment);
        u2_blockoff = ufs_fragnum(fragment);
        p = ufsi->i_u1.u2_i_data + block;
        goal = 0;

repeat2:
        tmp = fs32_to_cpu(sb, *p);
        lastfrag = ufsi->i_lastfrag;

        */
}

/**
 * ufs_inode_getblock() - allocate new block
 * @inode - pointer to inode
 * @bh - pointer to the block which holds the "pointer" to the newly allocated block
 * @fragment - number of the `fragment' that holds the pointer
 *   to the newly allocated block
 * @new_fragment - number of the newly allocated fragment
 *   (the block will hold this fragment and also uspi->s_fpb-1 more)
 * @err - see ufs_inode_getfrag()
 * @phys - see ufs_inode_getfrag()
 * @new - see ufs_inode_getfrag()
 * @locked_page - see ufs_inode_getfrag()
 */
static struct buffer_head *
ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
                   u64 fragment, sector_t new_fragment, int *err,
                   long *phys, int *new, struct page *locked_page)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head * result;
        unsigned blockoff;
        u64 tmp, goal, block;
        void *p;

        block = ufs_fragstoblks (fragment);
        blockoff = ufs_fragnum (fragment);

        UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n",
             inode->i_ino, (unsigned long long)fragment,
             (unsigned long long)new_fragment, !phys);

        result = NULL;
        if (!bh)
                goto out;
        if (!buffer_uptodate(bh)) {
                ll_rw_block (READ, 1, &bh);
                wait_on_buffer (bh);
                if (!buffer_uptodate(bh))
                        goto out;
        }
        if (uspi->fs_magic == UFS2_MAGIC)
                p = (__fs64 *)bh->b_data + block;
        else
                p = (__fs32 *)bh->b_data + block;
repeat:
        tmp = ufs_data_ptr_to_cpu(sb, p);
        if (tmp) {
                if (!phys) {
                        result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
                        if (tmp == ufs_data_ptr_to_cpu(sb, p))
                                goto out;
                        brelse (result);
                        goto repeat;
                } else {
                        *phys = uspi->s_sbbase + tmp + blockoff;
                        goto out;
                }
        }

        if (block && (uspi->fs_magic == UFS2_MAGIC ?
                      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) :
                      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1]))))
                goal = tmp + uspi->s_fpb;
        else
                goal = bh->b_blocknr + uspi->s_fpb;
        tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
                                uspi->s_fpb, err, locked_page);
        if (!tmp) {
                if (ufs_data_ptr_to_cpu(sb, p))
                        goto repeat;
                goto out;
        }

        if (!phys) {
                result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
        } else {
                *phys = uspi->s_sbbase + tmp + blockoff;
                *new = 1;
        }

        mark_buffer_dirty(bh);
        if (IS_SYNC(inode))
                sync_dirty_buffer(bh);
        inode->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(inode);
        UFSD("result %llu\n", (unsigned long long)tmp + blockoff);
out:
        brelse (bh);
        UFSD("EXIT\n");
        return result;
}

/**
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * readpage, writepage and so on
 */
int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
        struct super_block * sb = inode->i_sb;
        struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head * bh;
        int ret, err, new;
        unsigned long ptr, phys;
        u64 phys64 = 0;

        if (!create) {
                phys64 = ufs_frag_map(inode, fragment);
                UFSD("phys64 = %llu\n", (unsigned long long)phys64);
                if (phys64)
                        map_bh(bh_result, sb, phys64);
                return 0;
        }

        /* This code entered only while writing ....? */

        err = -EIO;
        new = 0;
        ret = 0;
        bh = NULL;

        lock_kernel();

        UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
        if (fragment < 0)
                goto abort_negative;
        if (fragment >
            ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
             << uspi->s_fpbshift))
                goto abort_too_big;

        err = 0;
        ptr = fragment;

        /*
         * ok, these macros clean the logic up a bit and make
         * it much more readable:
         */
#define GET_INODE_DATABLOCK(x) \
        ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\
                          bh_result->b_page)
#define GET_INODE_PTR(x) \
        ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\
                          bh_result->b_page)
#define GET_INDIRECT_DATABLOCK(x) \
        ufs_inode_getblock(inode, bh, x, fragment,      \
                           &err, &phys, &new, bh_result->b_page)
#define GET_INDIRECT_PTR(x) \
        ufs_inode_getblock(inode, bh, x, fragment,      \
                           &err, NULL, NULL, NULL)

        if (ptr < UFS_NDIR_FRAGMENT) {
                bh = GET_INODE_DATABLOCK(ptr);
                goto out;
        }
        ptr -= UFS_NDIR_FRAGMENT;
        if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
                bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
                goto get_indirect;
        }
        ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
        if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
                bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
                goto get_double;
        }
        ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
        bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
        bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
get_double:
        bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
get_indirect:
        bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);

#undef GET_INODE_DATABLOCK
#undef GET_INODE_PTR
#undef GET_INDIRECT_DATABLOCK
#undef GET_INDIRECT_PTR

out:
        if (err)
                goto abort;
        if (new)
                set_buffer_new(bh_result);
        map_bh(bh_result, sb, phys);
abort:
        unlock_kernel();
        return err;

abort_negative:
        ufs_warning(sb, "ufs_get_block", "block < 0");
        goto abort;

abort_too_big:
        ufs_warning(sb, "ufs_get_block", "block > big");
        goto abort;
}
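
/*
 * Routing sketch for the macro ladder above (illustrative values, not
 * from the original source): assuming 8 fragments per block and 2048
 * pointers per block, direct fragments cover ptr < UFS_NDIR_FRAGMENT
 * == UFS_NDADDR * s_fpb == 96; the single indirect range spans the
 * next 1 << (11 + 3) == 16384 fragments, the double indirect the next
 * 1 << (2 * 11 + 3), and so on.  Each GET_INODE_PTR()/GET_INDIRECT_PTR()
 * call peels off one level of indirection before
 * GET_INDIRECT_DATABLOCK() maps the final data fragment.
 */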

static struct buffer_head *ufs_getfrag(struct inode *inode,
                                       unsigned int fragment,
                                       int create, int *err)
{
        struct buffer_head dummy;
        int error;

        dummy.b_state = 0;
        dummy.b_blocknr = -1000;
        error = ufs_getfrag_block(inode, fragment, &dummy, create);
        *err = error;
        if (!error && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (buffer_new(&dummy)) {
                        memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                        set_buffer_uptodate(bh);
                        mark_buffer_dirty(bh);
                }
                return bh;
        }
        return NULL;
}

struct buffer_head * ufs_bread (struct inode * inode, unsigned fragment,
        int create, int * err)
{
        struct buffer_head * bh;

        UFSD("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment);
        bh = ufs_getfrag (inode, fragment, create, err);
        if (!bh || buffer_uptodate(bh))
                return bh;
        ll_rw_block (READ, 1, &bh);
        wait_on_buffer (bh);
        if (buffer_uptodate(bh))
                return bh;
        brelse (bh);
        *err = -EIO;
        return NULL;
}
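
/*
 * Illustrative usage sketch (an assumption, not code from this file):
 * metadata readers such as the directory code go through ufs_bread();
 * a NULL return with *err still 0 means an unmapped hole, while a NULL
 * return with *err set means a read failure:
 *
 *      int err = 0;
 *      struct buffer_head *bh = ufs_bread(dir, frag, 0, &err);
 *      if (!bh)
 *              return err;     (hole if err == 0, -EIO otherwise)
 *      ... use bh->b_data ...
 *      brelse(bh);
 */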

static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, ufs_getfrag_block, wbc);
}

static int ufs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, ufs_getfrag_block);
}

int __ufs_write_begin(struct file *file, struct address_space *mapping,
                      loff_t pos, unsigned len, unsigned flags,
                      struct page **pagep, void **fsdata)
{
        return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                 ufs_getfrag_block);
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned flags,
                           struct page **pagep, void **fsdata)
{
        *pagep = NULL;
        return __ufs_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
}

static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
        .readpage = ufs_readpage,
        .writepage = ufs_writepage,
        .sync_page = block_sync_page,
        .write_begin = ufs_write_begin,
        .write_end = generic_write_end,
        .bmap = ufs_bmap
};

static void ufs_set_inode_ops(struct inode *inode)
{
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &ufs_file_inode_operations;
                inode->i_fop = &ufs_file_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &ufs_dir_inode_operations;
                inode->i_fop = &ufs_dir_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                if (!inode->i_blocks)
                        inode->i_op = &ufs_fast_symlink_inode_operations;
                else {
                        inode->i_op = &page_symlink_inode_operations;
                        inode->i_mapping->a_ops = &ufs_aops;
                }
        } else
                init_special_inode(inode, inode->i_mode,
                                   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}

static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        mode_t mode;
        unsigned i;

        /*
         * Copy data to the in-core inode.
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
        inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink);
        if (inode->i_nlink == 0) {
                ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
                return -1;
        }

        /*
         * Linux now has 32-bit uid and gid, so we can support EFT.
         */
        inode->i_uid = ufs_get_inode_uid(sb, ufs_inode);
        inode->i_gid = ufs_get_inode_gid(sb, ufs_inode);

        inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
        inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
        inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
        inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
        inode->i_mtime.tv_nsec = 0;
        inode->i_atime.tv_nsec = 0;
        inode->i_ctime.tv_nsec = 0;
        inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
        inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
        ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
        ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
        ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

        if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
                        ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i];
        } else {
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
                        ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
        }
        return 0;
}
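
/*
 * Note on the i_u1 union used above: UFS keeps the body of a "fast"
 * symlink in the space otherwise used for block pointers.  For example
 * (illustrative): a short symlink target such as "tmp/x" leaves
 * i_blocks == 0, so its bytes are copied into i_symlink[] rather than
 * i_data[]; device nodes (S_ISCHR/S_ISBLK) store their rdev encoding
 * in the pointer area, which is why they take the i_data branch.
 */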

static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        mode_t mode;
        unsigned i;

        UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
        /*
         * Copy data to the in-core inode.
         */
        inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
        inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink);
        if (inode->i_nlink == 0) {
                ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
                return -1;
        }

        /*
         * Linux now has 32-bit uid and gid, so we can support EFT.
         */
        inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid);
        inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid);

        inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
        inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
        inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
        inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
        inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
        inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
        inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
        inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
        inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
        ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
        /*
        ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
        ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
        */

        if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
                        ufsi->i_u1.u2_i_data[i] =
                                ufs2_inode->ui_u2.ui_addr.ui_db[i];
        } else {
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
                        ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
        }
        return 0;
}

void ufs_read_inode(struct inode * inode)
{
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct buffer_head * bh;
        int err;

        UFSD("ENTER, ino %lu\n", inode->i_ino);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;

        if (inode->i_ino < UFS_ROOTINO ||
            inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
                ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
                            inode->i_ino);
                goto bad_inode;
        }

        bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
        if (!bh) {
                ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
                            inode->i_ino);
                goto bad_inode;
        }
        if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
                struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

                err = ufs2_read_inode(inode,
                                      ufs2_inode + ufs_inotofsbo(inode->i_ino));
        } else {
                struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

                err = ufs1_read_inode(inode,
                                      ufs_inode + ufs_inotofsbo(inode->i_ino));
        }

        if (err)
                goto bad_inode;
        inode->i_version++;
        ufsi->i_lastfrag =
                (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
        ufsi->i_dir_start_lookup = 0;
        ufsi->i_osync = 0;

        ufs_set_inode_ops(inode);

        brelse(bh);

        UFSD("EXIT\n");
        return;

bad_inode:
        make_bad_inode(inode);
}

static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_inode_info *ufsi = UFS_I(inode);
        unsigned i;

        ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
        ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

        ufs_set_inode_uid(sb, ufs_inode, inode->i_uid);
        ufs_set_inode_gid(sb, ufs_inode, inode->i_gid);

        ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
        ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
        ufs_inode->ui_atime.tv_usec = 0;
        ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
        ufs_inode->ui_ctime.tv_usec = 0;
        ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
        ufs_inode->ui_mtime.tv_usec = 0;
        ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
        ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
        ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

        if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
                ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
                ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
        }

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
                ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
        } else if (inode->i_blocks) {
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
                        ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i];
        } else {
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
                        ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
        }

        if (!inode->i_nlink)
                memset (ufs_inode, 0, sizeof(struct ufs_inode));
}

static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_inode_info *ufsi = UFS_I(inode);
        unsigned i;

        UFSD("ENTER\n");
        ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
        ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

        ufs_inode->ui_uid = cpu_to_fs32(sb, inode->i_uid);
        ufs_inode->ui_gid = cpu_to_fs32(sb, inode->i_gid);

        ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
        ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
        ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
        ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
        ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
        ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
        ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

        ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
        ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
        ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
                ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
        } else if (inode->i_blocks) {
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
                        ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.u2_i_data[i];
        } else {
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
                        ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
        }

        if (!inode->i_nlink)
                memset (ufs_inode, 0, sizeof(struct ufs2_inode));
        UFSD("EXIT\n");
}

static int ufs_update_inode(struct inode * inode, int do_sync)
{
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head * bh;

        UFSD("ENTER, ino %lu\n", inode->i_ino);

        if (inode->i_ino < UFS_ROOTINO ||
            inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
                ufs_warning (sb, "ufs_update_inode", "bad inode number (%lu)\n", inode->i_ino);
                return -1;
        }

        bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
        if (!bh) {
                ufs_warning (sb, "ufs_update_inode", "unable to read inode %lu\n", inode->i_ino);
                return -1;
        }
        if (uspi->fs_magic == UFS2_MAGIC) {
                struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

                ufs2_update_inode(inode,
                                  ufs2_inode + ufs_inotofsbo(inode->i_ino));
        } else {
                struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

                ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
        }

        mark_buffer_dirty(bh);
        if (do_sync)
                sync_dirty_buffer(bh);
        brelse (bh);

        UFSD("EXIT\n");
        return 0;
}

int ufs_write_inode (struct inode * inode, int wait)
{
        int ret;
        lock_kernel();
        ret = ufs_update_inode (inode, wait);
        unlock_kernel();
        return ret;
}

int ufs_sync_inode (struct inode *inode)
{
        return ufs_update_inode (inode, 1);
}

void ufs_delete_inode (struct inode * inode)
{
        loff_t old_i_size;

        truncate_inode_pages(&inode->i_data, 0);
        if (is_bad_inode(inode))
                goto no_delete;
        /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
        lock_kernel();
        mark_inode_dirty(inode);
        ufs_update_inode(inode, IS_SYNC(inode));
        old_i_size = inode->i_size;
        inode->i_size = 0;
        if (inode->i_blocks && ufs_truncate(inode, old_i_size))
                ufs_warning(inode->i_sb, __FUNCTION__, "ufs_truncate failed\n");
        ufs_free_inode (inode);
        unlock_kernel();
        return;
no_delete:
        clear_inode(inode);     /* We must guarantee clearing of inode... */
}