* added 0.99 linux version
[mascara-docs.git] / i386 / linux / linux-2.3.21 / fs / ufs / inode.c
blob 3d9c8f6026e56b0279b56cd95f56a95dde802594
1 /*
2 * linux/fs/ufs/inode.c
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
8 * from
10 * linux/fs/ext2/inode.c
12 * Copyright (C) 1992, 1993, 1994, 1995
13 * Remy Card (card@masi.ibp.fr)
14 * Laboratoire MASI - Institut Blaise Pascal
15 * Universite Pierre et Marie Curie (Paris VI)
17 * from
19 * linux/fs/minix/inode.c
21 * Copyright (C) 1991, 1992 Linus Torvalds
23 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
24 * Big-endian to little-endian byte-swapping/bitmaps by
25 * David S. Miller (davem@caip.rutgers.edu), 1995
28 #include <asm/uaccess.h>
29 #include <asm/system.h>
31 #include <linux/errno.h>
32 #include <linux/fs.h>
33 #include <linux/ufs_fs.h>
34 #include <linux/sched.h>
35 #include <linux/stat.h>
36 #include <linux/string.h>
37 #include <linux/locks.h>
38 #include <linux/mm.h>
39 #include <linux/smp_lock.h>
41 #include "swab.h"
42 #include "util.h"
/*
 * Per-file debug switches; keep both undefined in production builds.
 */
#undef UFS_INODE_DEBUG
#undef UFS_INODE_DEBUG_MORE

#ifdef UFS_INODE_DEBUG
/*
 * UFSD(x): debug trace.  Call as UFSD(("fmt", args...)) so that `x'
 * carries its own parentheses for the second printk.
 *
 * NOTE(review): the expansion is two statements with a trailing `;',
 * and call sites in this file deliberately omit their own semicolon
 * (e.g. UFSD(("ENTER\n")) with no `;').  It therefore cannot be wrapped
 * in do { } while (0), and it is only safe where a bare statement
 * sequence is legal — never as the unbraced body of an if/else.
 */
#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
#else
#define UFSD(x)
#endif
#ifdef UFS_INODE_DEBUG_MORE
/*
 * Debug helper: dump the in-core state of a UFS inode — ids (both the
 * 16-bit Linux and the 32-bit on-disk variants), size, the twelve
 * direct block slots and the three indirect block pointers.
 * Compiled only when UFS_INODE_DEBUG_MORE is defined above.
 */
static void ufs_print_inode(struct inode * inode)
{
	unsigned swab = inode->i_sb->u.ufs_sb.s_swab; /* needed by SWAB32() */

	printk("ino %lu mode 0%6.6o nlink %d uid %d uid32 %u"
	       " gid %d gid32 %u size %lu blocks %lu\n",
	       inode->i_ino, inode->i_mode, inode->i_nlink,
	       inode->i_uid, inode->u.ufs_i.i_uid, inode->i_gid,
	       inode->u.ufs_i.i_gid, inode->i_size, inode->i_blocks);
	/* Direct block pointers are stored on-disk-endian; swab for display. */
	printk(" db <%u %u %u %u %u %u %u %u %u %u %u %u>\n",
	       SWAB32(inode->u.ufs_i.i_u1.i_data[0]),
	       SWAB32(inode->u.ufs_i.i_u1.i_data[1]),
	       SWAB32(inode->u.ufs_i.i_u1.i_data[2]),
	       SWAB32(inode->u.ufs_i.i_u1.i_data[3]),
	       SWAB32(inode->u.ufs_i.i_u1.i_data[4]),
	       SWAB32(inode->u.ufs_i.i_u1.i_data[5]),
	       SWAB32(inode->u.ufs_i.i_u1.i_data[6]),
	       SWAB32(inode->u.ufs_i.i_u1.i_data[7]),
	       SWAB32(inode->u.ufs_i.i_u1.i_data[8]),
	       SWAB32(inode->u.ufs_i.i_u1.i_data[9]),
	       SWAB32(inode->u.ufs_i.i_u1.i_data[10]),
	       SWAB32(inode->u.ufs_i.i_u1.i_data[11]));
	/* Generation number and single/double/triple indirect pointers. */
	printk(" gen %u ib <%u %u %u>\n",
	       inode->u.ufs_i.i_gen,
	       SWAB32(inode->u.ufs_i.i_u1.i_data[UFS_IND_BLOCK]),
	       SWAB32(inode->u.ufs_i.i_u1.i_data[UFS_DIND_BLOCK]),
	       SWAB32(inode->u.ufs_i.i_u1.i_data[UFS_TIND_BLOCK]));
}
#endif
/*
 * Map fragment number `nr' through the block array stored in the inode
 * itself: pick the block pointer for the containing block, then add the
 * fragment offset within that block.
 *
 * NOTE(review): this macro silently relies on `uspi' and (via SWAB32)
 * `swab' being in scope at every call site — it is only usable inside
 * functions of this file that set both up.
 */
#define ufs_inode_bmap(inode, nr) \
	(SWAB32((inode)->u.ufs_i.i_u1.i_data[(nr) >> uspi->s_fpbshift]) + ((nr) & uspi->s_fpbmask))
/*
 * Look up fragment `nr' in the indirect block held in `bh': read the
 * block pointer at the corresponding slot and add the fragment offset
 * within that block.
 *
 * Consumes `bh' (brelse is called here, even on success).
 * Returns 0 when `bh' is NULL — callers treat 0 as "hole / failed read".
 */
static inline unsigned int ufs_block_bmap (struct buffer_head * bh, unsigned nr,
	struct ufs_sb_private_info * uspi, unsigned swab)
{
	unsigned int tmp;

	UFSD(("ENTER, nr %u\n", nr))
	if (!bh)
		return 0;
	/* Slot index = block within this indirect block; low bits = fragment offset. */
	tmp = SWAB32(((u32 *) bh->b_data)[nr >> uspi->s_fpbshift]) + (nr & uspi->s_fpbmask);
	brelse (bh);
	UFSD(("EXIT, result %u\n", tmp))
	return tmp;
}
100 int ufs_frag_map(struct inode *inode, int frag)
102 struct super_block *sb;
103 struct ufs_sb_private_info *uspi;
104 unsigned int swab;
105 int i, ret;
107 ret = 0;
108 lock_kernel();
110 sb = inode->i_sb;
111 uspi = sb->u.ufs_sb.s_uspi;
112 swab = sb->u.ufs_sb.s_swab;
113 if (frag < 0) {
114 ufs_warning(sb, "ufs_frag_map", "frag < 0");
115 goto out;
117 if (frag >=
118 ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
119 << uspi->s_fpbshift)) {
120 ufs_warning(sb, "ufs_frag_map", "frag > big");
121 goto out;
124 if (frag < UFS_NDIR_FRAGMENT) {
125 ret = uspi->s_sbbase + ufs_inode_bmap(inode, frag);
126 goto out;
129 frag -= UFS_NDIR_FRAGMENT;
130 if (frag < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
131 i = ufs_inode_bmap(inode,
132 UFS_IND_FRAGMENT + (frag >> uspi->s_apbshift));
133 if (!i)
134 goto out;
135 ret = (uspi->s_sbbase +
136 ufs_block_bmap(bread(sb->s_dev, uspi->s_sbbase + i,
137 sb->s_blocksize),
138 frag & uspi->s_apbmask, uspi, swab));
140 frag -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
141 if (frag < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
142 i = ufs_inode_bmap (inode,
143 UFS_DIND_FRAGMENT + (frag >> uspi->s_2apbshift));
144 if (!i)
145 goto out;
146 i = ufs_block_bmap(bread(sb->s_dev, uspi->s_sbbase + i,
147 sb->s_blocksize),
148 (frag >> uspi->s_apbshift) & uspi->s_apbmask,
149 uspi, swab);
150 if (!i)
151 goto out;
152 ret = (uspi->s_sbbase +
153 ufs_block_bmap(bread(sb->s_dev, uspi->s_sbbase + i,
154 sb->s_blocksize),
155 (frag & uspi->s_apbmask), uspi, swab));
156 goto out;
158 frag -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
159 i = ufs_inode_bmap(inode,
160 UFS_TIND_FRAGMENT + (frag >> uspi->s_3apbshift));
161 if (!i)
162 goto out;
163 i = ufs_block_bmap(bread(sb->s_dev, uspi->s_sbbase + i, sb->s_blocksize),
164 (frag >> uspi->s_2apbshift) & uspi->s_apbmask,
165 uspi, swab);
166 if (!i)
167 goto out;
168 i = ufs_block_bmap(bread(sb->s_dev, uspi->s_sbbase + i, sb->s_blocksize),
169 (frag >> uspi->s_apbshift) & uspi->s_apbmask,
170 uspi, swab);
171 if (!i)
172 goto out;
173 ret = (uspi->s_sbbase +
174 ufs_block_bmap(bread(sb->s_dev, uspi->s_sbbase + i, sb->s_blocksize),
175 (frag & uspi->s_apbmask), uspi, swab));
176 out:
177 unlock_kernel();
178 return ret;
/*
 * Find — and with allocation, if needed — the fragment addressed by one
 * of the inode's own block pointers.
 *
 * fragment:     the fragment the caller wants.
 * new_fragment: the fragment being written (used for the rlimit check).
 * required:     how many fragments the caller needs allocated.
 * metadata:     non-zero -> return a getblk() buffer_head for the block;
 *               zero     -> report the physical address via *phys and set
 *                           *new when a fresh fragment was allocated.
 *
 * Retries from `repeat' whenever *p or i_lastfrag is observed to change
 * under us (allocation can sleep).  On failure returns NULL with *err
 * set (-EFBIG past RLIMIT_FSIZE, -ENOSPC when allocation fails).
 */
static struct buffer_head * ufs_inode_getfrag (struct inode *inode,
	unsigned int fragment, unsigned int new_fragment,
	unsigned int required, int *err, int metadata, long *phys, int *new)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct buffer_head * result;
	unsigned long limit;
	unsigned block, blockoff, lastfrag, lastblock, lastblockoff;
	unsigned tmp, goal;
	u32 * p, * p2;
	unsigned int swab;

	UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u, required %u\n",
		inode->i_ino, fragment, new_fragment, required))

	sb = inode->i_sb;
	swab = sb->u.ufs_sb.s_swab;
	uspi = sb->u.ufs_sb.s_uspi;
	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);
	p = inode->u.ufs_i.i_u1.i_data + block;
	goal = 0;

repeat:
	tmp = SWAB32(*p);
	lastfrag = inode->u.ufs_i.i_lastfrag;
	/* Already mapped and not the (possibly partial) last block: reuse it. */
	if (tmp && fragment < lastfrag) {
		if (metadata) {
			result = getblk (sb->s_dev, uspi->s_sbbase + tmp + blockoff,
				sb->s_blocksize);
			/* Re-check *p: getblk may have slept and the map changed. */
			if (tmp == SWAB32(*p)) {
				UFSD(("EXIT, result %u\n", tmp + blockoff))
				return result;
			}
			brelse (result);
			goto repeat;
		} else {
			*phys = tmp;
			return NULL;
		}
	}

	/* Growing the file: enforce the process file-size rlimit first. */
	*err = -EFBIG;
	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit < RLIM_INFINITY) {
		limit >>= sb->s_blocksize_bits;
		if (new_fragment >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return NULL;
		}
	}

	lastblock = ufs_fragstoblks (lastfrag);
	lastblockoff = ufs_fragnum (lastfrag);
	/*
	 * We will extend file into new block beyond last allocated block
	 */
	if (lastblock < block) {
		/*
		 * We must reallocate last allocated block
		 */
		if (lastblockoff) {
			/* Round the partial last block up to a full block first. */
			p2 = inode->u.ufs_i.i_u1.i_data + lastblock;
			tmp = ufs_new_fragments (inode, p2, lastfrag,
				SWAB32(*p2), uspi->s_fpb - lastblockoff, err);
			if (!tmp) {
				/* Someone else grew the file meanwhile: retry. */
				if (lastfrag != inode->u.ufs_i.i_lastfrag)
					goto repeat;
				else
					return NULL;
			}
			lastfrag = inode->u.ufs_i.i_lastfrag;
		}
		goal = SWAB32(inode->u.ufs_i.i_u1.i_data[lastblock]) + uspi->s_fpb;
		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
			goal, required + blockoff, err);
	}
	/*
	 * We will extend last allocated block
	 */
	else if (lastblock == block) {
		tmp = ufs_new_fragments (inode, p, fragment - (blockoff - lastblockoff),
			SWAB32(*p), required + (blockoff - lastblockoff), err);
	}
	/*
	 * We will allocate new block before last allocated block
	 */
	else /* (lastblock > block) */ {
		/* Goal: right after the previous direct block, when mapped. */
		if (lastblock && (tmp = SWAB32(inode->u.ufs_i.i_u1.i_data[lastblock-1])))
			goal = tmp + uspi->s_fpb;
		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
			goal, uspi->s_fpb, err);
	}
	if (!tmp) {
		/* Retry if the map changed under us; otherwise we are out of space. */
		if ((!blockoff && SWAB32(*p)) ||
		    (blockoff && lastfrag != inode->u.ufs_i.i_lastfrag))
			goto repeat;
		*err = -ENOSPC;
		return NULL;
	}

	/* The nullification of framgents done in ufs/balloc.c is
	 * something I don't have the stomache to move into here right
	 * now. -DaveM
	 */
	if (metadata) {
		/*
		 * NOTE(review): unlike the reuse path above, this getblk does
		 * not add uspi->s_sbbase to the block number — confirm whether
		 * s_sbbase can be non-zero here.
		 */
		result = getblk (inode->i_dev, tmp + blockoff, sb->s_blocksize);
	} else {
		*phys = tmp;
		result = NULL;
		*err = 0;
		*new = 1;
	}

	inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
	UFSD(("EXIT, result %u\n", tmp + blockoff))
	return result;
}
305 static struct buffer_head * ufs_block_getfrag (struct inode *inode,
306 struct buffer_head *bh, unsigned int fragment, unsigned int new_fragment,
307 unsigned int blocksize, int * err, int metadata, long *phys, int *new)
309 struct super_block * sb;
310 struct ufs_sb_private_info * uspi;
311 struct buffer_head * result;
312 unsigned tmp, goal, block, blockoff;
313 u32 * p;
314 unsigned int swab;
316 sb = inode->i_sb;
317 swab = sb->u.ufs_sb.s_swab;
318 uspi = sb->u.ufs_sb.s_uspi;
319 block = ufs_fragstoblks (fragment);
320 blockoff = ufs_fragnum (fragment);
322 UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u\n", inode->i_ino, fragment, new_fragment))
324 result = NULL;
325 if (!bh)
326 goto out;
327 if (!buffer_uptodate(bh)) {
328 ll_rw_block (READ, 1, &bh);
329 wait_on_buffer (bh);
330 if (!buffer_uptodate(bh))
331 goto out;
334 p = (u32 *) bh->b_data + block;
335 repeat:
336 tmp = SWAB32(*p);
337 if (tmp) {
338 if (metadata) {
339 result = getblk (bh->b_dev, uspi->s_sbbase + tmp + blockoff,
340 sb->s_blocksize);
341 if (tmp == SWAB32(*p))
342 goto out;
343 brelse (result);
344 goto repeat;
345 } else {
346 *phys = tmp;
347 goto out;
350 *err = -EFBIG;
353 unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
354 if (limit < RLIM_INFINITY) {
355 limit >>= sb->s_blocksize_bits;
356 if (new_fragment >= limit) {
357 brelse (bh);
358 send_sig(SIGXFSZ, current, 0);
359 return NULL;
363 if (block && (tmp = SWAB32(((u32*)bh->b_data)[block-1]) + uspi->s_fpb))
364 goal = tmp + uspi->s_fpb;
365 else
366 goal = bh->b_blocknr + uspi->s_fpb;
367 tmp = ufs_new_fragments (inode, p, ufs_blknum(new_fragment), goal, uspi->s_fpb, err);
368 if (!tmp) {
369 if (SWAB32(*p))
370 goto repeat;
371 goto out;
374 /* The nullification of framgents done in ufs/balloc.c is
375 * something I don't have the stomache to move into here right
376 * now. -DaveM
378 if (metadata) {
379 result = getblk (bh->b_dev, tmp + blockoff, sb->s_blocksize);
380 } else {
381 *phys = tmp;
382 *new = 1;
385 mark_buffer_dirty(bh, 1);
386 if (IS_SYNC(inode)) {
387 ll_rw_block (WRITE, 1, &bh);
388 wait_on_buffer (bh);
390 inode->i_ctime = CURRENT_TIME;
391 mark_inode_dirty(inode);
392 out:
393 brelse (bh);
394 UFSD(("EXIT, result %u\n", tmp + blockoff))
395 return result;
398 int ufs_getfrag_block (struct inode *inode, long fragment, struct buffer_head *bh_result, int create)
400 struct super_block * sb;
401 struct ufs_sb_private_info * uspi;
402 struct buffer_head * bh;
403 unsigned int swab;
404 int ret, err, new;
405 unsigned long ptr, phys;
407 sb = inode->i_sb;
408 uspi = sb->u.ufs_sb.s_uspi;
409 swab = sb->u.ufs_sb.s_swab;
411 if (!create) {
412 phys = ufs_frag_map(inode, fragment);
413 if (phys) {
414 bh_result->b_dev = inode->i_dev;
415 bh_result->b_blocknr = phys;
416 bh_result->b_state |= (1UL << BH_Mapped);
418 return 0;
421 err = -EIO;
422 new = 0;
423 ret = 0;
424 bh = NULL;
426 lock_kernel();
428 UFSD(("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment))
429 if (fragment < 0)
430 goto abort_negative;
431 if (fragment >
432 ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
433 << uspi->s_fpbshift))
434 goto abort_too_big;
436 err = 0;
437 ptr = fragment;
440 * ok, these macros clean the logic up a bit and make
441 * it much more readable:
443 #define GET_INODE_DATABLOCK(x) \
444 ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new)
445 #define GET_INODE_PTR(x) \
446 ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL)
447 #define GET_INDIRECT_DATABLOCK(x) \
448 ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
449 &err, 0, &phys, &new);
450 #define GET_INDIRECT_PTR(x) \
451 ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
452 &err, 1, NULL, NULL);
454 if (ptr < UFS_NDIR_FRAGMENT) {
455 bh = GET_INODE_DATABLOCK(ptr);
456 goto out;
458 ptr -= UFS_NDIR_FRAGMENT;
459 if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
460 bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
461 goto get_indirect;
463 ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
464 if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
465 bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
466 goto get_double;
468 ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
469 bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
470 bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
471 get_double:
472 bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
473 get_indirect:
474 bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);
476 #undef GET_INODE_DATABLOCK
477 #undef GET_INODE_PTR
478 #undef GET_INDIRECT_DATABLOCK
479 #undef GET_INDIRECT_PTR
481 out:
482 if (err)
483 goto abort;
484 bh_result->b_dev = inode->i_dev;
485 bh_result->b_blocknr = phys;
486 bh_result->b_state |= (1UL << BH_Mapped);
487 if (new)
488 bh_result->b_state |= (1UL << BH_New);
489 abort:
490 unlock_kernel();
491 return err;
493 abort_negative:
494 ufs_warning(sb, "ufs_get_block", "block < 0");
495 goto abort;
497 abort_too_big:
498 ufs_warning(sb, "ufs_get_block", "block > big");
499 goto abort;
502 struct buffer_head *ufs_getfrag(struct inode *inode, unsigned int fragment,
503 int create, int *err)
505 struct buffer_head dummy;
506 int error;
508 dummy.b_state = 0;
509 dummy.b_blocknr = -1000;
510 error = ufs_getfrag_block(inode, fragment, &dummy, create);
511 *err = error;
512 if (!error && buffer_mapped(&dummy)) {
513 struct buffer_head *bh;
514 bh = getblk(dummy.b_dev, dummy.b_blocknr, inode->i_sb->s_blocksize);
515 if (buffer_new(&dummy)) {
516 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
517 mark_buffer_uptodate(bh, 1);
518 mark_buffer_dirty(bh, 1);
520 return bh;
522 return NULL;
525 struct buffer_head * ufs_bread (struct inode * inode, unsigned fragment,
526 int create, int * err)
528 struct buffer_head * bh;
530 UFSD(("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment))
531 bh = ufs_getfrag (inode, fragment, create, err);
532 if (!bh || buffer_uptodate(bh))
533 return bh;
534 ll_rw_block (READ, 1, &bh);
535 wait_on_buffer (bh);
536 if (buffer_uptodate(bh))
537 return bh;
538 brelse (bh);
539 *err = -EIO;
540 return NULL;
/*
 * VFS read_inode hook: fill the in-core inode from its on-disk slot.
 * On a bad inode number or a failed read the inode is left untouched
 * after a warning.
 */
void ufs_read_inode (struct inode * inode)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_inode * ufs_inode;
	struct buffer_head * bh;
	unsigned i;
	unsigned flags, swab;

	UFSD(("ENTER, ino %lu\n", inode->i_ino))

	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	flags = sb->u.ufs_sb.s_flags;
	swab = sb->u.ufs_sb.s_swab;

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return;
	}

	bh = bread (sb->s_dev, uspi->s_sbbase + ufs_inotofsba(inode->i_ino), sb->s_blocksize);
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return;
	}
	/* Locate this inode's slot within the block just read. */
	ufs_inode = (struct ufs_inode *) (bh->b_data + sizeof(struct ufs_inode) * ufs_inotofsbo(inode->i_ino));

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = SWAB16(ufs_inode->ui_mode);
	inode->i_nlink = SWAB16(ufs_inode->ui_nlink);
	if (inode->i_nlink == 0)
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);

	/*
	 * Linux has only 16-bit uid and gid, so we can't support EFT.
	 * Files are dynamically chown()ed to root.
	 */
	inode->i_uid = inode->u.ufs_i.i_uid = ufs_get_inode_uid(ufs_inode);
	inode->i_gid = inode->u.ufs_i.i_gid = ufs_get_inode_gid(ufs_inode);
	if (inode->u.ufs_i.i_uid >= UFS_USEEFT) {
		inode->i_uid = 0;
	}
	if (inode->u.ufs_i.i_gid >= UFS_USEEFT) {
		inode->i_gid = 0;
	}

	/*
	 * Linux i_size can be 32 on some architectures. We will mark
	 * big files as read only and let user access first 32 bits.
	 */
	inode->u.ufs_i.i_size = SWAB64(ufs_inode->ui_size);
	inode->i_size = (off_t) inode->u.ufs_i.i_size;
	if (sizeof(off_t) == 4 && (inode->u.ufs_i.i_size >> 32))
		inode->i_size = (__u32)-1;

	inode->i_atime = SWAB32(ufs_inode->ui_atime.tv_sec);
	inode->i_ctime = SWAB32(ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime = SWAB32(ufs_inode->ui_mtime.tv_sec);
	inode->i_blocks = SWAB32(ufs_inode->ui_blocks);
	inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat) */
	inode->i_version = ++event;

	inode->u.ufs_i.i_flags = SWAB32(ufs_inode->ui_flags);
	inode->u.ufs_i.i_gen = SWAB32(ufs_inode->ui_gen);
	inode->u.ufs_i.i_shadow = SWAB32(ufs_inode->ui_u3.ui_sun.ui_shadow);
	inode->u.ufs_i.i_oeftflag = SWAB32(ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	inode->u.ufs_i.i_lastfrag = howmany (inode->i_size, uspi->s_fsize);

	/*
	 * Device inodes carry the device number in db[0]; that is handled
	 * below by init_special_inode(), so nothing to copy here.  The
	 * block pointers are copied raw (still disk-endian): SWAB32 is
	 * applied at every use, e.g. in ufs_inode_bmap() above.
	 */
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		;
	else if (inode->i_blocks) {
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
			inode->u.ufs_i.i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i];
	}
	else {
		/* Fast symlink: the target string lives in the block array. */
		for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
			inode->u.ufs_i.i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
	}

	inode->i_op = NULL;

	if (S_ISREG(inode->i_mode))
		inode->i_op = &ufs_file_inode_operations;
	else if (S_ISDIR(inode->i_mode))
		inode->i_op = &ufs_dir_inode_operations;
	else if (S_ISLNK(inode->i_mode))
		inode->i_op = &ufs_symlink_inode_operations;
	else
		init_special_inode(inode, inode->i_mode,
			SWAB32(ufs_inode->ui_u2.ui_addr.ui_db[0]));

	brelse (bh);

#ifdef UFS_INODE_DEBUG_MORE
	ufs_print_inode (inode);
#endif
	UFSD(("EXIT\n"))
}
647 static int ufs_update_inode(struct inode * inode, int do_sync)
649 struct super_block * sb;
650 struct ufs_sb_private_info * uspi;
651 struct buffer_head * bh;
652 struct ufs_inode * ufs_inode;
653 unsigned i;
654 unsigned flags, swab;
656 UFSD(("ENTER, ino %lu\n", inode->i_ino))
658 sb = inode->i_sb;
659 uspi = sb->u.ufs_sb.s_uspi;
660 flags = sb->u.ufs_sb.s_flags;
661 swab = sb->u.ufs_sb.s_swab;
663 if (inode->i_ino < UFS_ROOTINO ||
664 inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
665 ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
666 return -1;
669 bh = bread (sb->s_dev, ufs_inotofsba(inode->i_ino), sb->s_blocksize);
670 if (!bh) {
671 ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
672 return -1;
674 ufs_inode = (struct ufs_inode *) (bh->b_data + ufs_inotofsbo(inode->i_ino) * sizeof(struct ufs_inode));
676 ufs_inode->ui_mode = SWAB16(inode->i_mode);
677 ufs_inode->ui_nlink = SWAB16(inode->i_nlink);
679 if (inode->i_uid == 0 && inode->u.ufs_i.i_uid >= UFS_USEEFT)
680 ufs_set_inode_uid (ufs_inode, inode->u.ufs_i.i_uid);
681 else
682 ufs_set_inode_uid (ufs_inode, inode->i_uid);
684 if (inode->i_gid == 0 && inode->u.ufs_i.i_gid >= UFS_USEEFT)
685 ufs_set_inode_gid (ufs_inode, inode->u.ufs_i.i_gid);
686 else
687 ufs_set_inode_gid (ufs_inode, inode->i_gid);
689 ufs_inode->ui_size = SWAB64((u64)inode->i_size);
690 ufs_inode->ui_atime.tv_sec = SWAB32(inode->i_atime);
691 ufs_inode->ui_atime.tv_usec = SWAB32(0);
692 ufs_inode->ui_ctime.tv_sec = SWAB32(inode->i_ctime);
693 ufs_inode->ui_ctime.tv_usec = SWAB32(0);
694 ufs_inode->ui_mtime.tv_sec = SWAB32(inode->i_mtime);
695 ufs_inode->ui_mtime.tv_usec = SWAB32(0);
696 ufs_inode->ui_blocks = SWAB32(inode->i_blocks);
697 ufs_inode->ui_flags = SWAB32(inode->u.ufs_i.i_flags);
698 ufs_inode->ui_gen = SWAB32(inode->u.ufs_i.i_gen);
700 if ((flags & UFS_UID_MASK) == UFS_UID_EFT) {
701 ufs_inode->ui_u3.ui_sun.ui_shadow = SWAB32(inode->u.ufs_i.i_shadow);
702 ufs_inode->ui_u3.ui_sun.ui_oeftflag = SWAB32(inode->u.ufs_i.i_oeftflag);
705 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
706 ufs_inode->ui_u2.ui_addr.ui_db[0] = SWAB32(kdev_t_to_nr(inode->i_rdev));
707 else if (inode->i_blocks) {
708 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
709 ufs_inode->ui_u2.ui_addr.ui_db[i] = inode->u.ufs_i.i_u1.i_data[i];
711 else {
712 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
713 ufs_inode->ui_u2.ui_symlink[i] = inode->u.ufs_i.i_u1.i_symlink[i];
716 if (!inode->i_nlink)
717 memset (ufs_inode, 0, sizeof(struct ufs_inode));
719 mark_buffer_dirty(bh, 1);
720 if (do_sync) {
721 ll_rw_block (WRITE, 1, &bh);
722 wait_on_buffer (bh);
724 brelse (bh);
726 UFSD(("EXIT\n"))
727 return 0;
/* VFS write_inode hook: write the inode back without waiting for I/O. */
void ufs_write_inode (struct inode * inode)
{
	ufs_update_inode (inode, 0);
}
/* Write the inode and wait for the I/O; returns ufs_update_inode()'s result. */
int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}
/* VFS put_inode hook: nothing to do for UFS beyond the debug trace. */
void ufs_put_inode (struct inode * inode)
{
	UFSD(("ENTER & EXIT\n"))
}
/*
 * VFS delete_inode hook: the last reference to an unlinked inode is
 * gone.  Write the (zero-nlink, hence zeroed — see ufs_update_inode)
 * on-disk inode, release all of its blocks, then free the inode slot.
 * The order matters: the on-disk inode is cleared before its blocks
 * are returned to the free maps.
 */
void ufs_delete_inode (struct inode * inode)
{
	/*inode->u.ufs_i.i_dtime = CURRENT_TIME;*/
	mark_inode_dirty(inode);
	ufs_update_inode(inode, IS_SYNC(inode));
	inode->i_size = 0;
	if (inode->i_blocks)
		ufs_truncate (inode);
	ufs_free_inode (inode);
}