5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
10 * linux/fs/ext2/inode.c
12 * Copyright (C) 1992, 1993, 1994, 1995
13 * Remy Card (card@masi.ibp.fr)
14 * Laboratoire MASI - Institut Blaise Pascal
15 * Universite Pierre et Marie Curie (Paris VI)
19 * linux/fs/minix/inode.c
21 * Copyright (C) 1991, 1992 Linus Torvalds
23 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
24 * Big-endian to little-endian byte-swapping/bitmaps by
25 * David S. Miller (davem@caip.rutgers.edu), 1995
28 #include <asm/uaccess.h>
29 #include <asm/system.h>
31 #include <linux/errno.h>
33 #include <linux/ufs_fs.h>
34 #include <linux/sched.h>
35 #include <linux/stat.h>
36 #include <linux/string.h>
37 #include <linux/locks.h>
39 #include <linux/smp_lock.h>
/* Compile-time debug switches for this file; both are disabled by default. */
44 #undef UFS_INODE_DEBUG
45 #undef UFS_INODE_DEBUG_MORE
47 #ifdef UFS_INODE_DEBUG
/*
 * UFSD(x): debug trace macro. Prints a "(file, line), function: " prefix,
 * then the printk-style argument list x.
 * NOTE(review): the #else branch defining UFSD as a no-op is not visible
 * in this extraction.
 */
48 #define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
53 #ifdef UFS_INODE_DEBUG_MORE
/*
 * ufs_print_inode - dump an in-core UFS inode to the kernel log.
 * Compiled only when UFS_INODE_DEBUG_MORE is defined. Prints the basic
 * attributes, the twelve direct block pointers (i_data[0..11], byte-swapped
 * via SWAB32 according to the superblock's s_swab setting), and the three
 * indirect block pointers.
 * NOTE(review): this extraction is missing interior lines (braces and
 * possibly other statements); comments describe only the visible code.
 */
54 static void ufs_print_inode(struct inode
* inode
)
/* Byte-swap flag comes from the superblock's UFS private data. */
56 unsigned swab
= inode
->i_sb
->u
.ufs_sb
.s_swab
;
57 printk("ino %lu mode 0%6.6o nlink %d uid %d uid32 %u"
58 " gid %d gid32 %u size %lu blocks %lu\n",
59 inode
->i_ino
, inode
->i_mode
, inode
->i_nlink
,
60 inode
->i_uid
, inode
->u
.ufs_i
.i_uid
, inode
->i_gid
,
61 inode
->u
.ufs_i
.i_gid
, inode
->i_size
, inode
->i_blocks
);
/* Direct block pointers, stored on disk in fs byte order. */
62 printk(" db <%u %u %u %u %u %u %u %u %u %u %u %u>\n",
63 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[0]),
64 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[1]),
65 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[2]),
66 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[3]),
67 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[4]),
68 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[5]),
69 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[6]),
70 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[7]),
71 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[8]),
72 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[9]),
73 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[10]),
74 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[11]));
/* Generation number and single/double/triple indirect block pointers. */
75 printk(" gen %u ib <%u %u %u>\n",
77 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[UFS_IND_BLOCK
]),
78 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[UFS_DIND_BLOCK
]),
79 SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[UFS_TIND_BLOCK
]));
/*
 * ufs_inode_bmap(inode, nr) - map fragment number nr through the inode's
 * direct i_data[] array: pick the block pointer (byte-swapped with SWAB32)
 * and add the fragment offset within that block.
 * NOTE: relies on `uspi` (and the `swab` used by SWAB32) being in scope at
 * every call site — it is not hygienic.
 */
83 #define ufs_inode_bmap(inode, nr) \
84 (SWAB32((inode)->u.ufs_i.i_u1.i_data[(nr) >> uspi->s_fpbshift]) + ((nr) & uspi->s_fpbmask))
/*
 * ufs_block_bmap - map fragment number nr through one indirect block.
 * Reads the 32-bit block pointer at index (nr >> s_fpbshift) inside the
 * buffer's data (byte-swapped per `swab`) and adds the fragment offset
 * (nr & s_fpbmask).
 * NOTE(review): this extraction is missing interior lines (the declaration
 * of `tmp`, the null-bh check, brelse and the return statement); comments
 * describe only the visible code.
 */
86 static inline unsigned int ufs_block_bmap (struct buffer_head
* bh
, unsigned nr
,
87 struct ufs_sb_private_info
* uspi
, unsigned swab
)
91 UFSD(("ENTER, nr %u\n", nr
))
94 tmp
= SWAB32(((u32
*) bh
->b_data
)[nr
>> uspi
->s_fpbshift
]) + (nr
& uspi
->s_fpbmask
);
96 UFSD(("EXIT, result %u\n", tmp
))
/*
 * ufs_frag_map - translate a logical fragment number within a file into a
 * physical fragment number on the device, walking the classic FFS layout:
 * direct pointers, then single, double and triple indirect blocks (each
 * indirect level read via bread() and decoded with ufs_block_bmap).
 * Fragments below UFS_NDIR_FRAGMENT resolve through the inode's direct
 * i_data[] array; larger ones subtract each level's span and descend.
 * NOTE(review): this extraction is missing interior lines (braces, the
 * declarations of `swab`, `ret`, `i`, range-check condition heads, brelse
 * calls and return statements); comments describe only the visible code.
 */
100 int ufs_frag_map(struct inode
*inode
, int frag
)
102 struct super_block
*sb
;
103 struct ufs_sb_private_info
*uspi
;
111 uspi
= sb
->u
.ufs_sb
.s_uspi
;
112 swab
= sb
->u
.ufs_sb
.s_swab
;
/* Sanity: negative fragment numbers are rejected with a warning. */
114 ufs_warning(sb
, "ufs_frag_map", "frag < 0");
/* Upper bound: total addressable fragments = (direct + 1x + 2x + 3x
 * indirect blocks) converted to fragments via s_fpbshift. */
118 ((UFS_NDADDR
+ uspi
->s_apb
+ uspi
->s_2apb
+ uspi
->s_3apb
)
119 << uspi
->s_fpbshift
)) {
120 ufs_warning(sb
, "ufs_frag_map", "frag > big");
/* Case 1: direct block — resolve through the inode itself. */
124 if (frag
< UFS_NDIR_FRAGMENT
) {
125 ret
= uspi
->s_sbbase
+ ufs_inode_bmap(inode
, frag
);
/* Case 2: single indirect. */
129 frag
-= UFS_NDIR_FRAGMENT
;
130 if (frag
< (1 << (uspi
->s_apbshift
+ uspi
->s_fpbshift
))) {
131 i
= ufs_inode_bmap(inode
,
132 UFS_IND_FRAGMENT
+ (frag
>> uspi
->s_apbshift
));
135 ret
= (uspi
->s_sbbase
+
136 ufs_block_bmap(bread(sb
->s_dev
, uspi
->s_sbbase
+ i
,
138 frag
& uspi
->s_apbmask
, uspi
, swab
));
/* Case 3: double indirect — two levels of indirection. */
140 frag
-= 1 << (uspi
->s_apbshift
+ uspi
->s_fpbshift
);
141 if (frag
< (1 << (uspi
->s_2apbshift
+ uspi
->s_fpbshift
))) {
142 i
= ufs_inode_bmap (inode
,
143 UFS_DIND_FRAGMENT
+ (frag
>> uspi
->s_2apbshift
));
146 i
= ufs_block_bmap(bread(sb
->s_dev
, uspi
->s_sbbase
+ i
,
148 (frag
>> uspi
->s_apbshift
) & uspi
->s_apbmask
,
152 ret
= (uspi
->s_sbbase
+
153 ufs_block_bmap(bread(sb
->s_dev
, uspi
->s_sbbase
+ i
,
155 (frag
& uspi
->s_apbmask
), uspi
, swab
));
/* Case 4: triple indirect — three levels of indirection. */
158 frag
-= 1 << (uspi
->s_2apbshift
+ uspi
->s_fpbshift
);
159 i
= ufs_inode_bmap(inode
,
160 UFS_TIND_FRAGMENT
+ (frag
>> uspi
->s_3apbshift
));
163 i
= ufs_block_bmap(bread(sb
->s_dev
, uspi
->s_sbbase
+ i
, sb
->s_blocksize
),
164 (frag
>> uspi
->s_2apbshift
) & uspi
->s_apbmask
,
168 i
= ufs_block_bmap(bread(sb
->s_dev
, uspi
->s_sbbase
+ i
, sb
->s_blocksize
),
169 (frag
>> uspi
->s_apbshift
) & uspi
->s_apbmask
,
173 ret
= (uspi
->s_sbbase
+
174 ufs_block_bmap(bread(sb
->s_dev
, uspi
->s_sbbase
+ i
, sb
->s_blocksize
),
175 (frag
& uspi
->s_apbmask
), uspi
, swab
));
/*
 * ufs_inode_getfrag - find or allocate a fragment addressed through the
 * inode's direct block array, returning the buffer_head for it.
 * @fragment:     fragment number to look up/allocate
 * @new_fragment: the logical fragment being mapped (used for the RLIMIT
 *                check against the process file-size limit)
 * @required:     number of fragments needed
 * @err:          out parameter for the allocation error code
 * @metadata/@phys/@new: control whether a buffer or a physical address is
 *                returned and whether the block is newly allocated
 *                (presumably — the returning code is not fully visible;
 *                TODO confirm against the full source).
 * On allocation it may have to reallocate/extend the last allocated block
 * so the file stays contiguous in whole blocks; updates i_ctime, syncs or
 * dirties the inode depending on IS_SYNC (the condition line is not
 * visible here).
 * NOTE(review): this extraction is missing interior lines (braces, the
 * declarations of `p`, `p2`, `tmp`, `goal`, `limit`, repeat/retry labels,
 * brelse and return statements); comments describe only the visible code.
 */
181 static struct buffer_head
* ufs_inode_getfrag (struct inode
*inode
,
182 unsigned int fragment
, unsigned int new_fragment
,
183 unsigned int required
, int *err
, int metadata
, long *phys
, int *new)
185 struct super_block
* sb
;
186 struct ufs_sb_private_info
* uspi
;
187 struct buffer_head
* result
;
189 unsigned block
, blockoff
, lastfrag
, lastblock
, lastblockoff
;
194 UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u, required %u\n",
195 inode
->i_ino
, fragment
, new_fragment
, required
))
198 swab
= sb
->u
.ufs_sb
.s_swab
;
199 uspi
= sb
->u
.ufs_sb
.s_uspi
;
/* Split the fragment number into (block index, offset within block). */
200 block
= ufs_fragstoblks (fragment
);
201 blockoff
= ufs_fragnum (fragment
);
/* p points at the direct block pointer slot for this block. */
202 p
= inode
->u
.ufs_i
.i_u1
.i_data
+ block
;
207 lastfrag
= inode
->u
.ufs_i
.i_lastfrag
;
/* Fast path: the block already exists and the fragment precedes the
 * last allocated fragment — just get the buffer. */
208 if (tmp
&& fragment
< lastfrag
) {
210 result
= getblk (sb
->s_dev
, uspi
->s_sbbase
+ tmp
+ blockoff
,
/* Re-check *p after getblk: it may have changed while blocked. */
212 if (tmp
== SWAB32(*p
)) {
213 UFSD(("EXIT, result %u\n", tmp
+ blockoff
))
/* Enforce the per-process RLIMIT_FSIZE before growing the file. */
225 limit
= current
->rlim
[RLIMIT_FSIZE
].rlim_cur
;
226 if (limit
< RLIM_INFINITY
) {
227 limit
>>= sb
->s_blocksize_bits
;
228 if (new_fragment
>= limit
) {
229 send_sig(SIGXFSZ
, current
, 0);
234 lastblock
= ufs_fragstoblks (lastfrag
);
235 lastblockoff
= ufs_fragnum (lastfrag
);
237 * We will extend file into new block beyond last allocated block
239 if (lastblock
< block
) {
241 * We must reallocate last allocated block
244 p2
= inode
->u
.ufs_i
.i_u1
.i_data
+ lastblock
;
245 tmp
= ufs_new_fragments (inode
, p2
, lastfrag
,
246 SWAB32(*p2
), uspi
->s_fpb
- lastblockoff
, err
);
/* If i_lastfrag moved under us, another writer raced — retry path
 * (not visible in this extraction). */
248 if (lastfrag
!= inode
->u
.ufs_i
.i_lastfrag
)
253 lastfrag
= inode
->u
.ufs_i
.i_lastfrag
;
/* Allocation goal: just past the last allocated block. */
256 goal
= SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[lastblock
]) + uspi
->s_fpb
;
257 tmp
= ufs_new_fragments (inode
, p
, fragment
- blockoff
,
258 goal
, required
+ blockoff
, err
);
261 * We will extend last allocated block
263 else if (lastblock
== block
) {
264 tmp
= ufs_new_fragments (inode
, p
, fragment
- (blockoff
- lastblockoff
),
265 SWAB32(*p
), required
+ (blockoff
- lastblockoff
), err
);
268 * We will allocate new block before last allocated block
270 else /* (lastblock > block) */ {
/* Goal: follow the block pointer immediately preceding this one. */
271 if (lastblock
&& (tmp
= SWAB32(inode
->u
.ufs_i
.i_u1
.i_data
[lastblock
-1])))
272 goal
= tmp
+ uspi
->s_fpb
;
273 tmp
= ufs_new_fragments (inode
, p
, fragment
- blockoff
,
274 goal
, uspi
->s_fpb
, err
);
/* Detect a concurrent change of the slot / i_lastfrag after the
 * allocation attempt (retry path not visible). */
277 if ((!blockoff
&& SWAB32(*p
)) ||
278 (blockoff
&& lastfrag
!= inode
->u
.ufs_i
.i_lastfrag
))
284 /* The nullification of fragments done in ufs/balloc.c is
285 * something I don't have the stomach to move into here right
289 result
= getblk (inode
->i_dev
, tmp
+ blockoff
, sb
->s_blocksize
);
/* Block pointers changed: update ctime and push the inode out,
 * synchronously or via the dirty list (condition not visible). */
297 inode
->i_ctime
= CURRENT_TIME
;
299 ufs_sync_inode (inode
);
300 mark_inode_dirty(inode
);
301 UFSD(("EXIT, result %u\n", tmp
+ blockoff
))
/*
 * ufs_block_getfrag - find or allocate a fragment addressed through an
 * indirect block (bh), returning the buffer_head for it.
 * Reads the indirect block if not up to date, looks up the pointer slot,
 * and on a miss allocates a whole block's worth of fragments with
 * ufs_new_fragments, writing the new pointer back into the indirect block
 * (which is then marked dirty and, for IS_SYNC inodes, written out).
 * NOTE(review): this extraction is missing interior lines (braces, the
 * declarations of `p`, error/exit paths, brelse/wait_on_buffer calls and
 * return statements); comments describe only the visible code.
 */
305 static struct buffer_head
* ufs_block_getfrag (struct inode
*inode
,
306 struct buffer_head
*bh
, unsigned int fragment
, unsigned int new_fragment
,
307 unsigned int blocksize
, int * err
, int metadata
, long *phys
, int *new)
309 struct super_block
* sb
;
310 struct ufs_sb_private_info
* uspi
;
311 struct buffer_head
* result
;
312 unsigned tmp
, goal
, block
, blockoff
;
317 swab
= sb
->u
.ufs_sb
.s_swab
;
318 uspi
= sb
->u
.ufs_sb
.s_uspi
;
319 block
= ufs_fragstoblks (fragment
);
320 blockoff
= ufs_fragnum (fragment
);
322 UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u\n", inode
->i_ino
, fragment
, new_fragment
))
/* Make sure the indirect block itself is read in before decoding it. */
327 if (!buffer_uptodate(bh
)) {
328 ll_rw_block (READ
, 1, &bh
);
330 if (!buffer_uptodate(bh
))
/* p points at the 32-bit pointer slot inside the indirect block. */
334 p
= (u32
*) bh
->b_data
+ block
;
/* Fast path: pointer already set — get the data buffer. */
339 result
= getblk (bh
->b_dev
, uspi
->s_sbbase
+ tmp
+ blockoff
,
/* Re-check the slot after getblk may have blocked. */
341 if (tmp
== SWAB32(*p
))
/* Enforce RLIMIT_FSIZE before allocating beyond the current size. */
353 unsigned long limit
= current
->rlim
[RLIMIT_FSIZE
].rlim_cur
;
354 if (limit
< RLIM_INFINITY
) {
355 limit
>>= sb
->s_blocksize_bits
;
356 if (new_fragment
>= limit
) {
358 send_sig(SIGXFSZ
, current
, 0);
/* Allocation goal: after the previous pointer in this indirect block,
 * or after the indirect block itself when there is none. */
363 if (block
&& (tmp
= SWAB32(((u32
*)bh
->b_data
)[block
-1]) + uspi
->s_fpb
))
364 goal
= tmp
+ uspi
->s_fpb
;
366 goal
= bh
->b_blocknr
+ uspi
->s_fpb
;
367 tmp
= ufs_new_fragments (inode
, p
, ufs_blknum(new_fragment
), goal
, uspi
->s_fpb
, err
);
374 /* The nullification of fragments done in ufs/balloc.c is
375 * something I don't have the stomach to move into here right
379 result
= getblk (bh
->b_dev
, tmp
+ blockoff
, sb
->s_blocksize
);
/* The indirect block changed: dirty it, and write synchronously for
 * IS_SYNC inodes. */
385 mark_buffer_dirty(bh
, 1);
386 if (IS_SYNC(inode
)) {
387 ll_rw_block (WRITE
, 1, &bh
);
390 inode
->i_ctime
= CURRENT_TIME
;
391 mark_inode_dirty(inode
);
394 UFSD(("EXIT, result %u\n", tmp
+ blockoff
))
/*
 * ufs_getfrag_block - get_block-style mapping entry point: map logical
 * `fragment` of `inode` into bh_result (b_dev/b_blocknr/BH_Mapped, plus
 * BH_New for freshly allocated blocks).
 * Read path: a simple ufs_frag_map lookup. Create path: walk direct /
 * single / double / triple indirect levels via the GET_* helper macros,
 * allocating as needed through ufs_inode_getfrag / ufs_block_getfrag.
 * NOTE(review): this extraction is missing interior lines (braces, the
 * declarations of `err`, `new`, `phys`, the `if (!create)` /
 * `if (fragment < 0)` condition heads, gotos and return statements);
 * comments describe only the visible code.
 */
398 int ufs_getfrag_block (struct inode
*inode
, long fragment
, struct buffer_head
*bh_result
, int create
)
400 struct super_block
* sb
;
401 struct ufs_sb_private_info
* uspi
;
402 struct buffer_head
* bh
;
405 unsigned long ptr
, phys
;
408 uspi
= sb
->u
.ufs_sb
.s_uspi
;
409 swab
= sb
->u
.ufs_sb
.s_swab
;
/* Read-only lookup: map and fill bh_result without allocating. */
412 phys
= ufs_frag_map(inode
, fragment
);
414 bh_result
->b_dev
= inode
->i_dev
;
415 bh_result
->b_blocknr
= phys
;
416 bh_result
->b_state
|= (1UL << BH_Mapped
);
428 UFSD(("ENTER, ino %lu, fragment %u\n", inode
->i_ino
, fragment
))
/* Bounds check against the total addressable fragments. */
432 ((UFS_NDADDR
+ uspi
->s_apb
+ uspi
->s_2apb
+ uspi
->s_3apb
)
433 << uspi
->s_fpbshift
))
440 * ok, these macros clean the logic up a bit and make
441 * it much more readable:
443 #define GET_INODE_DATABLOCK(x) \
444 ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new)
445 #define GET_INODE_PTR(x) \
446 ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL)
447 #define GET_INDIRECT_DATABLOCK(x) \
448 ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
449 &err, 0, &phys, &new);
450 #define GET_INDIRECT_PTR(x) \
451 ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
452 &err, 1, NULL, NULL);
/* Direct block. */
454 if (ptr
< UFS_NDIR_FRAGMENT
) {
455 bh
= GET_INODE_DATABLOCK(ptr
);
/* Single indirect. */
458 ptr
-= UFS_NDIR_FRAGMENT
;
459 if (ptr
< (1 << (uspi
->s_apbshift
+ uspi
->s_fpbshift
))) {
460 bh
= GET_INODE_PTR(UFS_IND_FRAGMENT
+ (ptr
>> uspi
->s_apbshift
));
/* Double indirect. */
463 ptr
-= 1 << (uspi
->s_apbshift
+ uspi
->s_fpbshift
);
464 if (ptr
< (1 << (uspi
->s_2apbshift
+ uspi
->s_fpbshift
))) {
465 bh
= GET_INODE_PTR(UFS_DIND_FRAGMENT
+ (ptr
>> uspi
->s_2apbshift
));
/* Triple indirect. */
468 ptr
-= 1 << (uspi
->s_2apbshift
+ uspi
->s_fpbshift
);
469 bh
= GET_INODE_PTR(UFS_TIND_FRAGMENT
+ (ptr
>> uspi
->s_3apbshift
));
470 bh
= GET_INDIRECT_PTR((ptr
>> uspi
->s_2apbshift
) & uspi
->s_apbmask
);
472 bh
= GET_INDIRECT_PTR((ptr
>> uspi
->s_apbshift
) & uspi
->s_apbmask
);
/* Final level: the data block itself. */
474 bh
= GET_INDIRECT_DATABLOCK(ptr
& uspi
->s_apbmask
);
476 #undef GET_INODE_DATABLOCK
478 #undef GET_INDIRECT_DATABLOCK
479 #undef GET_INDIRECT_PTR
/* Success: publish the mapping in bh_result. */
484 bh_result
->b_dev
= inode
->i_dev
;
485 bh_result
->b_blocknr
= phys
;
486 bh_result
->b_state
|= (1UL << BH_Mapped
);
/* Freshly allocated block: mark BH_New so callers zero it. */
488 bh_result
->b_state
|= (1UL << BH_New
);
/* Error labels for the bounds checks above. */
494 ufs_warning(sb
, "ufs_get_block", "block < 0");
498 ufs_warning(sb
, "ufs_get_block", "block > big");
/*
 * ufs_getfrag - buffer_head-returning wrapper around ufs_getfrag_block.
 * Runs the mapping into a dummy on-stack buffer_head, then getblk()s the
 * mapped block; if the block is newly allocated (buffer_new), zeroes it
 * and marks it uptodate and dirty before returning.
 * NOTE(review): this extraction is missing interior lines (braces, the
 * declaration of `error`, the error/return paths); comments describe only
 * the visible code.
 */
502 struct buffer_head
*ufs_getfrag(struct inode
*inode
, unsigned int fragment
,
503 int create
, int *err
)
505 struct buffer_head dummy
;
/* Sentinel so an unmapped result is recognizable. */
509 dummy
.b_blocknr
= -1000;
510 error
= ufs_getfrag_block(inode
, fragment
, &dummy
, create
);
512 if (!error
&& buffer_mapped(&dummy
)) {
513 struct buffer_head
*bh
;
514 bh
= getblk(dummy
.b_dev
, dummy
.b_blocknr
, inode
->i_sb
->s_blocksize
);
/* Newly allocated block: zero-fill and mark it valid and dirty. */
515 if (buffer_new(&dummy
)) {
516 memset(bh
->b_data
, 0, inode
->i_sb
->s_blocksize
);
517 mark_buffer_uptodate(bh
, 1);
518 mark_buffer_dirty(bh
, 1);
/*
 * ufs_bread - like ufs_getfrag, but guarantee the returned buffer contains
 * valid data: if the buffer is not already up to date, issue a read and
 * (in code not visible here) wait for it before returning.
 * NOTE(review): this extraction is missing interior lines (braces,
 * wait_on_buffer, the brelse/NULL failure path and return statements);
 * comments describe only the visible code.
 */
525 struct buffer_head
* ufs_bread (struct inode
* inode
, unsigned fragment
,
526 int create
, int * err
)
528 struct buffer_head
* bh
;
530 UFSD(("ENTER, ino %lu, fragment %u\n", inode
->i_ino
, fragment
))
531 bh
= ufs_getfrag (inode
, fragment
, create
, err
);
/* Already valid (or lookup failed): no read needed. */
532 if (!bh
|| buffer_uptodate(bh
))
534 ll_rw_block (READ
, 1, &bh
);
536 if (buffer_uptodate(bh
))
/*
 * ufs_read_inode - super_operations read_inode hook: read the on-disk
 * struct ufs_inode for inode->i_ino from the device and populate the
 * in-core inode (mode, nlink, uid/gid, size, times, blocks, UFS-private
 * fields, block pointers or inline symlink target, and the inode
 * operations vector by file type).
 * All on-disk fields are converted with SWAB16/SWAB32/SWAB64 according to
 * the superblock's byte-swap setting.
 * NOTE(review): this extraction is missing interior lines (braces, the
 * declaration of `i`, the bad-inode/error exit paths, brelse and the
 * special-file branch heads); comments describe only the visible code.
 */
543 void ufs_read_inode (struct inode
* inode
)
545 struct super_block
* sb
;
546 struct ufs_sb_private_info
* uspi
;
547 struct ufs_inode
* ufs_inode
;
548 struct buffer_head
* bh
;
550 unsigned flags
, swab
;
552 UFSD(("ENTER, ino %lu\n", inode
->i_ino
))
555 uspi
= sb
->u
.ufs_sb
.s_uspi
;
556 flags
= sb
->u
.ufs_sb
.s_flags
;
557 swab
= sb
->u
.ufs_sb
.s_swab
;
/* Validate the inode number against [UFS_ROOTINO, ncg * ipg]. */
559 if (inode
->i_ino
< UFS_ROOTINO
||
560 inode
->i_ino
> (uspi
->s_ncg
* uspi
->s_ipg
)) {
561 ufs_warning (sb
, "ufs_read_inode", "bad inode number (%lu)\n", inode
->i_ino
);
/* Read the filesystem block holding this inode. */
565 bh
= bread (sb
->s_dev
, uspi
->s_sbbase
+ ufs_inotofsba(inode
->i_ino
), sb
->s_blocksize
);
567 ufs_warning (sb
, "ufs_read_inode", "unable to read inode %lu\n", inode
->i_ino
);
/* Locate this inode's slot within the block. */
570 ufs_inode
= (struct ufs_inode
*) (bh
->b_data
+ sizeof(struct ufs_inode
) * ufs_inotofsbo(inode
->i_ino
));
573 * Copy data to the in-core inode.
575 inode
->i_mode
= SWAB16(ufs_inode
->ui_mode
);
576 inode
->i_nlink
= SWAB16(ufs_inode
->ui_nlink
);
577 if (inode
->i_nlink
== 0)
578 ufs_error (sb
, "ufs_read_inode", "inode %lu has zero nlink\n", inode
->i_ino
);
581 * Linux has only 16-bit uid and gid, so we can't support EFT.
582 * Files are dynamically chown()ed to root.
584 inode
->i_uid
= inode
->u
.ufs_i
.i_uid
= ufs_get_inode_uid(ufs_inode
);
585 inode
->i_gid
= inode
->u
.ufs_i
.i_gid
= ufs_get_inode_gid(ufs_inode
);
/* EFT ids don't fit in 16 bits — handling bodies not visible here. */
586 if (inode
->u
.ufs_i
.i_uid
>= UFS_USEEFT
) {
589 if (inode
->u
.ufs_i
.i_gid
>= UFS_USEEFT
) {
594 * Linux i_size can be 32 on some architectures. We will mark
595 * big files as read only and let user access first 32 bits.
597 inode
->u
.ufs_i
.i_size
= SWAB64(ufs_inode
->ui_size
);
598 inode
->i_size
= (off_t
) inode
->u
.ufs_i
.i_size
;
/* Clamp to 32 bits on 32-bit off_t architectures. */
599 if (sizeof(off_t
) == 4 && (inode
->u
.ufs_i
.i_size
>> 32))
600 inode
->i_size
= (__u32
)-1;
602 inode
->i_atime
= SWAB32(ufs_inode
->ui_atime
.tv_sec
);
603 inode
->i_ctime
= SWAB32(ufs_inode
->ui_ctime
.tv_sec
);
604 inode
->i_mtime
= SWAB32(ufs_inode
->ui_mtime
.tv_sec
);
605 inode
->i_blocks
= SWAB32(ufs_inode
->ui_blocks
);
606 inode
->i_blksize
= PAGE_SIZE
; /* This is the optimal IO size (for stat) */
607 inode
->i_version
= ++event
;
/* UFS-private (Sun) fields. */
609 inode
->u
.ufs_i
.i_flags
= SWAB32(ufs_inode
->ui_flags
);
610 inode
->u
.ufs_i
.i_gen
= SWAB32(ufs_inode
->ui_gen
);
611 inode
->u
.ufs_i
.i_shadow
= SWAB32(ufs_inode
->ui_u3
.ui_sun
.ui_shadow
);
612 inode
->u
.ufs_i
.i_oeftflag
= SWAB32(ufs_inode
->ui_u3
.ui_sun
.ui_oeftflag
);
613 inode
->u
.ufs_i
.i_lastfrag
= howmany (inode
->i_size
, uspi
->s_fsize
);
/* Device special files keep their rdev in ui_db[0] (branch body not
 * visible); files with blocks copy the pointer array, otherwise the
 * inline symlink bytes are copied. */
615 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
))
617 else if (inode
->i_blocks
) {
618 for (i
= 0; i
< (UFS_NDADDR
+ UFS_NINDIR
); i
++)
/* Block pointers are copied raw (no swab) — kept in fs byte order. */
619 inode
->u
.ufs_i
.i_u1
.i_data
[i
] = ufs_inode
->ui_u2
.ui_addr
.ui_db
[i
];
622 for (i
= 0; i
< (UFS_NDADDR
+ UFS_NINDIR
) * 4; i
++)
623 inode
->u
.ufs_i
.i_u1
.i_symlink
[i
] = ufs_inode
->ui_u2
.ui_symlink
[i
];
/* Wire up the inode operations vector by file type. */
629 if (S_ISREG(inode
->i_mode
))
630 inode
->i_op
= &ufs_file_inode_operations
;
631 else if (S_ISDIR(inode
->i_mode
))
632 inode
->i_op
= &ufs_dir_inode_operations
;
633 else if (S_ISLNK(inode
->i_mode
))
634 inode
->i_op
= &ufs_symlink_inode_operations
;
636 init_special_inode(inode
, inode
->i_mode
,
637 SWAB32(ufs_inode
->ui_u2
.ui_addr
.ui_db
[0]));
641 #ifdef UFS_INODE_DEBUG_MORE
642 ufs_print_inode (inode
);
/*
 * ufs_update_inode - write the in-core inode back to its on-disk slot,
 * mirroring ufs_read_inode. Reads the containing block, fills the struct
 * ufs_inode (byte-swapped), marks the buffer dirty and, when do_sync is
 * set, writes it out synchronously (the do_sync branch head is not
 * visible in this extraction).
 * @do_sync: nonzero to write the buffer synchronously.
 * NOTE(review): this extraction is missing interior lines (braces, the
 * declaration of `i`, the deleted-inode condition before the memset,
 * wait_on_buffer, brelse, return statements, and — note — the warning
 * strings still say "ufs_read_inode", copied from the read path);
 * comments describe only the visible code.
 */
647 static int ufs_update_inode(struct inode
* inode
, int do_sync
)
649 struct super_block
* sb
;
650 struct ufs_sb_private_info
* uspi
;
651 struct buffer_head
* bh
;
652 struct ufs_inode
* ufs_inode
;
654 unsigned flags
, swab
;
656 UFSD(("ENTER, ino %lu\n", inode
->i_ino
))
659 uspi
= sb
->u
.ufs_sb
.s_uspi
;
660 flags
= sb
->u
.ufs_sb
.s_flags
;
661 swab
= sb
->u
.ufs_sb
.s_swab
;
/* Validate the inode number, as in ufs_read_inode. */
663 if (inode
->i_ino
< UFS_ROOTINO
||
664 inode
->i_ino
> (uspi
->s_ncg
* uspi
->s_ipg
)) {
665 ufs_warning (sb
, "ufs_read_inode", "bad inode number (%lu)\n", inode
->i_ino
);
669 bh
= bread (sb
->s_dev
, ufs_inotofsba(inode
->i_ino
), sb
->s_blocksize
);
671 ufs_warning (sb
, "ufs_read_inode", "unable to read inode %lu\n", inode
->i_ino
);
674 ufs_inode
= (struct ufs_inode
*) (bh
->b_data
+ ufs_inotofsbo(inode
->i_ino
) * sizeof(struct ufs_inode
));
676 ufs_inode
->ui_mode
= SWAB16(inode
->i_mode
);
677 ufs_inode
->ui_nlink
= SWAB16(inode
->i_nlink
);
/* Preserve the original EFT uid/gid if the file was chown()ed to root
 * at read time because the real id did not fit in 16 bits. */
679 if (inode
->i_uid
== 0 && inode
->u
.ufs_i
.i_uid
>= UFS_USEEFT
)
680 ufs_set_inode_uid (ufs_inode
, inode
->u
.ufs_i
.i_uid
);
682 ufs_set_inode_uid (ufs_inode
, inode
->i_uid
);
684 if (inode
->i_gid
== 0 && inode
->u
.ufs_i
.i_gid
>= UFS_USEEFT
)
685 ufs_set_inode_gid (ufs_inode
, inode
->u
.ufs_i
.i_gid
);
687 ufs_set_inode_gid (ufs_inode
, inode
->i_gid
);
689 ufs_inode
->ui_size
= SWAB64((u64
)inode
->i_size
);
/* On-disk timestamps carry microseconds; Linux only keeps seconds. */
690 ufs_inode
->ui_atime
.tv_sec
= SWAB32(inode
->i_atime
);
691 ufs_inode
->ui_atime
.tv_usec
= SWAB32(0);
692 ufs_inode
->ui_ctime
.tv_sec
= SWAB32(inode
->i_ctime
);
693 ufs_inode
->ui_ctime
.tv_usec
= SWAB32(0);
694 ufs_inode
->ui_mtime
.tv_sec
= SWAB32(inode
->i_mtime
);
695 ufs_inode
->ui_mtime
.tv_usec
= SWAB32(0);
696 ufs_inode
->ui_blocks
= SWAB32(inode
->i_blocks
);
697 ufs_inode
->ui_flags
= SWAB32(inode
->u
.ufs_i
.i_flags
);
698 ufs_inode
->ui_gen
= SWAB32(inode
->u
.ufs_i
.i_gen
);
/* Sun EFT variant also stores shadow inode and oeftflag. */
700 if ((flags
& UFS_UID_MASK
) == UFS_UID_EFT
) {
701 ufs_inode
->ui_u3
.ui_sun
.ui_shadow
= SWAB32(inode
->u
.ufs_i
.i_shadow
);
702 ufs_inode
->ui_u3
.ui_sun
.ui_oeftflag
= SWAB32(inode
->u
.ufs_i
.i_oeftflag
);
/* Device specials keep rdev in ui_db[0]; data files copy the raw
 * (fs byte order) pointer array; symlinks copy the inline target. */
705 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
))
706 ufs_inode
->ui_u2
.ui_addr
.ui_db
[0] = SWAB32(kdev_t_to_nr(inode
->i_rdev
));
707 else if (inode
->i_blocks
) {
708 for (i
= 0; i
< (UFS_NDADDR
+ UFS_NINDIR
); i
++)
709 ufs_inode
->ui_u2
.ui_addr
.ui_db
[i
] = inode
->u
.ufs_i
.i_u1
.i_data
[i
];
712 for (i
= 0; i
< (UFS_NDADDR
+ UFS_NINDIR
) * 4; i
++)
713 ufs_inode
->ui_u2
.ui_symlink
[i
] = inode
->u
.ufs_i
.i_u1
.i_symlink
[i
];
/* Deleted inode: wipe the on-disk slot (the guarding condition is not
 * visible in this extraction). */
717 memset (ufs_inode
, 0, sizeof(struct ufs_inode
));
719 mark_buffer_dirty(bh
, 1);
/* do_sync path: force the buffer to disk now. */
721 ll_rw_block (WRITE
, 1, &bh
);
/*
 * ufs_write_inode - super_operations write_inode hook: write the inode
 * back asynchronously (do_sync = 0).
 */
730 void ufs_write_inode (struct inode
* inode
)
732 ufs_update_inode (inode
, 0);
/*
 * ufs_sync_inode - write the inode back synchronously (do_sync = 1) and
 * return the result of ufs_update_inode.
 */
735 int ufs_sync_inode (struct inode
*inode
)
737 return ufs_update_inode (inode
, 1);
/*
 * ufs_put_inode - super_operations put_inode hook; only emits a debug
 * trace in the code visible here.
 */
740 void ufs_put_inode (struct inode
* inode
)
742 UFSD(("ENTER & EXIT\n"))
/*
 * ufs_delete_inode - super_operations delete_inode hook: push the final
 * inode state to disk (synchronously for IS_SYNC inodes), truncate all
 * data blocks, then free the on-disk inode itself.
 * NOTE(review): this extraction is missing interior lines (braces and any
 * statements between the update and the truncate); comments describe only
 * the visible code.
 */
745 void ufs_delete_inode (struct inode
* inode
)
747 /*inode->u.ufs_i.i_dtime = CURRENT_TIME;*/
748 mark_inode_dirty(inode
);
749 ufs_update_inode(inode
, IS_SYNC(inode
));
752 ufs_truncate (inode
);
753 ufs_free_inode (inode
);