/*
 * Inode handling routines for the OSTA-UDF(tm) filesystem.
 *
 * This file is distributed under the terms of the GNU General Public
 * License (GPL). Copies of the GPL can be obtained from:
 *	ftp://prep.ai.mit.edu/pub/gnu/GPL
 * Each contributing author retains all rights to their own work.
 *
 * (C) 1998 Dave Boynton
 * (C) 1998-2004 Ben Fennema
 * (C) 1999-2000 Stelias Computing Inc
 *
 * 10/04/98 dgb	Added rudimentary directory functions
 * 10/07/98	Fully working udf_block_map! It works!
 * 11/25/98	bmap altered to better support extents
 * 12/06/98 blf	partition support in udf_iget, udf_block_map
 * 12/12/98	rewrote udf_block_map to handle next extents and descs across
 *		block boundaries (which is not actually allowed)
 * 12/20/98	added support for strategy 4096
 * 03/07/99	rewrote udf_block_map (again)
 *		New funcs, inode_bmap, udf_next_aext
 * 04/19/99	Support for writing device EA's for major/minor #
 */
#include "udfdecl.h"
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/crc-itu-t.h>
#include <linux/mpage.h>
#include <linux/aio.h>

#include "udf_i.h"
#include "udf_sb.h"

MODULE_AUTHOR("Ben Fennema");
MODULE_DESCRIPTION("Universal Disk Format Filesystem");
MODULE_LICENSE("GPL");

#define EXTENT_MERGE_SIZE 5
static umode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static int udf_sync_inode(struct inode *inode);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
static int8_t udf_insert_aext(struct inode *, struct extent_position,
			      struct kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, int,
			      struct kernel_long_ad [EXTENT_MERGE_SIZE],
			      int *);
static void udf_prealloc_extents(struct inode *, int, int,
				 struct kernel_long_ad [EXTENT_MERGE_SIZE],
				 int *);
static void udf_merge_extents(struct inode *,
			      struct kernel_long_ad [EXTENT_MERGE_SIZE],
			      int *);
static void udf_update_extents(struct inode *,
			       struct kernel_long_ad [EXTENT_MERGE_SIZE],
			       int, int, struct extent_position *);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
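/*
 * Per-inode extent position cache: cached_extent remembers the position of
 * the last extent looked up and the file offset (lstart) it maps, so that
 * sequential lookups in inode_bmap() can continue from it instead of
 * rewalking all allocation descriptors. It is protected by
 * i_extent_cache_lock; lstart == -1 means the cache is empty.
 */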
static void __udf_clear_extent_cache(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (iinfo->cached_extent.lstart != -1) {
		brelse(iinfo->cached_extent.epos.bh);
		iinfo->cached_extent.lstart = -1;
	}
}
/* Invalidate extent cache */
static void udf_clear_extent_cache(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	spin_lock(&iinfo->i_extent_cache_lock);
	__udf_clear_extent_cache(inode);
	spin_unlock(&iinfo->i_extent_cache_lock);
}
/* Return contents of extent cache */
static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
				 loff_t *lbcount, struct extent_position *pos)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	int ret = 0;

	spin_lock(&iinfo->i_extent_cache_lock);
	if ((iinfo->cached_extent.lstart <= bcount) &&
	    (iinfo->cached_extent.lstart != -1)) {
		/* Cache hit */
		*lbcount = iinfo->cached_extent.lstart;
		memcpy(pos, &iinfo->cached_extent.epos,
		       sizeof(struct extent_position));
		if (pos->bh)
			get_bh(pos->bh);
		ret = 1;
	}
	spin_unlock(&iinfo->i_extent_cache_lock);
	return ret;
}
/* Add extent to extent cache */
static void udf_update_extent_cache(struct inode *inode, loff_t estart,
				    struct extent_position *pos, int next_epos)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	spin_lock(&iinfo->i_extent_cache_lock);
	/* Invalidate previously cached extent */
	__udf_clear_extent_cache(inode);
	if (pos->bh)
		get_bh(pos->bh);
	memcpy(&iinfo->cached_extent.epos, pos,
	       sizeof(struct extent_position));
	iinfo->cached_extent.lstart = estart;
	if (next_epos)
		switch (iinfo->i_alloc_type) {
		case ICBTAG_FLAG_AD_SHORT:
			iinfo->cached_extent.epos.offset -=
						sizeof(struct short_ad);
			break;
		case ICBTAG_FLAG_AD_LONG:
			iinfo->cached_extent.epos.offset -=
						sizeof(struct long_ad);
			break;
		}
	spin_unlock(&iinfo->i_extent_cache_lock);
}
void udf_evict_inode(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		want_delete = 1;
		udf_setsize(inode, 0);
		udf_update_inode(inode, IS_SYNC(inode));
	}
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
	    inode->i_size != iinfo->i_lenExtents) {
		udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
			 inode->i_ino, inode->i_mode,
			 (unsigned long long)inode->i_size,
			 (unsigned long long)iinfo->i_lenExtents);
	}
	kfree(iinfo->i_ext.i_data);
	iinfo->i_ext.i_data = NULL;
	udf_clear_extent_cache(inode);
	if (want_delete)
		udf_free_inode(inode);
}
static void udf_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	struct udf_inode_info *iinfo = UDF_I(inode);
	loff_t isize = inode->i_size;

	if (to > isize) {
		truncate_pagecache(inode, isize);
		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
			down_write(&iinfo->i_data_sem);
			udf_clear_extent_cache(inode);
			udf_truncate_extents(inode);
			up_write(&iinfo->i_data_sem);
		}
	}
}
static int udf_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, udf_get_block, wbc);
}
static int udf_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, udf_get_block);
}
static int udf_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, udf_get_block);
}
static int udf_readpages(struct file *file, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, udf_get_block);
}
static int udf_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned flags,
			   struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				udf_get_block);
	if (unlikely(ret))
		udf_write_failed(mapping, pos + len);
	return ret;
}
static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
			     struct iov_iter *iter,
			     loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
				 udf_get_block);
	if (unlikely(ret < 0 && (rw & WRITE)))
		udf_write_failed(mapping, offset + count);
	return ret;
}
static sector_t udf_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, udf_get_block);
}
const struct address_space_operations udf_aops = {
	.readpage	= udf_readpage,
	.readpages	= udf_readpages,
	.writepage	= udf_writepage,
	.writepages	= udf_writepages,
	.write_begin	= udf_write_begin,
	.write_end	= generic_write_end,
	.direct_IO	= udf_direct_IO,
	.bmap		= udf_bmap,
};
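/*
 * Files small enough to be stored inside the ICB (ICBTAG_FLAG_AD_IN_ICB)
 * use udf_adinicb_aops instead of udf_aops; such an inode has to be
 * converted by udf_expand_file_adinicb() below before normal block-mapped
 * I/O can be performed on it.
 */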
/*
 * Expand file stored in ICB to a normal one-block-file
 *
 * This function requires i_data_sem for writing and releases it.
 * This function requires i_mutex held
 */
int udf_expand_file_adinicb(struct inode *inode)
{
	struct page *page;
	char *kaddr;
	struct udf_inode_info *iinfo = UDF_I(inode);
	int err;
	struct writeback_control udf_wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
	};

	WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex));
	if (!iinfo->i_lenAlloc) {
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
		else
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
		/* from now on we have normal address_space methods */
		inode->i_data.a_ops = &udf_aops;
		up_write(&iinfo->i_data_sem);
		mark_inode_dirty(inode);
		return 0;
	}
	/*
	 * Release i_data_sem so that we can lock a page - page lock ranks
	 * above i_data_sem. i_mutex still protects us against file changes.
	 */
	up_write(&iinfo->i_data_sem);

	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page)) {
		kaddr = kmap(page);
		memset(kaddr + iinfo->i_lenAlloc, 0x00,
		       PAGE_CACHE_SIZE - iinfo->i_lenAlloc);
		memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
		       iinfo->i_lenAlloc);
		flush_dcache_page(page);
		SetPageUptodate(page);
		kunmap(page);
	}
	down_write(&iinfo->i_data_sem);
	memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
	       iinfo->i_lenAlloc);
	iinfo->i_lenAlloc = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
	else
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
	/* from now on we have normal address_space methods */
	inode->i_data.a_ops = &udf_aops;
	up_write(&iinfo->i_data_sem);
	err = inode->i_data.a_ops->writepage(page, &udf_wbc);
	if (err) {
		/* Restore everything back so that we don't lose data... */
		lock_page(page);
		kaddr = kmap(page);
		down_write(&iinfo->i_data_sem);
		memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
		       inode->i_size);
		kunmap(page);
		unlock_page(page);
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
		inode->i_data.a_ops = &udf_adinicb_aops;
		up_write(&iinfo->i_data_sem);
	}
	page_cache_release(page);
	mark_inode_dirty(inode);

	return err;
}
struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
					   int *err)
{
	int newblock;
	struct buffer_head *dbh = NULL;
	struct kernel_lb_addr eloc;
	uint8_t alloctype;
	struct extent_position epos;

	struct udf_fileident_bh sfibh, dfibh;
	loff_t f_pos = udf_ext0_offset(inode);
	int size = udf_ext0_offset(inode) + inode->i_size;
	struct fileIdentDesc cfi, *sfi, *dfi;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		alloctype = ICBTAG_FLAG_AD_SHORT;
	else
		alloctype = ICBTAG_FLAG_AD_LONG;

	if (!inode->i_size) {
		iinfo->i_alloc_type = alloctype;
		mark_inode_dirty(inode);
		return NULL;
	}

	/* alloc block, and copy data to it */
	*block = udf_new_block(inode->i_sb, inode,
			       iinfo->i_location.partitionReferenceNum,
			       iinfo->i_location.logicalBlockNum, err);
	if (!(*block))
		return NULL;
	newblock = udf_get_pblock(inode->i_sb, *block,
				  iinfo->i_location.partitionReferenceNum, 0);
	if (!newblock)
		return NULL;
	dbh = udf_tgetblk(inode->i_sb, newblock);
	if (!dbh)
		return NULL;
	lock_buffer(dbh);
	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
	set_buffer_uptodate(dbh);
	unlock_buffer(dbh);
	mark_buffer_dirty_inode(dbh, inode);

	sfibh.soffset = sfibh.eoffset =
			f_pos & (inode->i_sb->s_blocksize - 1);
	sfibh.sbh = sfibh.ebh = NULL;
	dfibh.soffset = dfibh.eoffset = 0;
	dfibh.sbh = dfibh.ebh = dbh;
	while (f_pos < size) {
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL,
					 NULL, NULL, NULL);
		if (!sfi) {
			brelse(dbh);
			return NULL;
		}
		iinfo->i_alloc_type = alloctype;
		sfi->descTag.tagLocation = cpu_to_le32(*block);
		dfibh.soffset = dfibh.eoffset;
		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
				 sfi->fileIdent +
					le16_to_cpu(sfi->lengthOfImpUse))) {
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
			brelse(dbh);
			return NULL;
		}
	}
	mark_buffer_dirty_inode(dbh, inode);

	memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0,
	       iinfo->i_lenAlloc);
	iinfo->i_lenAlloc = 0;
	eloc.logicalBlockNum = *block;
	eloc.partitionReferenceNum =
				iinfo->i_location.partitionReferenceNum;
	iinfo->i_lenExtents = inode->i_size;
	epos.bh = NULL;
	epos.block = iinfo->i_location;
	epos.offset = udf_file_entry_alloc_offset(inode);
	udf_add_aext(inode, &epos, &eloc, inode->i_size, 0);

	brelse(epos.bh);
	mark_inode_dirty(inode);
	return dbh;
}
static int udf_get_block(struct inode *inode, sector_t block,
			 struct buffer_head *bh_result, int create)
{
	int err, new;
	sector_t phys = 0;
	struct udf_inode_info *iinfo;

	if (!create) {
		phys = udf_block_map(inode, block);
		if (phys)
			map_bh(bh_result, inode->i_sb, phys);
		return 0;
	}

	err = -EIO;
	new = 0;
	iinfo = UDF_I(inode);

	down_write(&iinfo->i_data_sem);
	if (block == iinfo->i_next_alloc_block + 1) {
		iinfo->i_next_alloc_block++;
		iinfo->i_next_alloc_goal++;
	}

	udf_clear_extent_cache(inode);
	phys = inode_getblk(inode, block, &err, &new);
	if (!phys)
		goto abort;

	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, inode->i_sb, phys);

abort:
	up_write(&iinfo->i_data_sem);
	return err;
}
static struct buffer_head *udf_getblk(struct inode *inode, long block,
				      int create, int *err)
{
	struct buffer_head *bh;
	struct buffer_head dummy;

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	*err = udf_get_block(inode, block, &dummy, create);
	if (!*err && buffer_mapped(&dummy)) {
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (buffer_new(&dummy)) {
			lock_buffer(bh);
			memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
			unlock_buffer(bh);
			mark_buffer_dirty_inode(bh, inode);
		}
		return bh;
	}

	return NULL;
}
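/*
 * In the allocation descriptors the top two bits of extLength encode the
 * extent type (EXT_RECORDED_ALLOCATED, EXT_NOT_RECORDED_ALLOCATED,
 * EXT_NOT_RECORDED_NOT_ALLOCATED, EXT_NEXT_EXTENT_ALLOCDECS) and the low
 * 30 bits (UDF_EXTENT_LENGTH_MASK) hold the length in bytes - hence the
 * ">> 30" shifts and masking done by the extent helpers below.
 */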
/* Extend the file by 'blocks' blocks, return the number of extents added */
static int udf_do_extend_file(struct inode *inode,
			      struct extent_position *last_pos,
			      struct kernel_long_ad *last_ext,
			      sector_t blocks)
{
	sector_t add;
	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
	struct super_block *sb = inode->i_sb;
	struct kernel_lb_addr prealloc_loc = {};
	int prealloc_len = 0;
	struct udf_inode_info *iinfo;
	int err;

	/* The previous extent is fake and we should not extend by anything
	 * - there's nothing to do... */
	if (!blocks && fake)
		return 0;

	iinfo = UDF_I(inode);
	/* Round the last extent up to a multiple of block size */
	if (last_ext->extLength & (sb->s_blocksize - 1)) {
		last_ext->extLength =
			(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
			(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
			  sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
		iinfo->i_lenExtents =
			(iinfo->i_lenExtents + sb->s_blocksize - 1) &
			~(sb->s_blocksize - 1);
	}

	/* Last extent are just preallocated blocks? */
	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
						EXT_NOT_RECORDED_ALLOCATED) {
		/* Save the extent so that we can reattach it to the end */
		prealloc_loc = last_ext->extLocation;
		prealloc_len = last_ext->extLength;
		/* Mark the extent as a hole */
		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
		last_ext->extLocation.logicalBlockNum = 0;
		last_ext->extLocation.partitionReferenceNum = 0;
	}

	/* Can we merge with the previous extent? */
	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
					EXT_NOT_RECORDED_NOT_ALLOCATED) {
		add = ((1 << 30) - sb->s_blocksize -
			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >>
			sb->s_blocksize_bits;
		if (add > blocks)
			add = blocks;
		blocks -= add;
		last_ext->extLength += add << sb->s_blocksize_bits;
	}

	if (fake) {
		udf_add_aext(inode, last_pos, &last_ext->extLocation,
			     last_ext->extLength, 1);
		count++;
	} else
		udf_write_aext(inode, last_pos, &last_ext->extLocation,
			       last_ext->extLength, 1);

	/* Managed to do everything necessary? */
	if (!blocks)
		goto out;

	/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
	last_ext->extLocation.logicalBlockNum = 0;
	last_ext->extLocation.partitionReferenceNum = 0;
	add = (1 << (30-sb->s_blocksize_bits)) - 1;
	last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
				(add << sb->s_blocksize_bits);

	/* Create enough extents to cover the whole hole */
	while (blocks > add) {
		blocks -= add;
		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
				   last_ext->extLength, 1);
		if (err)
			return err;
		count++;
	}
	if (blocks) {
		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
			(blocks << sb->s_blocksize_bits);
		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
				   last_ext->extLength, 1);
		if (err)
			return err;
		count++;
	}

out:
	/* Do we have some preallocated blocks saved? */
	if (prealloc_len) {
		err = udf_add_aext(inode, last_pos, &prealloc_loc,
				   prealloc_len, 1);
		if (err)
			return err;
		last_ext->extLocation = prealloc_loc;
		last_ext->extLength = prealloc_len;
		count++;
	}

	/* last_pos should point to the last written extent... */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		last_pos->offset -= sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		last_pos->offset -= sizeof(struct long_ad);
	else
		return -EIO;

	return count;
}
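/*
 * Extend the file to 'newsize' bytes by appending unallocated/unrecorded
 * extents behind the current last extent; the descriptor manipulation is
 * done by udf_do_extend_file() above.
 */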
static int udf_extend_file(struct inode *inode, loff_t newsize)
{
	struct extent_position epos;
	struct kernel_lb_addr eloc;
	uint32_t elen;
	int8_t etype;
	struct super_block *sb = inode->i_sb;
	sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
	int adsize;
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct kernel_long_ad extent;
	int err;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		BUG();

	etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);

	/* File has extent covering the new size (could happen when extending
	 * inside a block)? */
	if (etype != -1)
		return 0;
	if (newsize & (sb->s_blocksize - 1))
		offset++;
	/* Extended file just to the boundary of the last file block? */
	if (offset == 0)
		return 0;

	/* Truncate is extending the file by 'offset' blocks */
	if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
	    (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
		/* File has no extents at all or has empty last
		 * indirect extent! Create a fake extent... */
		extent.extLocation.logicalBlockNum = 0;
		extent.extLocation.partitionReferenceNum = 0;
		extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
	} else {
		epos.offset -= adsize;
		etype = udf_next_aext(inode, &epos, &extent.extLocation,
				      &extent.extLength, 0);
		extent.extLength |= etype << 30;
	}
	err = udf_do_extend_file(inode, &epos, &extent, offset);
	if (err < 0)
		goto out;
	err = 0;
	iinfo->i_lenExtents = newsize;
out:
	brelse(epos.bh);
	return err;
}
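/*
 * Map file block 'block' to a physical block, allocating one if necessary.
 * The candidate extents being split, preallocated and merged are collected
 * in laarr[] (at most EXTENT_MERGE_SIZE entries) and written back through
 * udf_update_extents().
 */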
static sector_t inode_getblk(struct inode *inode, sector_t block,
			     int *err, int *new)
{
	struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
	struct extent_position prev_epos, cur_epos, next_epos;
	int count = 0, startnum = 0, endnum = 0;
	uint32_t elen = 0, tmpelen;
	struct kernel_lb_addr eloc, tmpeloc;
	int c = 1;
	loff_t lbcount = 0, b_off = 0;
	uint32_t newblocknum, newblock;
	sector_t offset = 0;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(inode);
	int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
	int lastblock = 0;

	*err = 0;
	*new = 0;
	prev_epos.offset = udf_file_entry_alloc_offset(inode);
	prev_epos.block = iinfo->i_location;
	prev_epos.bh = NULL;
	cur_epos = next_epos = prev_epos;
	b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;

	/* find the extent which contains the block we are looking for.
	   alternate between laarr[0] and laarr[1] for locations of the
	   current extent, and the previous extent */
	do {
		if (prev_epos.bh != cur_epos.bh) {
			brelse(prev_epos.bh);
			get_bh(cur_epos.bh);
			prev_epos.bh = cur_epos.bh;
		}
		if (cur_epos.bh != next_epos.bh) {
			brelse(cur_epos.bh);
			get_bh(next_epos.bh);
			cur_epos.bh = next_epos.bh;
		}

		lbcount += elen;

		prev_epos.block = cur_epos.block;
		cur_epos.block = next_epos.block;

		prev_epos.offset = cur_epos.offset;
		cur_epos.offset = next_epos.offset;

		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
		if (etype == -1)
			break;

		c = !c;

		laarr[c].extLength = (etype << 30) | elen;
		laarr[c].extLocation = eloc;

		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			pgoal = eloc.logicalBlockNum +
				((elen + inode->i_sb->s_blocksize - 1) >>
				 inode->i_sb->s_blocksize_bits);

		count++;
	} while (lbcount + elen <= b_off);

	b_off -= lbcount;
	offset = b_off >> inode->i_sb->s_blocksize_bits;
	/*
	 * Move prev_epos and cur_epos into indirect extent if we are at
	 * the pointer to it
	 */
	udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
	udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);

	/* if the extent is allocated and recorded, return the block
	   if the extent is not a multiple of the blocksize, round up */
	if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
		if (elen & (inode->i_sb->s_blocksize - 1)) {
			elen = EXT_RECORDED_ALLOCATED |
				((elen + inode->i_sb->s_blocksize - 1) &
				 ~(inode->i_sb->s_blocksize - 1));
			udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
		}
		brelse(prev_epos.bh);
		brelse(cur_epos.bh);
		brelse(next_epos.bh);
		newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
		return newblock;
	}

	/* Are we beyond EOF? */
	if (etype == -1) {
		int ret;

		if (count) {
			if (c)
				laarr[0] = laarr[1];
			startnum = 1;
		} else {
			/* Create a fake extent when there's not one */
			memset(&laarr[0].extLocation, 0x00,
			       sizeof(struct kernel_lb_addr));
			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
			/* Will udf_do_extend_file() create real extent from
			   a fake one? */
			startnum = (offset > 0);
		}
		/* Create extents for the hole between EOF and offset */
		ret = udf_do_extend_file(inode, &prev_epos, laarr, offset);
		if (ret < 0) {
			brelse(prev_epos.bh);
			brelse(cur_epos.bh);
			brelse(next_epos.bh);
			*err = ret;
			return 0;
		}
		c = 0;
		offset = 0;
		count += ret;
		/* We are not covered by a preallocated extent? */
		if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
						EXT_NOT_RECORDED_ALLOCATED) {
			/* Is there any real extent? - otherwise we overwrite
			 * the fake one... */
			if (count)
				c = !c;
			laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
				inode->i_sb->s_blocksize;
			memset(&laarr[c].extLocation, 0x00,
			       sizeof(struct kernel_lb_addr));
			count++;
		}
		endnum = c + 1;
		lastblock = 1;
	} else {
		endnum = startnum = ((count > 2) ? 2 : count);

		/* if the current extent is in position 0,
		   swap it with the previous */
		if (!c && count != 1) {
			laarr[2] = laarr[0];
			laarr[0] = laarr[1];
			laarr[1] = laarr[2];
			c = 1;
		}

		/* if the current block is located in an extent,
		   read the next extent */
		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
		if (etype != -1) {
			laarr[c + 1].extLength = (etype << 30) | elen;
			laarr[c + 1].extLocation = eloc;
			count++;
			startnum++;
			endnum++;
		} else
			lastblock = 1;
	}

	/* if the current extent is not recorded but allocated, get the
	 * block in the extent corresponding to the requested block */
	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
	else { /* otherwise, allocate a new block */
		if (iinfo->i_next_alloc_block == block)
			goal = iinfo->i_next_alloc_goal;

		if (!goal) {
			if (!(goal = pgoal)) /* XXX: what was intended here? */
				goal = iinfo->i_location.logicalBlockNum + 1;
		}

		newblocknum = udf_new_block(inode->i_sb, inode,
				iinfo->i_location.partitionReferenceNum,
				goal, err);
		if (!newblocknum) {
			brelse(prev_epos.bh);
			brelse(cur_epos.bh);
			brelse(next_epos.bh);
			*err = -ENOSPC;
			return 0;
		}
		iinfo->i_lenExtents += inode->i_sb->s_blocksize;
	}

	/* if the extent the requested block is located in contains multiple
	 * blocks, split the extent into at most three extents. blocks prior
	 * to requested block, requested block, and blocks after requested
	 * block */
	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);

#ifdef UDF_PREALLOCATE
	/* We preallocate blocks only for regular files. It also makes sense
	 * for directories but there's a problem when to drop the
	 * preallocation. We might use some delayed work for that but I feel
	 * it's overengineering for a filesystem like UDF. */
	if (S_ISREG(inode->i_mode))
		udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
#endif /* UDF_PREALLOCATE */

	/* merge any continuous blocks in laarr */
	udf_merge_extents(inode, laarr, &endnum);

	/* write back the new extents, inserting new extents if the new number
	 * of extents is greater than the old number, and deleting extents if
	 * the new number of extents is less than the old number */
	udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);

	brelse(prev_epos.bh);
	brelse(cur_epos.bh);
	brelse(next_epos.bh);

	newblock = udf_get_pblock(inode->i_sb, newblocknum,
				  iinfo->i_location.partitionReferenceNum, 0);
	*new = 1;
	iinfo->i_next_alloc_block = block;
	iinfo->i_next_alloc_goal = newblocknum;
	inode->i_ctime = current_fs_time(inode->i_sb);

	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);

	return newblock;
}
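/*
 * Split the extent laarr[*c] so that the requested block gets its own
 * single-block EXT_RECORDED_ALLOCATED extent, with the blocks before and
 * after it kept in separate extents of the original type.
 */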
static void udf_split_extents(struct inode *inode, int *c, int offset,
			      int newblocknum,
			      struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
			      int *endnum)
{
	unsigned long blocksize = inode->i_sb->s_blocksize;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;

	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
	    (laarr[*c].extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
		int curr = *c;
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			    blocksize - 1) >> blocksize_bits;
		int8_t etype = (laarr[curr].extLength >> 30);

		if (blen == 1)
			;
		else if (!offset || blen == offset + 1) {
			laarr[curr + 2] = laarr[curr + 1];
			laarr[curr + 1] = laarr[curr];
		} else {
			laarr[curr + 3] = laarr[curr + 1];
			laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
		}

		if (offset) {
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
				udf_free_blocks(inode->i_sb, inode,
						&laarr[curr].extLocation,
						0, offset);
				laarr[curr].extLength =
					EXT_NOT_RECORDED_NOT_ALLOCATED |
					(offset << blocksize_bits);
				laarr[curr].extLocation.logicalBlockNum = 0;
				laarr[curr].extLocation.
						partitionReferenceNum = 0;
			} else
				laarr[curr].extLength = (etype << 30) |
					(offset << blocksize_bits);
			curr++;
			(*c)++;
			(*endnum)++;
		}

		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			laarr[curr].extLocation.partitionReferenceNum =
				UDF_I(inode)->i_location.partitionReferenceNum;
		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
			blocksize;
		curr++;

		if (blen != offset + 1) {
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
				laarr[curr].extLocation.logicalBlockNum +=
								offset + 1;
			laarr[curr].extLength = (etype << 30) |
				((blen - (offset + 1)) << blocksize_bits);
			curr++;
			(*endnum)++;
		}
	}
}
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
				 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
				 int *endnum)
{
	int start, length = 0, currlength = 0, i;

	if (*endnum >= (c + 1)) {
		if (!lastblock)
			return;
		else
			start = c;
	} else {
		if ((laarr[c + 1].extLength >> 30) ==
					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			start = c + 1;
			length = currlength =
				(((laarr[c + 1].extLength &
					UDF_EXTENT_LENGTH_MASK) +
				  inode->i_sb->s_blocksize - 1) >>
				  inode->i_sb->s_blocksize_bits);
		} else
			start = c;
	}

	for (i = start + 1; i <= *endnum; i++) {
		if (i == *endnum) {
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		} else if ((laarr[i].extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
			length += (((laarr[i].extLength &
						UDF_EXTENT_LENGTH_MASK) +
				    inode->i_sb->s_blocksize - 1) >>
				    inode->i_sb->s_blocksize_bits);
		} else
			break;
	}

	if (length) {
		int next = laarr[start].extLocation.logicalBlockNum +
			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
			  inode->i_sb->s_blocksize - 1) >>
			  inode->i_sb->s_blocksize_bits);
		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
				laarr[start].extLocation.partitionReferenceNum,
				next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
				       length : UDF_DEFAULT_PREALLOC_BLOCKS) -
				       currlength);
		if (numalloc) {
			if (start == (c + 1))
				laarr[start].extLength +=
					(numalloc <<
					 inode->i_sb->s_blocksize_bits);
			else {
				memmove(&laarr[c + 2], &laarr[c + 1],
					sizeof(struct long_ad) *
					(*endnum - (c + 1)));
				(*endnum)++;
				laarr[c + 1].extLocation.logicalBlockNum = next;
				laarr[c + 1].extLocation.partitionReferenceNum =
					laarr[c].extLocation.
							partitionReferenceNum;
				laarr[c + 1].extLength =
					EXT_NOT_RECORDED_ALLOCATED |
					(numalloc <<
					 inode->i_sb->s_blocksize_bits);
				start = c + 1;
			}

			for (i = start + 1; numalloc && i < *endnum; i++) {
				int elen = ((laarr[i].extLength &
						UDF_EXTENT_LENGTH_MASK) +
					    inode->i_sb->s_blocksize - 1) >>
					    inode->i_sb->s_blocksize_bits;

				if (elen > numalloc) {
					laarr[i].extLength -=
						(numalloc <<
						 inode->i_sb->s_blocksize_bits);
					numalloc = 0;
				} else {
					numalloc -= elen;
					if (*endnum > (i + 1))
						memmove(&laarr[i],
							&laarr[i + 1],
							sizeof(struct long_ad) *
							(*endnum - (i + 1)));
					i--;
					(*endnum)--;
				}
			}
			UDF_I(inode)->i_lenExtents +=
				numalloc << inode->i_sb->s_blocksize_bits;
		}
	}
}
static void udf_merge_extents(struct inode *inode,
			      struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
			      int *endnum)
{
	int i;
	unsigned long blocksize = inode->i_sb->s_blocksize;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;

	for (i = 0; i < (*endnum - 1); i++) {
		struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
		struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];

		if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
			(((li->extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
			((lip1->extLocation.logicalBlockNum -
			  li->extLocation.logicalBlockNum) ==
			(((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			  blocksize - 1) >> blocksize_bits)))) {

			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
			     blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
				lip1->extLength = (lip1->extLength -
						   (li->extLength &
						    UDF_EXTENT_LENGTH_MASK) +
						   UDF_EXTENT_LENGTH_MASK) &
						   ~(blocksize - 1);
				li->extLength = (li->extLength &
						 UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) -
						blocksize;
				lip1->extLocation.logicalBlockNum =
					li->extLocation.logicalBlockNum +
					((li->extLength &
					  UDF_EXTENT_LENGTH_MASK) >>
					 blocksize_bits);
			} else {
				li->extLength = lip1->extLength +
					(((li->extLength &
					   UDF_EXTENT_LENGTH_MASK) +
					  blocksize - 1) & ~(blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(struct long_ad) *
						(*endnum - (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if (((li->extLength >> 30) ==
				(EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
			   ((lip1->extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
			udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
					((li->extLength &
					  UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) >> blocksize_bits);
			li->extLocation.logicalBlockNum = 0;
			li->extLocation.partitionReferenceNum = 0;

			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
			     blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
				lip1->extLength = (lip1->extLength -
						   (li->extLength &
						    UDF_EXTENT_LENGTH_MASK) +
						   UDF_EXTENT_LENGTH_MASK) &
						   ~(blocksize - 1);
				li->extLength = (li->extLength &
						 UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) -
						blocksize;
			} else {
				li->extLength = lip1->extLength +
					(((li->extLength &
					   UDF_EXTENT_LENGTH_MASK) +
					  blocksize - 1) & ~(blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(struct long_ad) *
						(*endnum - (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if ((li->extLength >> 30) ==
					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			udf_free_blocks(inode->i_sb, inode,
					&li->extLocation, 0,
					((li->extLength &
					  UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) >> blocksize_bits);
			li->extLocation.logicalBlockNum = 0;
			li->extLocation.partitionReferenceNum = 0;
			li->extLength = (li->extLength &
					 UDF_EXTENT_LENGTH_MASK) |
					EXT_NOT_RECORDED_NOT_ALLOCATED;
		}
	}
}
static void udf_update_extents(struct inode *inode,
			       struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
			       int startnum, int endnum,
			       struct extent_position *epos)
{
	int start = 0, i;
	struct kernel_lb_addr tmploc;
	uint32_t tmplen;

	if (startnum > endnum) {
		for (i = 0; i < (startnum - endnum); i++)
			udf_delete_aext(inode, *epos, laarr[i].extLocation,
					laarr[i].extLength);
	} else if (startnum < endnum) {
		for (i = 0; i < (endnum - startnum); i++) {
			udf_insert_aext(inode, *epos, laarr[i].extLocation,
					laarr[i].extLength);
			udf_next_aext(inode, epos, &laarr[i].extLocation,
				      &laarr[i].extLength, 1);
			start++;
		}
	}

	for (i = start; i < endnum; i++) {
		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
		udf_write_aext(inode, epos, &laarr[i].extLocation,
			       laarr[i].extLength, 1);
	}
}
struct buffer_head *udf_bread(struct inode *inode, int block,
			      int create, int *err)
{
	struct buffer_head *bh = NULL;

	bh = udf_getblk(inode, block, create, err);
	if (!bh)
		return NULL;

	if (buffer_uptodate(bh))
		return bh;

	ll_rw_block(READ, 1, &bh);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;

	brelse(bh);
	*err = -EIO;
	return NULL;
}
int udf_setsize(struct inode *inode, loff_t newsize)
{
	int err;
	struct udf_inode_info *iinfo;
	int bsize = 1 << inode->i_blkbits;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	iinfo = UDF_I(inode);
	if (newsize > inode->i_size) {
		down_write(&iinfo->i_data_sem);
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			if (bsize <
			    (udf_file_entry_alloc_offset(inode) + newsize)) {
				err = udf_expand_file_adinicb(inode);
				if (err)
					return err;
				down_write(&iinfo->i_data_sem);
			} else {
				iinfo->i_lenAlloc = newsize;
				goto set_size;
			}
		}
		err = udf_extend_file(inode, newsize);
		if (err) {
			up_write(&iinfo->i_data_sem);
			return err;
		}
set_size:
		truncate_setsize(inode, newsize);
		up_write(&iinfo->i_data_sem);
	} else {
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			down_write(&iinfo->i_data_sem);
			udf_clear_extent_cache(inode);
			memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
			       0x00, bsize - newsize -
			       udf_file_entry_alloc_offset(inode));
			iinfo->i_lenAlloc = newsize;
			truncate_setsize(inode, newsize);
			up_write(&iinfo->i_data_sem);
			goto update_time;
		}
		err = block_truncate_page(inode->i_mapping, newsize,
					  udf_get_block);
		if (err)
			return err;
		down_write(&iinfo->i_data_sem);
		udf_clear_extent_cache(inode);
		truncate_setsize(inode, newsize);
		udf_truncate_extents(inode);
		up_write(&iinfo->i_data_sem);
	}
update_time:
	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return 0;
}
/*
 * Maximum length of linked list formed by ICB hierarchy. The chosen number is
 * arbitrary - just that we hopefully don't limit any real use of rewritten
 * inode on write-once media but avoid looping for too long on corrupted media.
 */
#define UDF_MAX_ICB_NESTING 1024
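/*
 * Read an (extended) file entry from disk and set the in-core inode up from
 * it. For strategy 4096 the chain of indirect ICBs is followed, but only up
 * to UDF_MAX_ICB_NESTING entries to avoid looping on corrupted media.
 */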
static int udf_read_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint16_t ident;
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	struct kernel_lb_addr *iloc = &iinfo->i_location;
	unsigned int link_count;
	unsigned int indirections = 0;
	int ret = -EIO;

reread:
	if (iloc->logicalBlockNum >=
	    sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
		udf_debug("block=%d, partition=%d out of range\n",
			  iloc->logicalBlockNum, iloc->partitionReferenceNum);
		return -EIO;
	}

	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *      i_sb = sb
	 *      i_no = ino
	 *      i_flags = sb->s_flags
	 *      i_state = 0
	 * clean_inode(): zero fills and sets
	 *      i_count = 1
	 *      i_nlink = 1
	 *      i_op = NULL;
	 */
	bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
	if (!bh) {
		udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
		return -EIO;
	}

	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
	    ident != TAG_IDENT_USE) {
		udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
			inode->i_ino, ident);
		goto out;
	}

	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
		struct buffer_head *ibh;

		ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
		if (ident == TAG_IDENT_IE && ibh) {
			struct kernel_lb_addr loc;
			struct indirectEntry *ie;

			ie = (struct indirectEntry *)ibh->b_data;
			loc = lelb_to_cpu(ie->indirectICB.extLocation);

			if (ie->indirectICB.extLength) {
				brelse(ibh);
				memcpy(&iinfo->i_location, &loc,
				       sizeof(struct kernel_lb_addr));
				if (++indirections > UDF_MAX_ICB_NESTING) {
					udf_err(inode->i_sb,
						"too many ICBs in ICB hierarchy"
						" (max %d supported)\n",
						UDF_MAX_ICB_NESTING);
					goto out;
				}
				brelse(bh);
				goto reread;
			}
		}
		brelse(ibh);
	} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
		udf_err(inode->i_sb, "unsupported strategy type: %d\n",
			le16_to_cpu(fe->icbTag.strategyType));
		goto out;
	}
	if (fe->icbTag.strategyType == cpu_to_le16(4))
		iinfo->i_strat4096 = 0;
	else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
		iinfo->i_strat4096 = 1;

	iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
							ICBTAG_FLAG_AD_MASK;
	iinfo->i_unique = 0;
	iinfo->i_lenEAttr = 0;
	iinfo->i_lenExtents = 0;
	iinfo->i_lenAlloc = 0;
	iinfo->i_next_alloc_block = 0;
	iinfo->i_next_alloc_goal = 0;
	if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
		iinfo->i_efe = 1;
		iinfo->i_use = 0;
		ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
					sizeof(struct extendedFileEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_ext.i_data,
		       bh->b_data + sizeof(struct extendedFileEntry),
		       inode->i_sb->s_blocksize -
					sizeof(struct extendedFileEntry));
	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
		iinfo->i_efe = 0;
		iinfo->i_use = 0;
		ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
						sizeof(struct fileEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_ext.i_data,
		       bh->b_data + sizeof(struct fileEntry),
		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
		iinfo->i_efe = 0;
		iinfo->i_use = 1;
		iinfo->i_lenAlloc = le32_to_cpu(
				((struct unallocSpaceEntry *)bh->b_data)->
				 lengthAllocDescs);
		ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
					sizeof(struct unallocSpaceEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_ext.i_data,
		       bh->b_data + sizeof(struct unallocSpaceEntry),
		       inode->i_sb->s_blocksize -
					sizeof(struct unallocSpaceEntry));
		ret = 0;
		goto out;
	}

	ret = -EIO;
	read_lock(&sbi->s_cred_lock);
	i_uid_write(inode, le32_to_cpu(fe->uid));
	if (!uid_valid(inode->i_uid) ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
		inode->i_uid = UDF_SB(inode->i_sb)->s_uid;

	i_gid_write(inode, le32_to_cpu(fe->gid));
	if (!gid_valid(inode->i_gid) ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;

	if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
			sbi->s_fmode != UDF_INVALID_MODE)
		inode->i_mode = sbi->s_fmode;
	else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
			sbi->s_dmode != UDF_INVALID_MODE)
		inode->i_mode = sbi->s_dmode;
	else
		inode->i_mode = udf_convert_permissions(fe);
	inode->i_mode &= ~sbi->s_umask;
	read_unlock(&sbi->s_cred_lock);

	link_count = le16_to_cpu(fe->fileLinkCount);
	if (!link_count)
		link_count = 1;
	set_nlink(inode, link_count);

	inode->i_size = le64_to_cpu(fe->informationLength);
	iinfo->i_lenExtents = inode->i_size;

	if (iinfo->i_efe == 0) {
		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
			(inode->i_sb->s_blocksize_bits - 9);

		if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
			inode->i_atime = sbi->s_record_time;

		if (!udf_disk_stamp_to_time(&inode->i_mtime,
					    fe->modificationTime))
			inode->i_mtime = sbi->s_record_time;

		if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
			inode->i_ctime = sbi->s_record_time;

		iinfo->i_unique = le64_to_cpu(fe->uniqueID);
		iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
		iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
		iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
	} else {
		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
		    (inode->i_sb->s_blocksize_bits - 9);

		if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
			inode->i_atime = sbi->s_record_time;

		if (!udf_disk_stamp_to_time(&inode->i_mtime,
					    efe->modificationTime))
			inode->i_mtime = sbi->s_record_time;

		if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
			iinfo->i_crtime = sbi->s_record_time;

		if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
			inode->i_ctime = sbi->s_record_time;

		iinfo->i_unique = le64_to_cpu(efe->uniqueID);
		iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
		iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
		iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
	}

	switch (fe->icbTag.fileType) {
	case ICBTAG_FILE_TYPE_DIRECTORY:
		inode->i_op = &udf_dir_inode_operations;
		inode->i_fop = &udf_dir_operations;
		inode->i_mode |= S_IFDIR;
		inc_nlink(inode);
		break;
	case ICBTAG_FILE_TYPE_REALTIME:
	case ICBTAG_FILE_TYPE_REGULAR:
	case ICBTAG_FILE_TYPE_UNDEF:
	case ICBTAG_FILE_TYPE_VAT20:
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
			inode->i_data.a_ops = &udf_adinicb_aops;
		else
			inode->i_data.a_ops = &udf_aops;
		inode->i_op = &udf_file_inode_operations;
		inode->i_fop = &udf_file_operations;
		inode->i_mode |= S_IFREG;
		break;
	case ICBTAG_FILE_TYPE_BLOCK:
		inode->i_mode |= S_IFBLK;
		break;
	case ICBTAG_FILE_TYPE_CHAR:
		inode->i_mode |= S_IFCHR;
		break;
	case ICBTAG_FILE_TYPE_FIFO:
		init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
		break;
	case ICBTAG_FILE_TYPE_SOCKET:
		init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
		break;
	case ICBTAG_FILE_TYPE_SYMLINK:
		inode->i_data.a_ops = &udf_symlink_aops;
		inode->i_op = &udf_symlink_inode_operations;
		inode->i_mode = S_IFLNK | S_IRWXUGO;
		break;
	case ICBTAG_FILE_TYPE_MAIN:
		udf_debug("METADATA FILE-----\n");
		break;
	case ICBTAG_FILE_TYPE_MIRROR:
		udf_debug("METADATA MIRROR FILE-----\n");
		break;
	case ICBTAG_FILE_TYPE_BITMAP:
		udf_debug("METADATA BITMAP FILE-----\n");
		break;
	default:
		udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
			inode->i_ino, fe->icbTag.fileType);
		goto out;
	}
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (dsea) {
			init_special_inode(inode, inode->i_mode,
				MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
				      le32_to_cpu(dsea->minorDeviceIdent)));
			/* Developer ID ??? */
		} else
			goto out;
	}
	ret = 0;
out:
	brelse(bh);
	return ret;
}
static int udf_alloc_i_data(struct inode *inode, size_t size)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
	if (!iinfo->i_ext.i_data) {
		udf_err(inode->i_sb, "(ino %ld) no free memory\n",
			inode->i_ino);
		return -ENOMEM;
	}

	return 0;
}
static umode_t udf_convert_permissions(struct fileEntry *fe)
{
	umode_t mode;
	uint32_t permissions;
	uint32_t flags;

	permissions = le32_to_cpu(fe->permissions);
	flags = le16_to_cpu(fe->icbTag.flags);

	mode =	((permissions) & S_IRWXO) |
		((permissions >> 2) & S_IRWXG) |
		((permissions >> 4) & S_IRWXU) |
		((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
		((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
		((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);

	return mode;
}
int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}
static int udf_sync_inode(struct inode *inode)
{
	return udf_update_inode(inode, 1);
}
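/*
 * Serialize the in-core inode back into its on-disk file entry (FE),
 * extended file entry (EFE) or unallocated space entry (USE), recompute the
 * descriptor CRC and checksum, and write the buffer out (synchronously if
 * do_sync is set).
 */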
static int udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint64_t lb_recorded;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int err = 0;
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	struct udf_inode_info *iinfo = UDF_I(inode);

	bh = udf_tgetblk(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
	if (!bh) {
		udf_debug("getblk failure\n");
		return -ENOMEM;
	}

	lock_buffer(bh);
	memset(bh->b_data, 0, inode->i_sb->s_blocksize);
	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (iinfo->i_use) {
		struct unallocSpaceEntry *use =
			(struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
		       iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
					sizeof(struct unallocSpaceEntry));
		use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
		use->descTag.tagLocation =
				cpu_to_le32(iinfo->i_location.logicalBlockNum);
		crclen = sizeof(struct unallocSpaceEntry) +
				iinfo->i_lenAlloc - sizeof(struct tag);
		use->descTag.descCRCLength = cpu_to_le16(crclen);
		use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
							     sizeof(struct tag),
							     crclen));
		use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);

		goto out;
	}

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
		fe->uid = cpu_to_le32(-1);
	else
		fe->uid = cpu_to_le32(i_uid_read(inode));

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
		fe->gid = cpu_to_le32(-1);
	else
		fe->gid = cpu_to_le32(i_gid_read(inode));

	udfperms = ((inode->i_mode & S_IRWXO)) |
		   ((inode->i_mode & S_IRWXG) << 2) |
		   ((inode->i_mode & S_IRWXU) << 4);

	udfperms |= (le32_to_cpu(fe->permissions) &
		    (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
		     FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
		     FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
	fe->permissions = cpu_to_le32(udfperms);

	if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);

	fe->informationLength = cpu_to_le64(inode->i_size);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		struct regid *eid;
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (!dsea) {
			dsea = (struct deviceSpec *)
				udf_add_extendedattr(inode,
						     sizeof(struct deviceSpec) +
						     sizeof(struct regid),
						     12, 0x3);
			dsea->attrType = cpu_to_le32(12);
			dsea->attrSubtype = 1;
			dsea->attrLength = cpu_to_le32(
						sizeof(struct deviceSpec) +
						sizeof(struct regid));
			dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
		}
		eid = (struct regid *)dsea->impUse;
		memset(eid, 0, sizeof(struct regid));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	}

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		lb_recorded = 0; /* No extents => no blocks! */
	else
		lb_recorded =
			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
			(blocksize_bits - 9);

	if (iinfo->i_efe == 0) {
		memcpy(bh->b_data + sizeof(struct fileEntry),
		       iinfo->i_ext.i_data,
		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
		udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
		udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
		memset(&(fe->impIdent), 0, sizeof(struct regid));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(iinfo->i_unique);
		fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
		crclen = sizeof(struct fileEntry);
	} else {
		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
		       iinfo->i_ext.i_data,
		       inode->i_sb->s_blocksize -
					sizeof(struct extendedFileEntry));
		efe->objectSize = cpu_to_le64(inode->i_size);
		efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec ||
		    (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec &&
		     iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec))
			iinfo->i_crtime = inode->i_atime;

		if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec ||
		    (iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec &&
		     iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec))
			iinfo->i_crtime = inode->i_mtime;

		if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec ||
		    (iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec &&
		     iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec))
			iinfo->i_crtime = inode->i_ctime;

		udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
		udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
		udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
		udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);

		memset(&(efe->impIdent), 0, sizeof(struct regid));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(iinfo->i_unique);
		efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
		crclen = sizeof(struct extendedFileEntry);
	}

	if (iinfo->i_strat4096) {
		fe->icbTag.strategyType = cpu_to_le16(4096);
		fe->icbTag.strategyParameter = cpu_to_le16(1);
		fe->icbTag.numEntries = cpu_to_le16(2);
	} else {
		fe->icbTag.strategyType = cpu_to_le16(4);
		fe->icbTag.numEntries = cpu_to_le16(1);
	}

	if (S_ISDIR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
	else if (S_ISREG(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
	else if (S_ISLNK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
	else if (S_ISBLK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
	else if (S_ISFIFO(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
	else if (S_ISSOCK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;

	icbflags =	iinfo->i_alloc_type |
			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
			(le16_to_cpu(fe->icbTag.flags) &
				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
				  ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));

	fe->icbTag.flags = cpu_to_le16(icbflags);
	if (sbi->s_udfrev >= 0x0200)
		fe->descTag.descVersion = cpu_to_le16(3);
	else
		fe->descTag.descVersion = cpu_to_le16(2);
	fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
	fe->descTag.tagLocation = cpu_to_le32(
					iinfo->i_location.logicalBlockNum);
	crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
	fe->descTag.descCRCLength = cpu_to_le16(crclen);
	fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe +
						    sizeof(struct tag),
						    crclen));
	fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);

out:
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	/* write the data blocks */
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_write_io_error(bh)) {
			udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n",
				 inode->i_ino);
			err = -EIO;
		}
	}
	brelse(bh);

	return err;
}
struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
{
	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
	struct inode *inode = iget_locked(sb, block);
	int err;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW))
		return inode;

	memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
	err = udf_read_inode(inode);
	if (err < 0) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);

	return inode;
}
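/*
 * Append one allocation descriptor at *epos. If the current block of
 * descriptors is full, a new allocation extent descriptor (AED) block is
 * allocated and chained in through an EXT_NEXT_EXTENT_ALLOCDECS pointer.
 */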
int udf_add_aext(struct inode *inode, struct extent_position *epos,
		 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	struct short_ad *sad = NULL;
	struct long_ad *lad = NULL;
	struct allocExtDesc *aed;
	uint8_t *ptr;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh)
		ptr = iinfo->i_ext.i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
	else
		ptr = epos->bh->b_data + epos->offset;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
		unsigned char *sptr, *dptr;
		struct buffer_head *nbh;
		int err, loffset;
		struct kernel_lb_addr obloc = epos->block;

		epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
						obloc.partitionReferenceNum,
						obloc.logicalBlockNum, &err);
		if (!epos->block.logicalBlockNum)
			return -ENOSPC;
		nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
								 &epos->block,
								 0));
		if (!nbh)
			return -EIO;
		lock_buffer(nbh);
		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
		set_buffer_uptodate(nbh);
		unlock_buffer(nbh);
		mark_buffer_dirty_inode(nbh, inode);

		aed = (struct allocExtDesc *)(nbh->b_data);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
			aed->previousAllocExtLocation =
					cpu_to_le32(obloc.logicalBlockNum);
		if (epos->offset + adsize > inode->i_sb->s_blocksize) {
			loffset = epos->offset;
			aed->lengthAllocDescs = cpu_to_le32(adsize);
			sptr = ptr - adsize;
			dptr = nbh->b_data + sizeof(struct allocExtDesc);
			memcpy(dptr, sptr, adsize);
			epos->offset = sizeof(struct allocExtDesc) + adsize;
		} else {
			loffset = epos->offset + adsize;
			aed->lengthAllocDescs = cpu_to_le32(0);
			sptr = ptr;
			epos->offset = sizeof(struct allocExtDesc);

			if (epos->bh) {
				aed = (struct allocExtDesc *)epos->bh->b_data;
				le32_add_cpu(&aed->lengthAllocDescs, adsize);
			} else {
				iinfo->i_lenAlloc += adsize;
				mark_inode_dirty(inode);
			}
		}
		if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200)
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
				    epos->block.logicalBlockNum,
				    sizeof(struct tag));
		else
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
				    epos->block.logicalBlockNum,
				    sizeof(struct tag));
		switch (iinfo->i_alloc_type) {
		case ICBTAG_FLAG_AD_SHORT:
			sad = (struct short_ad *)sptr;
			sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
						     inode->i_sb->s_blocksize);
			sad->extPosition =
				cpu_to_le32(epos->block.logicalBlockNum);
			break;
		case ICBTAG_FLAG_AD_LONG:
			lad = (struct long_ad *)sptr;
			lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
						     inode->i_sb->s_blocksize);
			lad->extLocation = cpu_to_lelb(epos->block);
			memset(lad->impUse, 0x00, sizeof(lad->impUse));
			break;
		}
		if (epos->bh) {
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(epos->bh->b_data, loffset);
			else
				udf_update_tag(epos->bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(epos->bh, inode);
			brelse(epos->bh);
		} else {
			mark_inode_dirty(inode);
		}
		epos->bh = nbh;
	}

	udf_write_aext(inode, epos, eloc, elen, inc);

	if (!epos->bh) {
		iinfo->i_lenAlloc += adsize;
		mark_inode_dirty(inode);
	} else {
		aed = (struct allocExtDesc *)epos->bh->b_data;
		le32_add_cpu(&aed->lengthAllocDescs, adsize);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
			udf_update_tag(epos->bh->b_data,
				       epos->offset + (inc ? 0 : adsize));
		else
			udf_update_tag(epos->bh->b_data,
				       sizeof(struct allocExtDesc));
		mark_buffer_dirty_inode(epos->bh, inode);
	}

	return 0;
}
void udf_write_aext(struct inode *inode, struct extent_position *epos,
		    struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh)
		ptr = iinfo->i_ext.i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
	else
		ptr = epos->bh->b_data + epos->offset;

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = (struct short_ad *)ptr;
		sad->extLength = cpu_to_le32(elen);
		sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
		adsize = sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = (struct long_ad *)ptr;
		lad->extLength = cpu_to_le32(elen);
		lad->extLocation = cpu_to_lelb(*eloc);
		memset(lad->impUse, 0x00, sizeof(lad->impUse));
		adsize = sizeof(struct long_ad);
		break;
	default:
		return;
	}

	if (epos->bh) {
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
			struct allocExtDesc *aed =
				(struct allocExtDesc *)epos->bh->b_data;
			udf_update_tag(epos->bh->b_data,
				       le32_to_cpu(aed->lengthAllocDescs) +
				       sizeof(struct allocExtDesc));
		}
		mark_buffer_dirty_inode(epos->bh, inode);
	} else {
		mark_inode_dirty(inode);
	}

	if (inc)
		epos->offset += adsize;
}
int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int8_t etype;

	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
	       (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
		int block;

		epos->block = *eloc;
		epos->offset = sizeof(struct allocExtDesc);
		brelse(epos->bh);
		block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
		epos->bh = udf_tread(inode->i_sb, block);
		if (!epos->bh) {
			udf_debug("reading block %d failed!\n", block);
			return -1;
		}
	}

	return etype;
}
int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
			struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh) {
		if (!epos->offset)
			epos->offset = udf_file_entry_alloc_offset(inode);
		ptr = iinfo->i_ext.i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
		alen = udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenAlloc;
	} else {
		if (!epos->offset)
			epos->offset = sizeof(struct allocExtDesc);
		ptr = epos->bh->b_data + epos->offset;
		alen = sizeof(struct allocExtDesc) +
			le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
							lengthAllocDescs);
	}

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
		if (!sad)
			return -1;
		etype = le32_to_cpu(sad->extLength) >> 30;
		eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
		eloc->partitionReferenceNum =
				iinfo->i_location.partitionReferenceNum;
		*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
		if (!lad)
			return -1;
		etype = le32_to_cpu(lad->extLength) >> 30;
		*eloc = lelb_to_cpu(lad->extLocation);
		*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	default:
		udf_debug("alloc_type = %d unsupported\n", iinfo->i_alloc_type);
		return -1;
	}

	return etype;
}
static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
			      struct kernel_lb_addr neloc, uint32_t nelen)
{
	struct kernel_lb_addr oeloc;
	uint32_t oelen;
	int8_t etype;

	if (epos.bh)
		get_bh(epos.bh);

	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
		udf_write_aext(inode, &epos, &neloc, nelen, 1);
		neloc = oeloc;
		nelen = (etype << 30) | oelen;
	}
	udf_add_aext(inode, &epos, &neloc, nelen, 1);
	brelse(epos.bh);

	return (nelen >> 30);
}
int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
		       struct kernel_lb_addr eloc, uint32_t elen)
{
	struct extent_position oepos;
	int adsize;
	int8_t etype;
	struct allocExtDesc *aed;
	struct udf_inode_info *iinfo;

	if (epos.bh) {
		get_bh(epos.bh);
		get_bh(epos.bh);
	}

	iinfo = UDF_I(inode);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		adsize = 0;

	oepos = epos;
	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
		return -1;

	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
		udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
		if (oepos.bh != epos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = epos.offset - adsize;
		}
	}
	memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
	elen = 0;

	if (epos.bh != oepos.bh) {
		udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= (adsize * 2);
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
					       oepos.offset - (2 * adsize));
			else
				udf_update_tag(oepos.bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	} else {
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= adsize;
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
					       epos.offset - adsize);
			else
				udf_update_tag(oepos.bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

	return (elen >> 30);
}
*inode
, sector_t block
,
2211 struct extent_position
*pos
, struct kernel_lb_addr
*eloc
,
2212 uint32_t *elen
, sector_t
*offset
)
2214 unsigned char blocksize_bits
= inode
->i_sb
->s_blocksize_bits
;
2215 loff_t lbcount
= 0, bcount
=
2216 (loff_t
) block
<< blocksize_bits
;
2218 struct udf_inode_info
*iinfo
;
2220 iinfo
= UDF_I(inode
);
2221 if (!udf_read_extent_cache(inode
, bcount
, &lbcount
, pos
)) {
2223 pos
->block
= iinfo
->i_location
;
2228 etype
= udf_next_aext(inode
, pos
, eloc
, elen
, 1);
2230 *offset
= (bcount
- lbcount
) >> blocksize_bits
;
2231 iinfo
->i_lenExtents
= lbcount
;
2235 } while (lbcount
<= bcount
);
2236 /* update extent cache */
2237 udf_update_extent_cache(inode
, lbcount
- *elen
, pos
, 1);
2238 *offset
= (bcount
+ *elen
- lbcount
) >> blocksize_bits
;
long udf_block_map(struct inode *inode, sector_t block)
{
	struct kernel_lb_addr eloc;
	uint32_t elen;
	sector_t offset;
	struct extent_position epos = {};
	int ret;

	down_read(&UDF_I(inode)->i_data_sem);

	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
						(EXT_RECORDED_ALLOCATED >> 30))
		ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
	else
		ret = 0;

	up_read(&UDF_I(inode)->i_data_sem);
	brelse(epos.bh);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
		return udf_fixed_to_variable(ret);
	else
		return ret;
}