/*
 * inode.c
 *
 * PURPOSE
 *	Inode handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *	(C) 1998 Dave Boynton
 *	(C) 1998-2004 Ben Fennema
 *	(C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *	10/04/98 dgb	Added rudimentary directory functions
 *	10/07/98	Fully working udf_block_map! It works!
 *	11/25/98	bmap altered to better support extents
 *	12/06/98 blf	partition support in udf_iget, udf_block_map
 *	12/12/98	rewrote udf_block_map to handle next extents and descs
 *			across block boundaries (which is not actually allowed)
 *	12/20/98	added support for strategy 4096
 *	03/07/99	rewrote udf_block_map (again)
 *			New funcs, inode_bmap, udf_next_aext
 *	04/19/99	Support for writing device EA's for major/minor #
 */

#include "udfdecl.h"
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/crc-itu-t.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>

#include "udf_i.h"
#include "udf_sb.h"

#define EXTENT_MERGE_SIZE 5

static umode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static int udf_sync_inode(struct inode *inode);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
static int8_t udf_insert_aext(struct inode *, struct extent_position,
			      struct kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, udf_pblk_t,
			      struct kernel_long_ad *, int *);
static void udf_prealloc_extents(struct inode *, int, int,
				 struct kernel_long_ad *, int *);
static void udf_merge_extents(struct inode *, struct kernel_long_ad *, int *);
static void udf_update_extents(struct inode *, struct kernel_long_ad *, int,
			       int, struct extent_position *);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);

static void __udf_clear_extent_cache(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (iinfo->cached_extent.lstart != -1) {
		brelse(iinfo->cached_extent.epos.bh);
		iinfo->cached_extent.lstart = -1;
	}
}

/* Invalidate extent cache */
static void udf_clear_extent_cache(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	spin_lock(&iinfo->i_extent_cache_lock);
	__udf_clear_extent_cache(inode);
	spin_unlock(&iinfo->i_extent_cache_lock);
}

/* Return contents of extent cache */
static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
				 loff_t *lbcount, struct extent_position *pos)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	int ret = 0;

	spin_lock(&iinfo->i_extent_cache_lock);
	if ((iinfo->cached_extent.lstart <= bcount) &&
	    (iinfo->cached_extent.lstart != -1)) {
		/* Cached extent is usable, hand it back */
		*lbcount = iinfo->cached_extent.lstart;
		memcpy(pos, &iinfo->cached_extent.epos,
		       sizeof(struct extent_position));
		if (pos->bh)
			get_bh(pos->bh);
		ret = 1;
	}
	spin_unlock(&iinfo->i_extent_cache_lock);

	return ret;
}

/* Add extent to extent cache */
static void udf_update_extent_cache(struct inode *inode, loff_t estart,
				    struct extent_position *pos)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	spin_lock(&iinfo->i_extent_cache_lock);
	/* Invalidate previously cached extent */
	__udf_clear_extent_cache(inode);
	if (pos->bh)
		get_bh(pos->bh);
	memcpy(&iinfo->cached_extent.epos, pos, sizeof(*pos));
	iinfo->cached_extent.lstart = estart;
	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		iinfo->cached_extent.epos.offset -= sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		iinfo->cached_extent.epos.offset -= sizeof(struct long_ad);
		break;
	}
	spin_unlock(&iinfo->i_extent_cache_lock);
}

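/*
 * Note on the extent cache above: each inode caches at most one extent
 * (lstart -> epos), protected by i_extent_cache_lock.  The cached offset is
 * rewound by one descriptor so that the next udf_next_aext() call re-reads
 * the cached extent itself.
 */
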
void udf_evict_inode(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		want_delete = 1;
		udf_setsize(inode, 0);
		udf_update_inode(inode, IS_SYNC(inode));
	}
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
	    inode->i_size != iinfo->i_lenExtents) {
		udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
			 inode->i_ino, inode->i_mode,
			 (unsigned long long)inode->i_size,
			 (unsigned long long)iinfo->i_lenExtents);
	}
	kfree(iinfo->i_ext.i_data);
	iinfo->i_ext.i_data = NULL;
	udf_clear_extent_cache(inode);
	if (want_delete)
		udf_free_inode(inode);
}

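/*
 * Error path shared by the buffered and direct write code below: drop page
 * cache beyond i_size and free any extents that were instantiated past it.
 */
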
static void udf_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	struct udf_inode_info *iinfo = UDF_I(inode);
	loff_t isize = inode->i_size;

	if (to > isize) {
		truncate_pagecache(inode, isize);
		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
			down_write(&iinfo->i_data_sem);
			udf_clear_extent_cache(inode);
			udf_truncate_extents(inode);
			up_write(&iinfo->i_data_sem);
		}
	}
}

static int udf_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, udf_get_block, wbc);
}

static int udf_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, udf_get_block);
}

static int udf_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, udf_get_block);
}

static int udf_readpages(struct file *file, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, udf_get_block);
}

static int udf_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned flags,
			   struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
	if (unlikely(ret))
		udf_write_failed(mapping, pos + len);

	return ret;
}

static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block);
	if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
		udf_write_failed(mapping, iocb->ki_pos + count);

	return ret;
}

static sector_t udf_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, udf_get_block);
}

const struct address_space_operations udf_aops = {
	.readpage	= udf_readpage,
	.readpages	= udf_readpages,
	.writepage	= udf_writepage,
	.writepages	= udf_writepages,
	.write_begin	= udf_write_begin,
	.write_end	= generic_write_end,
	.direct_IO	= udf_direct_IO,
	.bmap		= udf_bmap,
};

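/*
 * All of the udf_aops callbacks above funnel block lookups through
 * udf_get_block().  Inodes whose data still lives inside the ICB use
 * udf_adinicb_aops instead, until udf_expand_file_adinicb() converts them.
 */
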
/*
 * Expand file stored in ICB to a normal one-block-file
 *
 * This function requires i_data_sem for writing and releases it.
 * This function requires i_mutex held
 */
int udf_expand_file_adinicb(struct inode *inode)
{
	struct page *page;
	char *kaddr;
	struct udf_inode_info *iinfo = UDF_I(inode);
	int err;
	struct writeback_control udf_wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
	};

	WARN_ON_ONCE(!inode_is_locked(inode));
	if (!iinfo->i_lenAlloc) {
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
		else
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
		/* from now on we have normal address_space methods */
		inode->i_data.a_ops = &udf_aops;
		up_write(&iinfo->i_data_sem);
		mark_inode_dirty(inode);
		return 0;
	}
	/*
	 * Release i_data_sem so that we can lock a page - page lock ranks
	 * above i_data_sem. i_mutex still protects us against file changes.
	 */
	up_write(&iinfo->i_data_sem);

	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page)) {
		kaddr = kmap_atomic(page);
		memset(kaddr + iinfo->i_lenAlloc, 0x00,
		       PAGE_SIZE - iinfo->i_lenAlloc);
		memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
		       iinfo->i_lenAlloc);
		flush_dcache_page(page);
		SetPageUptodate(page);
		kunmap_atomic(kaddr);
	}
	down_write(&iinfo->i_data_sem);
	memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
	       iinfo->i_lenAlloc);
	iinfo->i_lenAlloc = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
	else
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
	/* from now on we have normal address_space methods */
	inode->i_data.a_ops = &udf_aops;
	up_write(&iinfo->i_data_sem);
	err = inode->i_data.a_ops->writepage(page, &udf_wbc);
	if (err) {
		/* Restore everything back so that we don't lose data... */
		lock_page(page);
		down_write(&iinfo->i_data_sem);
		kaddr = kmap_atomic(page);
		memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
		       inode->i_size);
		kunmap_atomic(kaddr);
		unlock_page(page);
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
		inode->i_data.a_ops = &udf_adinicb_aops;
		up_write(&iinfo->i_data_sem);
	}
	put_page(page);
	mark_inode_dirty(inode);

	return err;
}

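/*
 * Callers such as udf_setsize() invoke udf_expand_file_adinicb() once the
 * in-ICB data would no longer fit next to the file entry, so that the
 * regular address_space operations can take over.
 */
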
319 struct buffer_head
*udf_expand_dir_adinicb(struct inode
*inode
,
320 udf_pblk_t
*block
, int *err
)
323 struct buffer_head
*dbh
= NULL
;
324 struct kernel_lb_addr eloc
;
326 struct extent_position epos
;
328 struct udf_fileident_bh sfibh
, dfibh
;
329 loff_t f_pos
= udf_ext0_offset(inode
);
330 int size
= udf_ext0_offset(inode
) + inode
->i_size
;
331 struct fileIdentDesc cfi
, *sfi
, *dfi
;
332 struct udf_inode_info
*iinfo
= UDF_I(inode
);
334 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_USE_SHORT_AD
))
335 alloctype
= ICBTAG_FLAG_AD_SHORT
;
337 alloctype
= ICBTAG_FLAG_AD_LONG
;
339 if (!inode
->i_size
) {
340 iinfo
->i_alloc_type
= alloctype
;
341 mark_inode_dirty(inode
);
345 /* alloc block, and copy data to it */
346 *block
= udf_new_block(inode
->i_sb
, inode
,
347 iinfo
->i_location
.partitionReferenceNum
,
348 iinfo
->i_location
.logicalBlockNum
, err
);
351 newblock
= udf_get_pblock(inode
->i_sb
, *block
,
352 iinfo
->i_location
.partitionReferenceNum
,
356 dbh
= udf_tgetblk(inode
->i_sb
, newblock
);
360 memset(dbh
->b_data
, 0x00, inode
->i_sb
->s_blocksize
);
361 set_buffer_uptodate(dbh
);
363 mark_buffer_dirty_inode(dbh
, inode
);
365 sfibh
.soffset
= sfibh
.eoffset
=
366 f_pos
& (inode
->i_sb
->s_blocksize
- 1);
367 sfibh
.sbh
= sfibh
.ebh
= NULL
;
368 dfibh
.soffset
= dfibh
.eoffset
= 0;
369 dfibh
.sbh
= dfibh
.ebh
= dbh
;
370 while (f_pos
< size
) {
371 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_IN_ICB
;
372 sfi
= udf_fileident_read(inode
, &f_pos
, &sfibh
, &cfi
, NULL
,
378 iinfo
->i_alloc_type
= alloctype
;
379 sfi
->descTag
.tagLocation
= cpu_to_le32(*block
);
380 dfibh
.soffset
= dfibh
.eoffset
;
381 dfibh
.eoffset
+= (sfibh
.eoffset
- sfibh
.soffset
);
382 dfi
= (struct fileIdentDesc
*)(dbh
->b_data
+ dfibh
.soffset
);
383 if (udf_write_fi(inode
, sfi
, dfi
, &dfibh
, sfi
->impUse
,
385 le16_to_cpu(sfi
->lengthOfImpUse
))) {
386 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_IN_ICB
;
391 mark_buffer_dirty_inode(dbh
, inode
);
393 memset(iinfo
->i_ext
.i_data
+ iinfo
->i_lenEAttr
, 0,
395 iinfo
->i_lenAlloc
= 0;
396 eloc
.logicalBlockNum
= *block
;
397 eloc
.partitionReferenceNum
=
398 iinfo
->i_location
.partitionReferenceNum
;
399 iinfo
->i_lenExtents
= inode
->i_size
;
401 epos
.block
= iinfo
->i_location
;
402 epos
.offset
= udf_file_entry_alloc_offset(inode
);
403 udf_add_aext(inode
, &epos
, &eloc
, inode
->i_size
, 0);
407 mark_inode_dirty(inode
);
static int udf_get_block(struct inode *inode, sector_t block,
			 struct buffer_head *bh_result, int create)
{
	int err, new;
	sector_t phys = 0;
	struct udf_inode_info *iinfo;

	if (!create) {
		phys = udf_block_map(inode, block);
		if (phys)
			map_bh(bh_result, inode->i_sb, phys);
		return 0;
	}

	err = -EIO;
	new = 0;
	iinfo = UDF_I(inode);

	down_write(&iinfo->i_data_sem);
	if (block == iinfo->i_next_alloc_block + 1) {
		iinfo->i_next_alloc_block++;
		iinfo->i_next_alloc_goal++;
	}

	udf_clear_extent_cache(inode);
	phys = inode_getblk(inode, block, &err, &new);
	if (!phys)
		goto abort;

	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, inode->i_sb, phys);

abort:
	up_write(&iinfo->i_data_sem);
	return err;
}

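/*
 * udf_get_block() above is the get_block_t callback used by the aops: reads
 * only map already allocated blocks via udf_block_map(), while writes
 * allocate through inode_getblk() under i_data_sem.
 */
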
static struct buffer_head *udf_getblk(struct inode *inode, udf_pblk_t block,
				      int create, int *err)
{
	struct buffer_head *bh;
	struct buffer_head dummy;

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	*err = udf_get_block(inode, block, &dummy, create);
	if (!*err && buffer_mapped(&dummy)) {
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (buffer_new(&dummy)) {
			lock_buffer(bh);
			memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
			unlock_buffer(bh);
			mark_buffer_dirty_inode(bh, inode);
		}
		return bh;
	}

	return NULL;
}

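/*
 * The helpers below (udf_do_extend_file, udf_do_extend_final_block and
 * udf_extend_file) implement growing a file past its current last extent,
 * recording the new range as not-recorded (hole) extents where needed.
 */
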
473 /* Extend the file with new blocks totaling 'new_block_bytes',
474 * return the number of extents added
476 static int udf_do_extend_file(struct inode
*inode
,
477 struct extent_position
*last_pos
,
478 struct kernel_long_ad
*last_ext
,
479 loff_t new_block_bytes
)
482 int count
= 0, fake
= !(last_ext
->extLength
& UDF_EXTENT_LENGTH_MASK
);
483 struct super_block
*sb
= inode
->i_sb
;
484 struct kernel_lb_addr prealloc_loc
= {};
485 uint32_t prealloc_len
= 0;
486 struct udf_inode_info
*iinfo
;
489 /* The previous extent is fake and we should not extend by anything
490 * - there's nothing to do... */
491 if (!new_block_bytes
&& fake
)
494 iinfo
= UDF_I(inode
);
495 /* Round the last extent up to a multiple of block size */
496 if (last_ext
->extLength
& (sb
->s_blocksize
- 1)) {
497 last_ext
->extLength
=
498 (last_ext
->extLength
& UDF_EXTENT_FLAG_MASK
) |
499 (((last_ext
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
500 sb
->s_blocksize
- 1) & ~(sb
->s_blocksize
- 1));
501 iinfo
->i_lenExtents
=
502 (iinfo
->i_lenExtents
+ sb
->s_blocksize
- 1) &
503 ~(sb
->s_blocksize
- 1);
506 /* Last extent are just preallocated blocks? */
507 if ((last_ext
->extLength
& UDF_EXTENT_FLAG_MASK
) ==
508 EXT_NOT_RECORDED_ALLOCATED
) {
509 /* Save the extent so that we can reattach it to the end */
510 prealloc_loc
= last_ext
->extLocation
;
511 prealloc_len
= last_ext
->extLength
;
512 /* Mark the extent as a hole */
513 last_ext
->extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
|
514 (last_ext
->extLength
& UDF_EXTENT_LENGTH_MASK
);
515 last_ext
->extLocation
.logicalBlockNum
= 0;
516 last_ext
->extLocation
.partitionReferenceNum
= 0;
519 /* Can we merge with the previous extent? */
520 if ((last_ext
->extLength
& UDF_EXTENT_FLAG_MASK
) ==
521 EXT_NOT_RECORDED_NOT_ALLOCATED
) {
522 add
= (1 << 30) - sb
->s_blocksize
-
523 (last_ext
->extLength
& UDF_EXTENT_LENGTH_MASK
);
524 if (add
> new_block_bytes
)
525 add
= new_block_bytes
;
526 new_block_bytes
-= add
;
527 last_ext
->extLength
+= add
;
531 udf_add_aext(inode
, last_pos
, &last_ext
->extLocation
,
532 last_ext
->extLength
, 1);
535 struct kernel_lb_addr tmploc
;
538 udf_write_aext(inode
, last_pos
, &last_ext
->extLocation
,
539 last_ext
->extLength
, 1);
541 * We've rewritten the last extent but there may be empty
542 * indirect extent after it - enter it.
544 udf_next_aext(inode
, last_pos
, &tmploc
, &tmplen
, 0);
547 /* Managed to do everything necessary? */
548 if (!new_block_bytes
)
551 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
552 last_ext
->extLocation
.logicalBlockNum
= 0;
553 last_ext
->extLocation
.partitionReferenceNum
= 0;
554 add
= (1 << 30) - sb
->s_blocksize
;
555 last_ext
->extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
| add
;
557 /* Create enough extents to cover the whole hole */
558 while (new_block_bytes
> add
) {
559 new_block_bytes
-= add
;
560 err
= udf_add_aext(inode
, last_pos
, &last_ext
->extLocation
,
561 last_ext
->extLength
, 1);
566 if (new_block_bytes
) {
567 last_ext
->extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
|
569 err
= udf_add_aext(inode
, last_pos
, &last_ext
->extLocation
,
570 last_ext
->extLength
, 1);
577 /* Do we have some preallocated blocks saved? */
579 err
= udf_add_aext(inode
, last_pos
, &prealloc_loc
,
583 last_ext
->extLocation
= prealloc_loc
;
584 last_ext
->extLength
= prealloc_len
;
588 /* last_pos should point to the last written extent... */
589 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
590 last_pos
->offset
-= sizeof(struct short_ad
);
591 else if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
592 last_pos
->offset
-= sizeof(struct long_ad
);
/* Extend the final block of the file to final_block_len bytes */
static void udf_do_extend_final_block(struct inode *inode,
				      struct extent_position *last_pos,
				      struct kernel_long_ad *last_ext,
				      uint32_t final_block_len)
{
	struct super_block *sb = inode->i_sb;
	uint32_t added_bytes;

	added_bytes = final_block_len -
		      (last_ext->extLength & (sb->s_blocksize - 1));
	last_ext->extLength += added_bytes;
	UDF_I(inode)->i_lenExtents += added_bytes;

	udf_write_aext(inode, last_pos, &last_ext->extLocation,
		       last_ext->extLength, 1);
}

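/*
 * Note: udf_do_extend_final_block() only bumps the byte length of the last
 * extent within its final block; no new blocks are allocated here.
 */
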
static int udf_extend_file(struct inode *inode, loff_t newsize)
{
	struct extent_position epos;
	struct kernel_lb_addr eloc;
	uint32_t elen;
	int8_t etype;
	struct super_block *sb = inode->i_sb;
	sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
	unsigned long partial_final_block;
	int adsize;
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct kernel_long_ad extent;
	int err = 0;
	int within_final_block;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		BUG();

	etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
	within_final_block = (etype != -1);

	if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
	    (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
		/* File has no extents at all or has empty last
		 * indirect extent! Create a fake extent... */
		extent.extLocation.logicalBlockNum = 0;
		extent.extLocation.partitionReferenceNum = 0;
		extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
	} else {
		epos.offset -= adsize;
		etype = udf_next_aext(inode, &epos, &extent.extLocation,
				      &extent.extLength, 0);
		extent.extLength |= etype << 30;
	}

	partial_final_block = newsize & (sb->s_blocksize - 1);

	/* File has extent covering the new size (could happen when extending
	 * inside a block)?
	 */
	if (within_final_block) {
		/* Extending file within the last file block */
		udf_do_extend_final_block(inode, &epos, &extent,
					  partial_final_block);
	} else {
		loff_t add = ((loff_t)offset << sb->s_blocksize_bits) |
			     partial_final_block;
		err = udf_do_extend_file(inode, &epos, &extent, add);
	}

	if (err < 0)
		goto out;
	err = 0;
	iinfo->i_lenExtents = newsize;
out:
	brelse(epos.bh);
	return err;
}

681 static sector_t
inode_getblk(struct inode
*inode
, sector_t block
,
684 struct kernel_long_ad laarr
[EXTENT_MERGE_SIZE
];
685 struct extent_position prev_epos
, cur_epos
, next_epos
;
686 int count
= 0, startnum
= 0, endnum
= 0;
687 uint32_t elen
= 0, tmpelen
;
688 struct kernel_lb_addr eloc
, tmpeloc
;
690 loff_t lbcount
= 0, b_off
= 0;
691 udf_pblk_t newblocknum
, newblock
;
694 struct udf_inode_info
*iinfo
= UDF_I(inode
);
695 udf_pblk_t goal
= 0, pgoal
= iinfo
->i_location
.logicalBlockNum
;
701 prev_epos
.offset
= udf_file_entry_alloc_offset(inode
);
702 prev_epos
.block
= iinfo
->i_location
;
704 cur_epos
= next_epos
= prev_epos
;
705 b_off
= (loff_t
)block
<< inode
->i_sb
->s_blocksize_bits
;
707 /* find the extent which contains the block we are looking for.
708 alternate between laarr[0] and laarr[1] for locations of the
709 current extent, and the previous extent */
711 if (prev_epos
.bh
!= cur_epos
.bh
) {
712 brelse(prev_epos
.bh
);
714 prev_epos
.bh
= cur_epos
.bh
;
716 if (cur_epos
.bh
!= next_epos
.bh
) {
718 get_bh(next_epos
.bh
);
719 cur_epos
.bh
= next_epos
.bh
;
724 prev_epos
.block
= cur_epos
.block
;
725 cur_epos
.block
= next_epos
.block
;
727 prev_epos
.offset
= cur_epos
.offset
;
728 cur_epos
.offset
= next_epos
.offset
;
730 etype
= udf_next_aext(inode
, &next_epos
, &eloc
, &elen
, 1);
736 laarr
[c
].extLength
= (etype
<< 30) | elen
;
737 laarr
[c
].extLocation
= eloc
;
739 if (etype
!= (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30))
740 pgoal
= eloc
.logicalBlockNum
+
741 ((elen
+ inode
->i_sb
->s_blocksize
- 1) >>
742 inode
->i_sb
->s_blocksize_bits
);
745 } while (lbcount
+ elen
<= b_off
);
748 offset
= b_off
>> inode
->i_sb
->s_blocksize_bits
;
750 * Move prev_epos and cur_epos into indirect extent if we are at
753 udf_next_aext(inode
, &prev_epos
, &tmpeloc
, &tmpelen
, 0);
754 udf_next_aext(inode
, &cur_epos
, &tmpeloc
, &tmpelen
, 0);
756 /* if the extent is allocated and recorded, return the block
757 if the extent is not a multiple of the blocksize, round up */
759 if (etype
== (EXT_RECORDED_ALLOCATED
>> 30)) {
760 if (elen
& (inode
->i_sb
->s_blocksize
- 1)) {
761 elen
= EXT_RECORDED_ALLOCATED
|
762 ((elen
+ inode
->i_sb
->s_blocksize
- 1) &
763 ~(inode
->i_sb
->s_blocksize
- 1));
764 udf_write_aext(inode
, &cur_epos
, &eloc
, elen
, 1);
766 newblock
= udf_get_lb_pblock(inode
->i_sb
, &eloc
, offset
);
770 /* Are we beyond EOF? */
780 /* Create a fake extent when there's not one */
781 memset(&laarr
[0].extLocation
, 0x00,
782 sizeof(struct kernel_lb_addr
));
783 laarr
[0].extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
;
784 /* Will udf_do_extend_file() create real extent from
786 startnum
= (offset
> 0);
788 /* Create extents for the hole between EOF and offset */
789 hole_len
= (loff_t
)offset
<< inode
->i_blkbits
;
790 ret
= udf_do_extend_file(inode
, &prev_epos
, laarr
, hole_len
);
799 /* We are not covered by a preallocated extent? */
800 if ((laarr
[0].extLength
& UDF_EXTENT_FLAG_MASK
) !=
801 EXT_NOT_RECORDED_ALLOCATED
) {
802 /* Is there any real extent? - otherwise we overwrite
806 laarr
[c
].extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
|
807 inode
->i_sb
->s_blocksize
;
808 memset(&laarr
[c
].extLocation
, 0x00,
809 sizeof(struct kernel_lb_addr
));
816 endnum
= startnum
= ((count
> 2) ? 2 : count
);
818 /* if the current extent is in position 0,
819 swap it with the previous */
820 if (!c
&& count
!= 1) {
827 /* if the current block is located in an extent,
828 read the next extent */
829 etype
= udf_next_aext(inode
, &next_epos
, &eloc
, &elen
, 0);
831 laarr
[c
+ 1].extLength
= (etype
<< 30) | elen
;
832 laarr
[c
+ 1].extLocation
= eloc
;
840 /* if the current extent is not recorded but allocated, get the
841 * block in the extent corresponding to the requested block */
842 if ((laarr
[c
].extLength
>> 30) == (EXT_NOT_RECORDED_ALLOCATED
>> 30))
843 newblocknum
= laarr
[c
].extLocation
.logicalBlockNum
+ offset
;
844 else { /* otherwise, allocate a new block */
845 if (iinfo
->i_next_alloc_block
== block
)
846 goal
= iinfo
->i_next_alloc_goal
;
849 if (!(goal
= pgoal
)) /* XXX: what was intended here? */
850 goal
= iinfo
->i_location
.logicalBlockNum
+ 1;
853 newblocknum
= udf_new_block(inode
->i_sb
, inode
,
854 iinfo
->i_location
.partitionReferenceNum
,
862 iinfo
->i_lenExtents
+= inode
->i_sb
->s_blocksize
;
865 /* if the extent the requsted block is located in contains multiple
866 * blocks, split the extent into at most three extents. blocks prior
867 * to requested block, requested block, and blocks after requested
869 udf_split_extents(inode
, &c
, offset
, newblocknum
, laarr
, &endnum
);
871 /* We preallocate blocks only for regular files. It also makes sense
872 * for directories but there's a problem when to drop the
873 * preallocation. We might use some delayed work for that but I feel
874 * it's overengineering for a filesystem like UDF. */
875 if (S_ISREG(inode
->i_mode
))
876 udf_prealloc_extents(inode
, c
, lastblock
, laarr
, &endnum
);
878 /* merge any continuous blocks in laarr */
879 udf_merge_extents(inode
, laarr
, &endnum
);
881 /* write back the new extents, inserting new extents if the new number
882 * of extents is greater than the old number, and deleting extents if
883 * the new number of extents is less than the old number */
884 udf_update_extents(inode
, laarr
, startnum
, endnum
, &prev_epos
);
886 newblock
= udf_get_pblock(inode
->i_sb
, newblocknum
,
887 iinfo
->i_location
.partitionReferenceNum
, 0);
893 iinfo
->i_next_alloc_block
= block
;
894 iinfo
->i_next_alloc_goal
= newblocknum
;
895 inode
->i_ctime
= current_time(inode
);
898 udf_sync_inode(inode
);
900 mark_inode_dirty(inode
);
902 brelse(prev_epos
.bh
);
904 brelse(next_epos
.bh
);
908 static void udf_split_extents(struct inode
*inode
, int *c
, int offset
,
909 udf_pblk_t newblocknum
,
910 struct kernel_long_ad
*laarr
, int *endnum
)
912 unsigned long blocksize
= inode
->i_sb
->s_blocksize
;
913 unsigned char blocksize_bits
= inode
->i_sb
->s_blocksize_bits
;
915 if ((laarr
[*c
].extLength
>> 30) == (EXT_NOT_RECORDED_ALLOCATED
>> 30) ||
916 (laarr
[*c
].extLength
>> 30) ==
917 (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30)) {
919 int blen
= ((laarr
[curr
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
920 blocksize
- 1) >> blocksize_bits
;
921 int8_t etype
= (laarr
[curr
].extLength
>> 30);
925 else if (!offset
|| blen
== offset
+ 1) {
926 laarr
[curr
+ 2] = laarr
[curr
+ 1];
927 laarr
[curr
+ 1] = laarr
[curr
];
929 laarr
[curr
+ 3] = laarr
[curr
+ 1];
930 laarr
[curr
+ 2] = laarr
[curr
+ 1] = laarr
[curr
];
934 if (etype
== (EXT_NOT_RECORDED_ALLOCATED
>> 30)) {
935 udf_free_blocks(inode
->i_sb
, inode
,
936 &laarr
[curr
].extLocation
,
938 laarr
[curr
].extLength
=
939 EXT_NOT_RECORDED_NOT_ALLOCATED
|
940 (offset
<< blocksize_bits
);
941 laarr
[curr
].extLocation
.logicalBlockNum
= 0;
942 laarr
[curr
].extLocation
.
943 partitionReferenceNum
= 0;
945 laarr
[curr
].extLength
= (etype
<< 30) |
946 (offset
<< blocksize_bits
);
952 laarr
[curr
].extLocation
.logicalBlockNum
= newblocknum
;
953 if (etype
== (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30))
954 laarr
[curr
].extLocation
.partitionReferenceNum
=
955 UDF_I(inode
)->i_location
.partitionReferenceNum
;
956 laarr
[curr
].extLength
= EXT_RECORDED_ALLOCATED
|
960 if (blen
!= offset
+ 1) {
961 if (etype
== (EXT_NOT_RECORDED_ALLOCATED
>> 30))
962 laarr
[curr
].extLocation
.logicalBlockNum
+=
964 laarr
[curr
].extLength
= (etype
<< 30) |
965 ((blen
- (offset
+ 1)) << blocksize_bits
);
972 static void udf_prealloc_extents(struct inode
*inode
, int c
, int lastblock
,
973 struct kernel_long_ad
*laarr
,
976 int start
, length
= 0, currlength
= 0, i
;
978 if (*endnum
>= (c
+ 1)) {
984 if ((laarr
[c
+ 1].extLength
>> 30) ==
985 (EXT_NOT_RECORDED_ALLOCATED
>> 30)) {
987 length
= currlength
=
988 (((laarr
[c
+ 1].extLength
&
989 UDF_EXTENT_LENGTH_MASK
) +
990 inode
->i_sb
->s_blocksize
- 1) >>
991 inode
->i_sb
->s_blocksize_bits
);
996 for (i
= start
+ 1; i
<= *endnum
; i
++) {
999 length
+= UDF_DEFAULT_PREALLOC_BLOCKS
;
1000 } else if ((laarr
[i
].extLength
>> 30) ==
1001 (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30)) {
1002 length
+= (((laarr
[i
].extLength
&
1003 UDF_EXTENT_LENGTH_MASK
) +
1004 inode
->i_sb
->s_blocksize
- 1) >>
1005 inode
->i_sb
->s_blocksize_bits
);
1011 int next
= laarr
[start
].extLocation
.logicalBlockNum
+
1012 (((laarr
[start
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
1013 inode
->i_sb
->s_blocksize
- 1) >>
1014 inode
->i_sb
->s_blocksize_bits
);
1015 int numalloc
= udf_prealloc_blocks(inode
->i_sb
, inode
,
1016 laarr
[start
].extLocation
.partitionReferenceNum
,
1017 next
, (UDF_DEFAULT_PREALLOC_BLOCKS
> length
?
1018 length
: UDF_DEFAULT_PREALLOC_BLOCKS
) -
1021 if (start
== (c
+ 1))
1022 laarr
[start
].extLength
+=
1024 inode
->i_sb
->s_blocksize_bits
);
1026 memmove(&laarr
[c
+ 2], &laarr
[c
+ 1],
1027 sizeof(struct long_ad
) * (*endnum
- (c
+ 1)));
1029 laarr
[c
+ 1].extLocation
.logicalBlockNum
= next
;
1030 laarr
[c
+ 1].extLocation
.partitionReferenceNum
=
1031 laarr
[c
].extLocation
.
1032 partitionReferenceNum
;
1033 laarr
[c
+ 1].extLength
=
1034 EXT_NOT_RECORDED_ALLOCATED
|
1036 inode
->i_sb
->s_blocksize_bits
);
1040 for (i
= start
+ 1; numalloc
&& i
< *endnum
; i
++) {
1041 int elen
= ((laarr
[i
].extLength
&
1042 UDF_EXTENT_LENGTH_MASK
) +
1043 inode
->i_sb
->s_blocksize
- 1) >>
1044 inode
->i_sb
->s_blocksize_bits
;
1046 if (elen
> numalloc
) {
1047 laarr
[i
].extLength
-=
1049 inode
->i_sb
->s_blocksize_bits
);
1053 if (*endnum
> (i
+ 1))
1056 sizeof(struct long_ad
) *
1057 (*endnum
- (i
+ 1)));
1062 UDF_I(inode
)->i_lenExtents
+=
1063 numalloc
<< inode
->i_sb
->s_blocksize_bits
;
1068 static void udf_merge_extents(struct inode
*inode
, struct kernel_long_ad
*laarr
,
1072 unsigned long blocksize
= inode
->i_sb
->s_blocksize
;
1073 unsigned char blocksize_bits
= inode
->i_sb
->s_blocksize_bits
;
1075 for (i
= 0; i
< (*endnum
- 1); i
++) {
1076 struct kernel_long_ad
*li
/*l[i]*/ = &laarr
[i
];
1077 struct kernel_long_ad
*lip1
/*l[i plus 1]*/ = &laarr
[i
+ 1];
1079 if (((li
->extLength
>> 30) == (lip1
->extLength
>> 30)) &&
1080 (((li
->extLength
>> 30) ==
1081 (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30)) ||
1082 ((lip1
->extLocation
.logicalBlockNum
-
1083 li
->extLocation
.logicalBlockNum
) ==
1084 (((li
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1085 blocksize
- 1) >> blocksize_bits
)))) {
1087 if (((li
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1088 (lip1
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1089 blocksize
- 1) & ~UDF_EXTENT_LENGTH_MASK
) {
1090 lip1
->extLength
= (lip1
->extLength
-
1092 UDF_EXTENT_LENGTH_MASK
) +
1093 UDF_EXTENT_LENGTH_MASK
) &
1095 li
->extLength
= (li
->extLength
&
1096 UDF_EXTENT_FLAG_MASK
) +
1097 (UDF_EXTENT_LENGTH_MASK
+ 1) -
1099 lip1
->extLocation
.logicalBlockNum
=
1100 li
->extLocation
.logicalBlockNum
+
1102 UDF_EXTENT_LENGTH_MASK
) >>
1105 li
->extLength
= lip1
->extLength
+
1107 UDF_EXTENT_LENGTH_MASK
) +
1108 blocksize
- 1) & ~(blocksize
- 1));
1109 if (*endnum
> (i
+ 2))
1110 memmove(&laarr
[i
+ 1], &laarr
[i
+ 2],
1111 sizeof(struct long_ad
) *
1112 (*endnum
- (i
+ 2)));
1116 } else if (((li
->extLength
>> 30) ==
1117 (EXT_NOT_RECORDED_ALLOCATED
>> 30)) &&
1118 ((lip1
->extLength
>> 30) ==
1119 (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30))) {
1120 udf_free_blocks(inode
->i_sb
, inode
, &li
->extLocation
, 0,
1122 UDF_EXTENT_LENGTH_MASK
) +
1123 blocksize
- 1) >> blocksize_bits
);
1124 li
->extLocation
.logicalBlockNum
= 0;
1125 li
->extLocation
.partitionReferenceNum
= 0;
1127 if (((li
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1128 (lip1
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1129 blocksize
- 1) & ~UDF_EXTENT_LENGTH_MASK
) {
1130 lip1
->extLength
= (lip1
->extLength
-
1132 UDF_EXTENT_LENGTH_MASK
) +
1133 UDF_EXTENT_LENGTH_MASK
) &
1135 li
->extLength
= (li
->extLength
&
1136 UDF_EXTENT_FLAG_MASK
) +
1137 (UDF_EXTENT_LENGTH_MASK
+ 1) -
1140 li
->extLength
= lip1
->extLength
+
1142 UDF_EXTENT_LENGTH_MASK
) +
1143 blocksize
- 1) & ~(blocksize
- 1));
1144 if (*endnum
> (i
+ 2))
1145 memmove(&laarr
[i
+ 1], &laarr
[i
+ 2],
1146 sizeof(struct long_ad
) *
1147 (*endnum
- (i
+ 2)));
1151 } else if ((li
->extLength
>> 30) ==
1152 (EXT_NOT_RECORDED_ALLOCATED
>> 30)) {
1153 udf_free_blocks(inode
->i_sb
, inode
,
1154 &li
->extLocation
, 0,
1156 UDF_EXTENT_LENGTH_MASK
) +
1157 blocksize
- 1) >> blocksize_bits
);
1158 li
->extLocation
.logicalBlockNum
= 0;
1159 li
->extLocation
.partitionReferenceNum
= 0;
1160 li
->extLength
= (li
->extLength
&
1161 UDF_EXTENT_LENGTH_MASK
) |
1162 EXT_NOT_RECORDED_NOT_ALLOCATED
;
static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
			       int startnum, int endnum,
			       struct extent_position *epos)
{
	int start = 0, i;
	struct kernel_lb_addr tmploc;
	uint32_t tmplen;

	if (startnum > endnum) {
		for (i = 0; i < (startnum - endnum); i++)
			udf_delete_aext(inode, *epos);
	} else if (startnum < endnum) {
		for (i = 0; i < (endnum - startnum); i++) {
			udf_insert_aext(inode, *epos, laarr[i].extLocation,
					laarr[i].extLength);
			udf_next_aext(inode, epos, &laarr[i].extLocation,
				      &laarr[i].extLength, 1);
			start++;
		}
	}

	for (i = start; i < endnum; i++) {
		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
		udf_write_aext(inode, epos, &laarr[i].extLocation,
			       laarr[i].extLength, 1);
	}
}

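/*
 * udf_update_extents() above first deletes or inserts allocation descriptors
 * so the on-disk count matches endnum, then rewrites laarr[start..endnum-1]
 * in place.
 */
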
struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
			      int create, int *err)
{
	struct buffer_head *bh = NULL;

	bh = udf_getblk(inode, block, create, err);
	if (!bh)
		return NULL;

	if (buffer_uptodate(bh))
		return bh;

	ll_rw_block(REQ_OP_READ, 0, 1, &bh);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;

	brelse(bh);
	*err = -EIO;
	return NULL;
}

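/*
 * udf_setsize() below handles both growing (udf_extend_file / ICB expansion)
 * and shrinking (udf_truncate_extents) a file, serialized by i_data_sem.
 */
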
int udf_setsize(struct inode *inode, loff_t newsize)
{
	int err = 0;
	struct udf_inode_info *iinfo;
	unsigned int bsize = i_blocksize(inode);

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	iinfo = UDF_I(inode);
	if (newsize > inode->i_size) {
		down_write(&iinfo->i_data_sem);
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			if (bsize <
			    (udf_file_entry_alloc_offset(inode) + newsize)) {
				err = udf_expand_file_adinicb(inode);
				if (err)
					return err;
				down_write(&iinfo->i_data_sem);
			} else {
				iinfo->i_lenAlloc = newsize;
				goto set_size;
			}
		}
		err = udf_extend_file(inode, newsize);
		if (err) {
			up_write(&iinfo->i_data_sem);
			return err;
		}
set_size:
		up_write(&iinfo->i_data_sem);
		truncate_setsize(inode, newsize);
	} else {
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			down_write(&iinfo->i_data_sem);
			udf_clear_extent_cache(inode);
			memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
			       0x00, bsize - newsize -
			       udf_file_entry_alloc_offset(inode));
			iinfo->i_lenAlloc = newsize;
			truncate_setsize(inode, newsize);
			up_write(&iinfo->i_data_sem);
			goto update_time;
		}
		err = block_truncate_page(inode->i_mapping, newsize,
					  udf_get_block);
		if (err)
			return err;
		truncate_setsize(inode, newsize);
		down_write(&iinfo->i_data_sem);
		udf_clear_extent_cache(inode);
		err = udf_truncate_extents(inode);
		up_write(&iinfo->i_data_sem);
		if (err)
			return err;
	}
update_time:
	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return err;
}

/*
 * Maximum length of linked list formed by ICB hierarchy. The chosen number is
 * arbitrary - just that we hopefully don't limit any real use of rewritten
 * inode on write-once media but avoid looping for too long on corrupted media.
 */
#define UDF_MAX_ICB_NESTING 1024

1293 static int udf_read_inode(struct inode
*inode
, bool hidden_inode
)
1295 struct buffer_head
*bh
= NULL
;
1296 struct fileEntry
*fe
;
1297 struct extendedFileEntry
*efe
;
1299 struct udf_inode_info
*iinfo
= UDF_I(inode
);
1300 struct udf_sb_info
*sbi
= UDF_SB(inode
->i_sb
);
1301 struct kernel_lb_addr
*iloc
= &iinfo
->i_location
;
1302 unsigned int link_count
;
1303 unsigned int indirections
= 0;
1304 int bs
= inode
->i_sb
->s_blocksize
;
1309 if (iloc
->partitionReferenceNum
>= sbi
->s_partitions
) {
1310 udf_debug("partition reference: %u > logical volume partitions: %u\n",
1311 iloc
->partitionReferenceNum
, sbi
->s_partitions
);
1315 if (iloc
->logicalBlockNum
>=
1316 sbi
->s_partmaps
[iloc
->partitionReferenceNum
].s_partition_len
) {
1317 udf_debug("block=%u, partition=%u out of range\n",
1318 iloc
->logicalBlockNum
, iloc
->partitionReferenceNum
);
1323 * Set defaults, but the inode is still incomplete!
1324 * Note: get_new_inode() sets the following on a new inode:
1327 * i_flags = sb->s_flags
1329 * clean_inode(): zero fills and sets
1334 bh
= udf_read_ptagged(inode
->i_sb
, iloc
, 0, &ident
);
1336 udf_err(inode
->i_sb
, "(ino %lu) failed !bh\n", inode
->i_ino
);
1340 if (ident
!= TAG_IDENT_FE
&& ident
!= TAG_IDENT_EFE
&&
1341 ident
!= TAG_IDENT_USE
) {
1342 udf_err(inode
->i_sb
, "(ino %lu) failed ident=%u\n",
1343 inode
->i_ino
, ident
);
1347 fe
= (struct fileEntry
*)bh
->b_data
;
1348 efe
= (struct extendedFileEntry
*)bh
->b_data
;
1350 if (fe
->icbTag
.strategyType
== cpu_to_le16(4096)) {
1351 struct buffer_head
*ibh
;
1353 ibh
= udf_read_ptagged(inode
->i_sb
, iloc
, 1, &ident
);
1354 if (ident
== TAG_IDENT_IE
&& ibh
) {
1355 struct kernel_lb_addr loc
;
1356 struct indirectEntry
*ie
;
1358 ie
= (struct indirectEntry
*)ibh
->b_data
;
1359 loc
= lelb_to_cpu(ie
->indirectICB
.extLocation
);
1361 if (ie
->indirectICB
.extLength
) {
1363 memcpy(&iinfo
->i_location
, &loc
,
1364 sizeof(struct kernel_lb_addr
));
1365 if (++indirections
> UDF_MAX_ICB_NESTING
) {
1366 udf_err(inode
->i_sb
,
1367 "too many ICBs in ICB hierarchy"
1368 " (max %d supported)\n",
1369 UDF_MAX_ICB_NESTING
);
1377 } else if (fe
->icbTag
.strategyType
!= cpu_to_le16(4)) {
1378 udf_err(inode
->i_sb
, "unsupported strategy type: %u\n",
1379 le16_to_cpu(fe
->icbTag
.strategyType
));
1382 if (fe
->icbTag
.strategyType
== cpu_to_le16(4))
1383 iinfo
->i_strat4096
= 0;
1384 else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
1385 iinfo
->i_strat4096
= 1;
1387 iinfo
->i_alloc_type
= le16_to_cpu(fe
->icbTag
.flags
) &
1388 ICBTAG_FLAG_AD_MASK
;
1389 if (iinfo
->i_alloc_type
!= ICBTAG_FLAG_AD_SHORT
&&
1390 iinfo
->i_alloc_type
!= ICBTAG_FLAG_AD_LONG
&&
1391 iinfo
->i_alloc_type
!= ICBTAG_FLAG_AD_IN_ICB
) {
1395 iinfo
->i_unique
= 0;
1396 iinfo
->i_lenEAttr
= 0;
1397 iinfo
->i_lenExtents
= 0;
1398 iinfo
->i_lenAlloc
= 0;
1399 iinfo
->i_next_alloc_block
= 0;
1400 iinfo
->i_next_alloc_goal
= 0;
1401 if (fe
->descTag
.tagIdent
== cpu_to_le16(TAG_IDENT_EFE
)) {
1404 ret
= udf_alloc_i_data(inode
, bs
-
1405 sizeof(struct extendedFileEntry
));
1408 memcpy(iinfo
->i_ext
.i_data
,
1409 bh
->b_data
+ sizeof(struct extendedFileEntry
),
1410 bs
- sizeof(struct extendedFileEntry
));
1411 } else if (fe
->descTag
.tagIdent
== cpu_to_le16(TAG_IDENT_FE
)) {
1414 ret
= udf_alloc_i_data(inode
, bs
- sizeof(struct fileEntry
));
1417 memcpy(iinfo
->i_ext
.i_data
,
1418 bh
->b_data
+ sizeof(struct fileEntry
),
1419 bs
- sizeof(struct fileEntry
));
1420 } else if (fe
->descTag
.tagIdent
== cpu_to_le16(TAG_IDENT_USE
)) {
1423 iinfo
->i_lenAlloc
= le32_to_cpu(
1424 ((struct unallocSpaceEntry
*)bh
->b_data
)->
1426 ret
= udf_alloc_i_data(inode
, bs
-
1427 sizeof(struct unallocSpaceEntry
));
1430 memcpy(iinfo
->i_ext
.i_data
,
1431 bh
->b_data
+ sizeof(struct unallocSpaceEntry
),
1432 bs
- sizeof(struct unallocSpaceEntry
));
1437 read_lock(&sbi
->s_cred_lock
);
1438 uid
= le32_to_cpu(fe
->uid
);
1439 if (uid
== UDF_INVALID_ID
||
1440 UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_UID_SET
))
1441 inode
->i_uid
= sbi
->s_uid
;
1443 i_uid_write(inode
, uid
);
1445 gid
= le32_to_cpu(fe
->gid
);
1446 if (gid
== UDF_INVALID_ID
||
1447 UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_GID_SET
))
1448 inode
->i_gid
= sbi
->s_gid
;
1450 i_gid_write(inode
, gid
);
1452 if (fe
->icbTag
.fileType
!= ICBTAG_FILE_TYPE_DIRECTORY
&&
1453 sbi
->s_fmode
!= UDF_INVALID_MODE
)
1454 inode
->i_mode
= sbi
->s_fmode
;
1455 else if (fe
->icbTag
.fileType
== ICBTAG_FILE_TYPE_DIRECTORY
&&
1456 sbi
->s_dmode
!= UDF_INVALID_MODE
)
1457 inode
->i_mode
= sbi
->s_dmode
;
1459 inode
->i_mode
= udf_convert_permissions(fe
);
1460 inode
->i_mode
&= ~sbi
->s_umask
;
1461 read_unlock(&sbi
->s_cred_lock
);
1463 link_count
= le16_to_cpu(fe
->fileLinkCount
);
1465 if (!hidden_inode
) {
1471 set_nlink(inode
, link_count
);
1473 inode
->i_size
= le64_to_cpu(fe
->informationLength
);
1474 iinfo
->i_lenExtents
= inode
->i_size
;
1476 if (iinfo
->i_efe
== 0) {
1477 inode
->i_blocks
= le64_to_cpu(fe
->logicalBlocksRecorded
) <<
1478 (inode
->i_sb
->s_blocksize_bits
- 9);
1480 udf_disk_stamp_to_time(&inode
->i_atime
, fe
->accessTime
);
1481 udf_disk_stamp_to_time(&inode
->i_mtime
, fe
->modificationTime
);
1482 udf_disk_stamp_to_time(&inode
->i_ctime
, fe
->attrTime
);
1484 iinfo
->i_unique
= le64_to_cpu(fe
->uniqueID
);
1485 iinfo
->i_lenEAttr
= le32_to_cpu(fe
->lengthExtendedAttr
);
1486 iinfo
->i_lenAlloc
= le32_to_cpu(fe
->lengthAllocDescs
);
1487 iinfo
->i_checkpoint
= le32_to_cpu(fe
->checkpoint
);
1489 inode
->i_blocks
= le64_to_cpu(efe
->logicalBlocksRecorded
) <<
1490 (inode
->i_sb
->s_blocksize_bits
- 9);
1492 udf_disk_stamp_to_time(&inode
->i_atime
, efe
->accessTime
);
1493 udf_disk_stamp_to_time(&inode
->i_mtime
, efe
->modificationTime
);
1494 udf_disk_stamp_to_time(&iinfo
->i_crtime
, efe
->createTime
);
1495 udf_disk_stamp_to_time(&inode
->i_ctime
, efe
->attrTime
);
1497 iinfo
->i_unique
= le64_to_cpu(efe
->uniqueID
);
1498 iinfo
->i_lenEAttr
= le32_to_cpu(efe
->lengthExtendedAttr
);
1499 iinfo
->i_lenAlloc
= le32_to_cpu(efe
->lengthAllocDescs
);
1500 iinfo
->i_checkpoint
= le32_to_cpu(efe
->checkpoint
);
1502 inode
->i_generation
= iinfo
->i_unique
;
1505 * Sanity check length of allocation descriptors and extended attrs to
1506 * avoid integer overflows
1508 if (iinfo
->i_lenEAttr
> bs
|| iinfo
->i_lenAlloc
> bs
)
1510 /* Now do exact checks */
1511 if (udf_file_entry_alloc_offset(inode
) + iinfo
->i_lenAlloc
> bs
)
1513 /* Sanity checks for files in ICB so that we don't get confused later */
1514 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_IN_ICB
) {
1516 * For file in ICB data is stored in allocation descriptor
1517 * so sizes should match
1519 if (iinfo
->i_lenAlloc
!= inode
->i_size
)
1521 /* File in ICB has to fit in there... */
1522 if (inode
->i_size
> bs
- udf_file_entry_alloc_offset(inode
))
1526 switch (fe
->icbTag
.fileType
) {
1527 case ICBTAG_FILE_TYPE_DIRECTORY
:
1528 inode
->i_op
= &udf_dir_inode_operations
;
1529 inode
->i_fop
= &udf_dir_operations
;
1530 inode
->i_mode
|= S_IFDIR
;
1533 case ICBTAG_FILE_TYPE_REALTIME
:
1534 case ICBTAG_FILE_TYPE_REGULAR
:
1535 case ICBTAG_FILE_TYPE_UNDEF
:
1536 case ICBTAG_FILE_TYPE_VAT20
:
1537 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_IN_ICB
)
1538 inode
->i_data
.a_ops
= &udf_adinicb_aops
;
1540 inode
->i_data
.a_ops
= &udf_aops
;
1541 inode
->i_op
= &udf_file_inode_operations
;
1542 inode
->i_fop
= &udf_file_operations
;
1543 inode
->i_mode
|= S_IFREG
;
1545 case ICBTAG_FILE_TYPE_BLOCK
:
1546 inode
->i_mode
|= S_IFBLK
;
1548 case ICBTAG_FILE_TYPE_CHAR
:
1549 inode
->i_mode
|= S_IFCHR
;
1551 case ICBTAG_FILE_TYPE_FIFO
:
1552 init_special_inode(inode
, inode
->i_mode
| S_IFIFO
, 0);
1554 case ICBTAG_FILE_TYPE_SOCKET
:
1555 init_special_inode(inode
, inode
->i_mode
| S_IFSOCK
, 0);
1557 case ICBTAG_FILE_TYPE_SYMLINK
:
1558 inode
->i_data
.a_ops
= &udf_symlink_aops
;
1559 inode
->i_op
= &udf_symlink_inode_operations
;
1560 inode_nohighmem(inode
);
1561 inode
->i_mode
= S_IFLNK
| 0777;
1563 case ICBTAG_FILE_TYPE_MAIN
:
1564 udf_debug("METADATA FILE-----\n");
1566 case ICBTAG_FILE_TYPE_MIRROR
:
1567 udf_debug("METADATA MIRROR FILE-----\n");
1569 case ICBTAG_FILE_TYPE_BITMAP
:
1570 udf_debug("METADATA BITMAP FILE-----\n");
1573 udf_err(inode
->i_sb
, "(ino %lu) failed unknown file type=%u\n",
1574 inode
->i_ino
, fe
->icbTag
.fileType
);
1577 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
)) {
1578 struct deviceSpec
*dsea
=
1579 (struct deviceSpec
*)udf_get_extendedattr(inode
, 12, 1);
1581 init_special_inode(inode
, inode
->i_mode
,
1582 MKDEV(le32_to_cpu(dsea
->majorDeviceIdent
),
1583 le32_to_cpu(dsea
->minorDeviceIdent
)));
1584 /* Developer ID ??? */
static int udf_alloc_i_data(struct inode *inode, size_t size)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
	if (!iinfo->i_ext.i_data)
		return -ENOMEM;

	return 0;
}

static umode_t udf_convert_permissions(struct fileEntry *fe)
{
	umode_t mode;
	uint32_t permissions;
	uint32_t flags;

	permissions = le32_to_cpu(fe->permissions);
	flags = le16_to_cpu(fe->icbTag.flags);

	mode =	((permissions) & 0007) |
		((permissions >> 2) & 0070) |
		((permissions >> 4) & 0700) |
		((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
		((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
		((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);

	return mode;
}

int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

static int udf_sync_inode(struct inode *inode)
{
	return udf_update_inode(inode, 1);
}

static void udf_adjust_time(struct udf_inode_info *iinfo, struct timespec64 time)
{
	if (iinfo->i_crtime.tv_sec > time.tv_sec ||
	    (iinfo->i_crtime.tv_sec == time.tv_sec &&
	     iinfo->i_crtime.tv_nsec > time.tv_nsec))
		iinfo->i_crtime = time;
}

1640 static int udf_update_inode(struct inode
*inode
, int do_sync
)
1642 struct buffer_head
*bh
= NULL
;
1643 struct fileEntry
*fe
;
1644 struct extendedFileEntry
*efe
;
1645 uint64_t lb_recorded
;
1650 struct udf_sb_info
*sbi
= UDF_SB(inode
->i_sb
);
1651 unsigned char blocksize_bits
= inode
->i_sb
->s_blocksize_bits
;
1652 struct udf_inode_info
*iinfo
= UDF_I(inode
);
1654 bh
= udf_tgetblk(inode
->i_sb
,
1655 udf_get_lb_pblock(inode
->i_sb
, &iinfo
->i_location
, 0));
1657 udf_debug("getblk failure\n");
1662 memset(bh
->b_data
, 0, inode
->i_sb
->s_blocksize
);
1663 fe
= (struct fileEntry
*)bh
->b_data
;
1664 efe
= (struct extendedFileEntry
*)bh
->b_data
;
1667 struct unallocSpaceEntry
*use
=
1668 (struct unallocSpaceEntry
*)bh
->b_data
;
1670 use
->lengthAllocDescs
= cpu_to_le32(iinfo
->i_lenAlloc
);
1671 memcpy(bh
->b_data
+ sizeof(struct unallocSpaceEntry
),
1672 iinfo
->i_ext
.i_data
, inode
->i_sb
->s_blocksize
-
1673 sizeof(struct unallocSpaceEntry
));
1674 use
->descTag
.tagIdent
= cpu_to_le16(TAG_IDENT_USE
);
1675 crclen
= sizeof(struct unallocSpaceEntry
);
1680 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_UID_FORGET
))
1681 fe
->uid
= cpu_to_le32(UDF_INVALID_ID
);
1683 fe
->uid
= cpu_to_le32(i_uid_read(inode
));
1685 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_GID_FORGET
))
1686 fe
->gid
= cpu_to_le32(UDF_INVALID_ID
);
1688 fe
->gid
= cpu_to_le32(i_gid_read(inode
));
1690 udfperms
= ((inode
->i_mode
& 0007)) |
1691 ((inode
->i_mode
& 0070) << 2) |
1692 ((inode
->i_mode
& 0700) << 4);
1694 udfperms
|= (le32_to_cpu(fe
->permissions
) &
1695 (FE_PERM_O_DELETE
| FE_PERM_O_CHATTR
|
1696 FE_PERM_G_DELETE
| FE_PERM_G_CHATTR
|
1697 FE_PERM_U_DELETE
| FE_PERM_U_CHATTR
));
1698 fe
->permissions
= cpu_to_le32(udfperms
);
1700 if (S_ISDIR(inode
->i_mode
) && inode
->i_nlink
> 0)
1701 fe
->fileLinkCount
= cpu_to_le16(inode
->i_nlink
- 1);
1703 fe
->fileLinkCount
= cpu_to_le16(inode
->i_nlink
);
1705 fe
->informationLength
= cpu_to_le64(inode
->i_size
);
1707 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
)) {
1709 struct deviceSpec
*dsea
=
1710 (struct deviceSpec
*)udf_get_extendedattr(inode
, 12, 1);
1712 dsea
= (struct deviceSpec
*)
1713 udf_add_extendedattr(inode
,
1714 sizeof(struct deviceSpec
) +
1715 sizeof(struct regid
), 12, 0x3);
1716 dsea
->attrType
= cpu_to_le32(12);
1717 dsea
->attrSubtype
= 1;
1718 dsea
->attrLength
= cpu_to_le32(
1719 sizeof(struct deviceSpec
) +
1720 sizeof(struct regid
));
1721 dsea
->impUseLength
= cpu_to_le32(sizeof(struct regid
));
1723 eid
= (struct regid
*)dsea
->impUse
;
1724 memset(eid
, 0, sizeof(*eid
));
1725 strcpy(eid
->ident
, UDF_ID_DEVELOPER
);
1726 eid
->identSuffix
[0] = UDF_OS_CLASS_UNIX
;
1727 eid
->identSuffix
[1] = UDF_OS_ID_LINUX
;
1728 dsea
->majorDeviceIdent
= cpu_to_le32(imajor(inode
));
1729 dsea
->minorDeviceIdent
= cpu_to_le32(iminor(inode
));
1732 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_IN_ICB
)
1733 lb_recorded
= 0; /* No extents => no blocks! */
1736 (inode
->i_blocks
+ (1 << (blocksize_bits
- 9)) - 1) >>
1737 (blocksize_bits
- 9);
1739 if (iinfo
->i_efe
== 0) {
1740 memcpy(bh
->b_data
+ sizeof(struct fileEntry
),
1741 iinfo
->i_ext
.i_data
,
1742 inode
->i_sb
->s_blocksize
- sizeof(struct fileEntry
));
1743 fe
->logicalBlocksRecorded
= cpu_to_le64(lb_recorded
);
1745 udf_time_to_disk_stamp(&fe
->accessTime
, inode
->i_atime
);
1746 udf_time_to_disk_stamp(&fe
->modificationTime
, inode
->i_mtime
);
1747 udf_time_to_disk_stamp(&fe
->attrTime
, inode
->i_ctime
);
1748 memset(&(fe
->impIdent
), 0, sizeof(struct regid
));
1749 strcpy(fe
->impIdent
.ident
, UDF_ID_DEVELOPER
);
1750 fe
->impIdent
.identSuffix
[0] = UDF_OS_CLASS_UNIX
;
1751 fe
->impIdent
.identSuffix
[1] = UDF_OS_ID_LINUX
;
1752 fe
->uniqueID
= cpu_to_le64(iinfo
->i_unique
);
1753 fe
->lengthExtendedAttr
= cpu_to_le32(iinfo
->i_lenEAttr
);
1754 fe
->lengthAllocDescs
= cpu_to_le32(iinfo
->i_lenAlloc
);
1755 fe
->checkpoint
= cpu_to_le32(iinfo
->i_checkpoint
);
1756 fe
->descTag
.tagIdent
= cpu_to_le16(TAG_IDENT_FE
);
1757 crclen
= sizeof(struct fileEntry
);
1759 memcpy(bh
->b_data
+ sizeof(struct extendedFileEntry
),
1760 iinfo
->i_ext
.i_data
,
1761 inode
->i_sb
->s_blocksize
-
1762 sizeof(struct extendedFileEntry
));
1763 efe
->objectSize
= cpu_to_le64(inode
->i_size
);
1764 efe
->logicalBlocksRecorded
= cpu_to_le64(lb_recorded
);
1766 udf_adjust_time(iinfo
, inode
->i_atime
);
1767 udf_adjust_time(iinfo
, inode
->i_mtime
);
1768 udf_adjust_time(iinfo
, inode
->i_ctime
);
1770 udf_time_to_disk_stamp(&efe
->accessTime
, inode
->i_atime
);
1771 udf_time_to_disk_stamp(&efe
->modificationTime
, inode
->i_mtime
);
1772 udf_time_to_disk_stamp(&efe
->createTime
, iinfo
->i_crtime
);
1773 udf_time_to_disk_stamp(&efe
->attrTime
, inode
->i_ctime
);
1775 memset(&(efe
->impIdent
), 0, sizeof(efe
->impIdent
));
1776 strcpy(efe
->impIdent
.ident
, UDF_ID_DEVELOPER
);
1777 efe
->impIdent
.identSuffix
[0] = UDF_OS_CLASS_UNIX
;
1778 efe
->impIdent
.identSuffix
[1] = UDF_OS_ID_LINUX
;
1779 efe
->uniqueID
= cpu_to_le64(iinfo
->i_unique
);
1780 efe
->lengthExtendedAttr
= cpu_to_le32(iinfo
->i_lenEAttr
);
1781 efe
->lengthAllocDescs
= cpu_to_le32(iinfo
->i_lenAlloc
);
1782 efe
->checkpoint
= cpu_to_le32(iinfo
->i_checkpoint
);
1783 efe
->descTag
.tagIdent
= cpu_to_le16(TAG_IDENT_EFE
);
1784 crclen
= sizeof(struct extendedFileEntry
);
1788 if (iinfo
->i_strat4096
) {
1789 fe
->icbTag
.strategyType
= cpu_to_le16(4096);
1790 fe
->icbTag
.strategyParameter
= cpu_to_le16(1);
1791 fe
->icbTag
.numEntries
= cpu_to_le16(2);
1793 fe
->icbTag
.strategyType
= cpu_to_le16(4);
1794 fe
->icbTag
.numEntries
= cpu_to_le16(1);
1798 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_USE
;
1799 else if (S_ISDIR(inode
->i_mode
))
1800 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_DIRECTORY
;
1801 else if (S_ISREG(inode
->i_mode
))
1802 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_REGULAR
;
1803 else if (S_ISLNK(inode
->i_mode
))
1804 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_SYMLINK
;
1805 else if (S_ISBLK(inode
->i_mode
))
1806 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_BLOCK
;
1807 else if (S_ISCHR(inode
->i_mode
))
1808 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_CHAR
;
1809 else if (S_ISFIFO(inode
->i_mode
))
1810 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_FIFO
;
1811 else if (S_ISSOCK(inode
->i_mode
))
1812 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_SOCKET
;
1814 icbflags
= iinfo
->i_alloc_type
|
1815 ((inode
->i_mode
& S_ISUID
) ? ICBTAG_FLAG_SETUID
: 0) |
1816 ((inode
->i_mode
& S_ISGID
) ? ICBTAG_FLAG_SETGID
: 0) |
1817 ((inode
->i_mode
& S_ISVTX
) ? ICBTAG_FLAG_STICKY
: 0) |
1818 (le16_to_cpu(fe
->icbTag
.flags
) &
1819 ~(ICBTAG_FLAG_AD_MASK
| ICBTAG_FLAG_SETUID
|
1820 ICBTAG_FLAG_SETGID
| ICBTAG_FLAG_STICKY
));
1822 fe
->icbTag
.flags
= cpu_to_le16(icbflags
);
1823 if (sbi
->s_udfrev
>= 0x0200)
1824 fe
->descTag
.descVersion
= cpu_to_le16(3);
1826 fe
->descTag
.descVersion
= cpu_to_le16(2);
1827 fe
->descTag
.tagSerialNum
= cpu_to_le16(sbi
->s_serial_number
);
1828 fe
->descTag
.tagLocation
= cpu_to_le32(
1829 iinfo
->i_location
.logicalBlockNum
);
1830 crclen
+= iinfo
->i_lenEAttr
+ iinfo
->i_lenAlloc
- sizeof(struct tag
);
1831 fe
->descTag
.descCRCLength
= cpu_to_le16(crclen
);
1832 fe
->descTag
.descCRC
= cpu_to_le16(crc_itu_t(0, (char *)fe
+ sizeof(struct tag
),
1834 fe
->descTag
.tagChecksum
= udf_tag_checksum(&fe
->descTag
);
1836 set_buffer_uptodate(bh
);
1839 /* write the data blocks */
1840 mark_buffer_dirty(bh
);
1842 sync_dirty_buffer(bh
);
1843 if (buffer_write_io_error(bh
)) {
1844 udf_warn(inode
->i_sb
, "IO error syncing udf inode [%08lx]\n",
struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
			 bool hidden_inode)
{
	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
	struct inode *inode = iget_locked(sb, block);
	int err;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW))
		return inode;

	memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
	err = udf_read_inode(inode, hidden_inode);
	if (err < 0) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);

	return inode;
}

1878 int udf_setup_indirect_aext(struct inode
*inode
, udf_pblk_t block
,
1879 struct extent_position
*epos
)
1881 struct super_block
*sb
= inode
->i_sb
;
1882 struct buffer_head
*bh
;
1883 struct allocExtDesc
*aed
;
1884 struct extent_position nepos
;
1885 struct kernel_lb_addr neloc
;
1888 if (UDF_I(inode
)->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
1889 adsize
= sizeof(struct short_ad
);
1890 else if (UDF_I(inode
)->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
1891 adsize
= sizeof(struct long_ad
);
1895 neloc
.logicalBlockNum
= block
;
1896 neloc
.partitionReferenceNum
= epos
->block
.partitionReferenceNum
;
1898 bh
= udf_tgetblk(sb
, udf_get_lb_pblock(sb
, &neloc
, 0));
1902 memset(bh
->b_data
, 0x00, sb
->s_blocksize
);
1903 set_buffer_uptodate(bh
);
1905 mark_buffer_dirty_inode(bh
, inode
);
1907 aed
= (struct allocExtDesc
*)(bh
->b_data
);
1908 if (!UDF_QUERY_FLAG(sb
, UDF_FLAG_STRICT
)) {
1909 aed
->previousAllocExtLocation
=
1910 cpu_to_le32(epos
->block
.logicalBlockNum
);
1912 aed
->lengthAllocDescs
= cpu_to_le32(0);
1913 if (UDF_SB(sb
)->s_udfrev
>= 0x0200)
1917 udf_new_tag(bh
->b_data
, TAG_IDENT_AED
, ver
, 1, block
,
1918 sizeof(struct tag
));
1920 nepos
.block
= neloc
;
1921 nepos
.offset
= sizeof(struct allocExtDesc
);
1925 * Do we have to copy current last extent to make space for indirect
1928 if (epos
->offset
+ adsize
> sb
->s_blocksize
) {
1929 struct kernel_lb_addr cp_loc
;
1933 epos
->offset
-= adsize
;
1934 cp_type
= udf_current_aext(inode
, epos
, &cp_loc
, &cp_len
, 0);
1935 cp_len
|= ((uint32_t)cp_type
) << 30;
1937 __udf_add_aext(inode
, &nepos
, &cp_loc
, cp_len
, 1);
1938 udf_write_aext(inode
, epos
, &nepos
.block
,
1939 sb
->s_blocksize
| EXT_NEXT_EXTENT_ALLOCDECS
, 0);
1941 __udf_add_aext(inode
, epos
, &nepos
.block
,
1942 sb
->s_blocksize
| EXT_NEXT_EXTENT_ALLOCDECS
, 0);
1952 * Append extent at the given position - should be the first free one in inode
1953 * / indirect extent. This function assumes there is enough space in the inode
1954 * or indirect extent. Use udf_add_aext() if you didn't check for this before.
1956 int __udf_add_aext(struct inode
*inode
, struct extent_position
*epos
,
1957 struct kernel_lb_addr
*eloc
, uint32_t elen
, int inc
)
1959 struct udf_inode_info
*iinfo
= UDF_I(inode
);
1960 struct allocExtDesc
*aed
;
1963 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
1964 adsize
= sizeof(struct short_ad
);
1965 else if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
1966 adsize
= sizeof(struct long_ad
);
1971 WARN_ON(iinfo
->i_lenAlloc
!=
1972 epos
->offset
- udf_file_entry_alloc_offset(inode
));
1974 aed
= (struct allocExtDesc
*)epos
->bh
->b_data
;
1975 WARN_ON(le32_to_cpu(aed
->lengthAllocDescs
) !=
1976 epos
->offset
- sizeof(struct allocExtDesc
));
1977 WARN_ON(epos
->offset
+ adsize
> inode
->i_sb
->s_blocksize
);
1980 udf_write_aext(inode
, epos
, eloc
, elen
, inc
);
1983 iinfo
->i_lenAlloc
+= adsize
;
1984 mark_inode_dirty(inode
);
1986 aed
= (struct allocExtDesc
*)epos
->bh
->b_data
;
1987 le32_add_cpu(&aed
->lengthAllocDescs
, adsize
);
1988 if (!UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_STRICT
) ||
1989 UDF_SB(inode
->i_sb
)->s_udfrev
>= 0x0201)
1990 udf_update_tag(epos
->bh
->b_data
,
1991 epos
->offset
+ (inc
? 0 : adsize
));
1993 udf_update_tag(epos
->bh
->b_data
,
1994 sizeof(struct allocExtDesc
));
1995 mark_buffer_dirty_inode(epos
->bh
, inode
);
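/*
 * __udf_add_aext() above assumes there is room for one more descriptor;
 * udf_add_aext() below allocates and links a fresh allocation extent block
 * when the current one is full before delegating to __udf_add_aext().
 */
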
/*
 * Append extent at given position - should be the first free one in inode
 * / indirect extent. Takes care of allocating and linking indirect blocks.
 */
int udf_add_aext(struct inode *inode, struct extent_position *epos,
		 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	struct super_block *sb = inode->i_sb;

	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	if (epos->offset + (2 * adsize) > sb->s_blocksize) {
		int err;
		udf_pblk_t new_block;

		new_block = udf_new_block(sb, NULL,
					  epos->block.partitionReferenceNum,
					  epos->block.logicalBlockNum, &err);
		if (!new_block)
			return -ENOSPC;

		err = udf_setup_indirect_aext(inode, new_block, epos);
		if (err)
			return err;
	}

	return __udf_add_aext(inode, epos, eloc, elen, inc);
}

void udf_write_aext(struct inode *inode, struct extent_position *epos,
		    struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh)
		ptr = iinfo->i_ext.i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
	else
		ptr = epos->bh->b_data + epos->offset;

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = (struct short_ad *)ptr;
		sad->extLength = cpu_to_le32(elen);
		sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
		adsize = sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = (struct long_ad *)ptr;
		lad->extLength = cpu_to_le32(elen);
		lad->extLocation = cpu_to_lelb(*eloc);
		memset(lad->impUse, 0x00, sizeof(lad->impUse));
		adsize = sizeof(struct long_ad);
		break;
	default:
		return;
	}

	if (epos->bh) {
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
			struct allocExtDesc *aed =
				(struct allocExtDesc *)epos->bh->b_data;
			udf_update_tag(epos->bh->b_data,
				       le32_to_cpu(aed->lengthAllocDescs) +
				       sizeof(struct allocExtDesc));
		}
		mark_buffer_dirty_inode(epos->bh, inode);
	} else {
		mark_inode_dirty(inode);
	}

	if (inc)
		epos->offset += adsize;
}

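/*
 * udf_write_aext() stores the descriptor either inside the inode's ICB
 * (epos->bh == NULL) or in the current allocation extent block, and advances
 * epos->offset when 'inc' is set.
 */
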
/*
 * Only 1 indirect extent in a row really makes sense but allow up to 16 in
 * case someone does some weird stuff.
 */
#define UDF_MAX_INDIR_EXTS 16

int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int8_t etype;
	unsigned int indirections = 0;

	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
	       (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
		udf_pblk_t block;

		if (++indirections > UDF_MAX_INDIR_EXTS) {
			udf_err(inode->i_sb,
				"too many indirect extents in inode %lu\n",
				inode->i_ino);
			return -1;
		}

		epos->block = *eloc;
		epos->offset = sizeof(struct allocExtDesc);
		brelse(epos->bh);
		block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
		epos->bh = udf_tread(inode->i_sb, block);
		if (!epos->bh) {
			udf_debug("reading block %u failed!\n", block);
			return -1;
		}
	}

	return etype;
}

int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
			struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh) {
		if (!epos->offset)
			epos->offset = udf_file_entry_alloc_offset(inode);
		ptr = iinfo->i_ext.i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
		alen = udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenAlloc;
	} else {
		if (!epos->offset)
			epos->offset = sizeof(struct allocExtDesc);
		ptr = epos->bh->b_data + epos->offset;
		alen = sizeof(struct allocExtDesc) +
			le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
				    lengthAllocDescs);
	}

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
		if (!sad)
			return -1;
		etype = le32_to_cpu(sad->extLength) >> 30;
		eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
		eloc->partitionReferenceNum =
				iinfo->i_location.partitionReferenceNum;
		*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
		if (!lad)
			return -1;
		etype = le32_to_cpu(lad->extLength) >> 30;
		*eloc = lelb_to_cpu(lad->extLocation);
		*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	default:
		udf_debug("alloc_type = %u unsupported\n", iinfo->i_alloc_type);
		return -1;
	}

	return etype;
}

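/*
 * udf_next_aext() follows EXT_NEXT_EXTENT_ALLOCDECS chains (bounded by
 * UDF_MAX_INDIR_EXTS) while udf_current_aext() decodes a single short_ad or
 * long_ad at the current position.
 */
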
static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
			      struct kernel_lb_addr neloc, uint32_t nelen)
{
	struct kernel_lb_addr oeloc;
	uint32_t oelen;
	int8_t etype;

	if (epos.bh)
		get_bh(epos.bh);

	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
		udf_write_aext(inode, &epos, &neloc, nelen, 1);
		neloc = oeloc;
		nelen = (etype << 30) | oelen;
	}
	udf_add_aext(inode, &epos, &neloc, nelen, 1);
	brelse(epos.bh);

	return (nelen >> 30);
}

2200 int8_t udf_delete_aext(struct inode
*inode
, struct extent_position epos
)
2202 struct extent_position oepos
;
2205 struct allocExtDesc
*aed
;
2206 struct udf_inode_info
*iinfo
;
2207 struct kernel_lb_addr eloc
;
2215 iinfo
= UDF_I(inode
);
2216 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
2217 adsize
= sizeof(struct short_ad
);
2218 else if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
2219 adsize
= sizeof(struct long_ad
);
2224 if (udf_next_aext(inode
, &epos
, &eloc
, &elen
, 1) == -1)
2227 while ((etype
= udf_next_aext(inode
, &epos
, &eloc
, &elen
, 1)) != -1) {
2228 udf_write_aext(inode
, &oepos
, &eloc
, (etype
<< 30) | elen
, 1);
2229 if (oepos
.bh
!= epos
.bh
) {
2230 oepos
.block
= epos
.block
;
2234 oepos
.offset
= epos
.offset
- adsize
;
2237 memset(&eloc
, 0x00, sizeof(struct kernel_lb_addr
));
2240 if (epos
.bh
!= oepos
.bh
) {
2241 udf_free_blocks(inode
->i_sb
, inode
, &epos
.block
, 0, 1);
2242 udf_write_aext(inode
, &oepos
, &eloc
, elen
, 1);
2243 udf_write_aext(inode
, &oepos
, &eloc
, elen
, 1);
2245 iinfo
->i_lenAlloc
-= (adsize
* 2);
2246 mark_inode_dirty(inode
);
2248 aed
= (struct allocExtDesc
*)oepos
.bh
->b_data
;
2249 le32_add_cpu(&aed
->lengthAllocDescs
, -(2 * adsize
));
2250 if (!UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_STRICT
) ||
2251 UDF_SB(inode
->i_sb
)->s_udfrev
>= 0x0201)
2252 udf_update_tag(oepos
.bh
->b_data
,
2253 oepos
.offset
- (2 * adsize
));
2255 udf_update_tag(oepos
.bh
->b_data
,
2256 sizeof(struct allocExtDesc
));
2257 mark_buffer_dirty_inode(oepos
.bh
, inode
);
2260 udf_write_aext(inode
, &oepos
, &eloc
, elen
, 1);
2262 iinfo
->i_lenAlloc
-= adsize
;
2263 mark_inode_dirty(inode
);
2265 aed
= (struct allocExtDesc
*)oepos
.bh
->b_data
;
2266 le32_add_cpu(&aed
->lengthAllocDescs
, -adsize
);
2267 if (!UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_STRICT
) ||
2268 UDF_SB(inode
->i_sb
)->s_udfrev
>= 0x0201)
2269 udf_update_tag(oepos
.bh
->b_data
,
2270 epos
.offset
- adsize
);
2272 udf_update_tag(oepos
.bh
->b_data
,
2273 sizeof(struct allocExtDesc
));
2274 mark_buffer_dirty_inode(oepos
.bh
, inode
);
2281 return (elen
>> 30);
int8_t inode_bmap(struct inode *inode, sector_t block,
		  struct extent_position *pos, struct kernel_lb_addr *eloc,
		  uint32_t *elen, sector_t *offset)
{
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits;
	int8_t etype;
	struct udf_inode_info *iinfo;

	iinfo = UDF_I(inode);
	if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
		pos->offset = 0;
		pos->block = iinfo->i_location;
		pos->bh = NULL;
	}
	*elen = 0;
	do {
		etype = udf_next_aext(inode, pos, eloc, elen, 1);
		if (etype == -1) {
			*offset = (bcount - lbcount) >> blocksize_bits;
			iinfo->i_lenExtents = lbcount;
			return -1;
		}
		lbcount += *elen;
	} while (lbcount <= bcount);
	/* update extent cache */
	udf_update_extent_cache(inode, lbcount - *elen, pos);
	*offset = (bcount + *elen - lbcount) >> blocksize_bits;

	return etype;
}

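/*
 * inode_bmap() above maps a file block to the extent containing it, returning
 * the extent type (or -1 past the last extent) and the block offset within
 * that extent, refreshing the per-inode extent cache on the way.
 */
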
udf_pblk_t udf_block_map(struct inode *inode, sector_t block)
{
	struct kernel_lb_addr eloc;
	uint32_t elen;
	sector_t offset;
	struct extent_position epos = {};
	udf_pblk_t ret;

	down_read(&UDF_I(inode)->i_data_sem);

	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
						(EXT_RECORDED_ALLOCATED >> 30))
		ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
	else
		ret = 0;

	up_read(&UDF_I(inode)->i_data_sem);
	brelse(epos.bh);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
		return udf_fixed_to_variable(ret);
	else
		return ret;
}