5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map
24 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
25 * block boundaries (which is not actually allowed)
26 * 12/20/98 added support for strategy 4096
27 * 03/07/99 rewrote udf_block_map (again)
28 * New funcs, inode_bmap, udf_next_aext
29 * 04/19/99 Support for writing device EA's for major/minor #
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/writeback.h>
37 #include <linux/slab.h>
38 #include <linux/crc-itu-t.h>
39 #include <linux/mpage.h>
40 #include <linux/uio.h>
41 #include <linux/bio.h>
46 #define EXTENT_MERGE_SIZE 5
48 #define FE_MAPPED_PERMS (FE_PERM_U_READ | FE_PERM_U_WRITE | FE_PERM_U_EXEC | \
49 FE_PERM_G_READ | FE_PERM_G_WRITE | FE_PERM_G_EXEC | \
50 FE_PERM_O_READ | FE_PERM_O_WRITE | FE_PERM_O_EXEC)
52 #define FE_DELETE_PERMS (FE_PERM_U_DELETE | FE_PERM_G_DELETE | \
55 static umode_t
udf_convert_permissions(struct fileEntry
*);
56 static int udf_update_inode(struct inode
*, int);
57 static int udf_sync_inode(struct inode
*inode
);
58 static int udf_alloc_i_data(struct inode
*inode
, size_t size
);
59 static sector_t
inode_getblk(struct inode
*, sector_t
, int *, int *);
60 static int8_t udf_insert_aext(struct inode
*, struct extent_position
,
61 struct kernel_lb_addr
, uint32_t);
62 static void udf_split_extents(struct inode
*, int *, int, udf_pblk_t
,
63 struct kernel_long_ad
*, int *);
64 static void udf_prealloc_extents(struct inode
*, int, int,
65 struct kernel_long_ad
*, int *);
66 static void udf_merge_extents(struct inode
*, struct kernel_long_ad
*, int *);
67 static void udf_update_extents(struct inode
*, struct kernel_long_ad
*, int,
68 int, struct extent_position
*);
69 static int udf_get_block(struct inode
*, sector_t
, struct buffer_head
*, int);
71 static void __udf_clear_extent_cache(struct inode
*inode
)
73 struct udf_inode_info
*iinfo
= UDF_I(inode
);
75 if (iinfo
->cached_extent
.lstart
!= -1) {
76 brelse(iinfo
->cached_extent
.epos
.bh
);
77 iinfo
->cached_extent
.lstart
= -1;
81 /* Invalidate extent cache */
82 static void udf_clear_extent_cache(struct inode
*inode
)
84 struct udf_inode_info
*iinfo
= UDF_I(inode
);
86 spin_lock(&iinfo
->i_extent_cache_lock
);
87 __udf_clear_extent_cache(inode
);
88 spin_unlock(&iinfo
->i_extent_cache_lock
);
91 /* Return contents of extent cache */
92 static int udf_read_extent_cache(struct inode
*inode
, loff_t bcount
,
93 loff_t
*lbcount
, struct extent_position
*pos
)
95 struct udf_inode_info
*iinfo
= UDF_I(inode
);
98 spin_lock(&iinfo
->i_extent_cache_lock
);
99 if ((iinfo
->cached_extent
.lstart
<= bcount
) &&
100 (iinfo
->cached_extent
.lstart
!= -1)) {
102 *lbcount
= iinfo
->cached_extent
.lstart
;
103 memcpy(pos
, &iinfo
->cached_extent
.epos
,
104 sizeof(struct extent_position
));
109 spin_unlock(&iinfo
->i_extent_cache_lock
);
113 /* Add extent to extent cache */
114 static void udf_update_extent_cache(struct inode
*inode
, loff_t estart
,
115 struct extent_position
*pos
)
117 struct udf_inode_info
*iinfo
= UDF_I(inode
);
119 spin_lock(&iinfo
->i_extent_cache_lock
);
120 /* Invalidate previously cached extent */
121 __udf_clear_extent_cache(inode
);
124 memcpy(&iinfo
->cached_extent
.epos
, pos
, sizeof(*pos
));
125 iinfo
->cached_extent
.lstart
= estart
;
126 switch (iinfo
->i_alloc_type
) {
127 case ICBTAG_FLAG_AD_SHORT
:
128 iinfo
->cached_extent
.epos
.offset
-= sizeof(struct short_ad
);
130 case ICBTAG_FLAG_AD_LONG
:
131 iinfo
->cached_extent
.epos
.offset
-= sizeof(struct long_ad
);
134 spin_unlock(&iinfo
->i_extent_cache_lock
);
137 void udf_evict_inode(struct inode
*inode
)
139 struct udf_inode_info
*iinfo
= UDF_I(inode
);
142 if (!inode
->i_nlink
&& !is_bad_inode(inode
)) {
144 udf_setsize(inode
, 0);
145 udf_update_inode(inode
, IS_SYNC(inode
));
147 truncate_inode_pages_final(&inode
->i_data
);
148 invalidate_inode_buffers(inode
);
150 if (iinfo
->i_alloc_type
!= ICBTAG_FLAG_AD_IN_ICB
&&
151 inode
->i_size
!= iinfo
->i_lenExtents
) {
152 udf_warn(inode
->i_sb
, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
153 inode
->i_ino
, inode
->i_mode
,
154 (unsigned long long)inode
->i_size
,
155 (unsigned long long)iinfo
->i_lenExtents
);
157 kfree(iinfo
->i_ext
.i_data
);
158 iinfo
->i_ext
.i_data
= NULL
;
159 udf_clear_extent_cache(inode
);
161 udf_free_inode(inode
);
165 static void udf_write_failed(struct address_space
*mapping
, loff_t to
)
167 struct inode
*inode
= mapping
->host
;
168 struct udf_inode_info
*iinfo
= UDF_I(inode
);
169 loff_t isize
= inode
->i_size
;
172 truncate_pagecache(inode
, isize
);
173 if (iinfo
->i_alloc_type
!= ICBTAG_FLAG_AD_IN_ICB
) {
174 down_write(&iinfo
->i_data_sem
);
175 udf_clear_extent_cache(inode
);
176 udf_truncate_extents(inode
);
177 up_write(&iinfo
->i_data_sem
);
182 static int udf_writepage(struct page
*page
, struct writeback_control
*wbc
)
184 return block_write_full_page(page
, udf_get_block
, wbc
);
187 static int udf_writepages(struct address_space
*mapping
,
188 struct writeback_control
*wbc
)
190 return mpage_writepages(mapping
, wbc
, udf_get_block
);
193 static int udf_readpage(struct file
*file
, struct page
*page
)
195 return mpage_readpage(page
, udf_get_block
);
198 static void udf_readahead(struct readahead_control
*rac
)
200 mpage_readahead(rac
, udf_get_block
);
203 static int udf_write_begin(struct file
*file
, struct address_space
*mapping
,
204 loff_t pos
, unsigned len
, unsigned flags
,
205 struct page
**pagep
, void **fsdata
)
209 ret
= block_write_begin(mapping
, pos
, len
, flags
, pagep
, udf_get_block
);
211 udf_write_failed(mapping
, pos
+ len
);
215 static ssize_t
udf_direct_IO(struct kiocb
*iocb
, struct iov_iter
*iter
)
217 struct file
*file
= iocb
->ki_filp
;
218 struct address_space
*mapping
= file
->f_mapping
;
219 struct inode
*inode
= mapping
->host
;
220 size_t count
= iov_iter_count(iter
);
223 ret
= blockdev_direct_IO(iocb
, inode
, iter
, udf_get_block
);
224 if (unlikely(ret
< 0 && iov_iter_rw(iter
) == WRITE
))
225 udf_write_failed(mapping
, iocb
->ki_pos
+ count
);
229 static sector_t
udf_bmap(struct address_space
*mapping
, sector_t block
)
231 return generic_block_bmap(mapping
, block
, udf_get_block
);
234 const struct address_space_operations udf_aops
= {
235 .readpage
= udf_readpage
,
236 .readahead
= udf_readahead
,
237 .writepage
= udf_writepage
,
238 .writepages
= udf_writepages
,
239 .write_begin
= udf_write_begin
,
240 .write_end
= generic_write_end
,
241 .direct_IO
= udf_direct_IO
,
246 * Expand file stored in ICB to a normal one-block-file
248 * This function requires i_data_sem for writing and releases it.
249 * This function requires i_mutex held
251 int udf_expand_file_adinicb(struct inode
*inode
)
255 struct udf_inode_info
*iinfo
= UDF_I(inode
);
257 struct writeback_control udf_wbc
= {
258 .sync_mode
= WB_SYNC_NONE
,
262 WARN_ON_ONCE(!inode_is_locked(inode
));
263 if (!iinfo
->i_lenAlloc
) {
264 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_USE_SHORT_AD
))
265 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_SHORT
;
267 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_LONG
;
268 /* from now on we have normal address_space methods */
269 inode
->i_data
.a_ops
= &udf_aops
;
270 up_write(&iinfo
->i_data_sem
);
271 mark_inode_dirty(inode
);
275 * Release i_data_sem so that we can lock a page - page lock ranks
276 * above i_data_sem. i_mutex still protects us against file changes.
278 up_write(&iinfo
->i_data_sem
);
280 page
= find_or_create_page(inode
->i_mapping
, 0, GFP_NOFS
);
284 if (!PageUptodate(page
)) {
285 kaddr
= kmap_atomic(page
);
286 memset(kaddr
+ iinfo
->i_lenAlloc
, 0x00,
287 PAGE_SIZE
- iinfo
->i_lenAlloc
);
288 memcpy(kaddr
, iinfo
->i_ext
.i_data
+ iinfo
->i_lenEAttr
,
290 flush_dcache_page(page
);
291 SetPageUptodate(page
);
292 kunmap_atomic(kaddr
);
294 down_write(&iinfo
->i_data_sem
);
295 memset(iinfo
->i_ext
.i_data
+ iinfo
->i_lenEAttr
, 0x00,
297 iinfo
->i_lenAlloc
= 0;
298 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_USE_SHORT_AD
))
299 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_SHORT
;
301 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_LONG
;
302 /* from now on we have normal address_space methods */
303 inode
->i_data
.a_ops
= &udf_aops
;
304 up_write(&iinfo
->i_data_sem
);
305 err
= inode
->i_data
.a_ops
->writepage(page
, &udf_wbc
);
307 /* Restore everything back so that we don't lose data... */
309 down_write(&iinfo
->i_data_sem
);
310 kaddr
= kmap_atomic(page
);
311 memcpy(iinfo
->i_ext
.i_data
+ iinfo
->i_lenEAttr
, kaddr
,
313 kunmap_atomic(kaddr
);
315 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_IN_ICB
;
316 inode
->i_data
.a_ops
= &udf_adinicb_aops
;
317 up_write(&iinfo
->i_data_sem
);
320 mark_inode_dirty(inode
);
325 struct buffer_head
*udf_expand_dir_adinicb(struct inode
*inode
,
326 udf_pblk_t
*block
, int *err
)
329 struct buffer_head
*dbh
= NULL
;
330 struct kernel_lb_addr eloc
;
332 struct extent_position epos
;
334 struct udf_fileident_bh sfibh
, dfibh
;
335 loff_t f_pos
= udf_ext0_offset(inode
);
336 int size
= udf_ext0_offset(inode
) + inode
->i_size
;
337 struct fileIdentDesc cfi
, *sfi
, *dfi
;
338 struct udf_inode_info
*iinfo
= UDF_I(inode
);
340 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_USE_SHORT_AD
))
341 alloctype
= ICBTAG_FLAG_AD_SHORT
;
343 alloctype
= ICBTAG_FLAG_AD_LONG
;
345 if (!inode
->i_size
) {
346 iinfo
->i_alloc_type
= alloctype
;
347 mark_inode_dirty(inode
);
351 /* alloc block, and copy data to it */
352 *block
= udf_new_block(inode
->i_sb
, inode
,
353 iinfo
->i_location
.partitionReferenceNum
,
354 iinfo
->i_location
.logicalBlockNum
, err
);
357 newblock
= udf_get_pblock(inode
->i_sb
, *block
,
358 iinfo
->i_location
.partitionReferenceNum
,
362 dbh
= udf_tgetblk(inode
->i_sb
, newblock
);
366 memset(dbh
->b_data
, 0x00, inode
->i_sb
->s_blocksize
);
367 set_buffer_uptodate(dbh
);
369 mark_buffer_dirty_inode(dbh
, inode
);
371 sfibh
.soffset
= sfibh
.eoffset
=
372 f_pos
& (inode
->i_sb
->s_blocksize
- 1);
373 sfibh
.sbh
= sfibh
.ebh
= NULL
;
374 dfibh
.soffset
= dfibh
.eoffset
= 0;
375 dfibh
.sbh
= dfibh
.ebh
= dbh
;
376 while (f_pos
< size
) {
377 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_IN_ICB
;
378 sfi
= udf_fileident_read(inode
, &f_pos
, &sfibh
, &cfi
, NULL
,
384 iinfo
->i_alloc_type
= alloctype
;
385 sfi
->descTag
.tagLocation
= cpu_to_le32(*block
);
386 dfibh
.soffset
= dfibh
.eoffset
;
387 dfibh
.eoffset
+= (sfibh
.eoffset
- sfibh
.soffset
);
388 dfi
= (struct fileIdentDesc
*)(dbh
->b_data
+ dfibh
.soffset
);
389 if (udf_write_fi(inode
, sfi
, dfi
, &dfibh
, sfi
->impUse
,
391 le16_to_cpu(sfi
->lengthOfImpUse
))) {
392 iinfo
->i_alloc_type
= ICBTAG_FLAG_AD_IN_ICB
;
397 mark_buffer_dirty_inode(dbh
, inode
);
399 memset(iinfo
->i_ext
.i_data
+ iinfo
->i_lenEAttr
, 0,
401 iinfo
->i_lenAlloc
= 0;
402 eloc
.logicalBlockNum
= *block
;
403 eloc
.partitionReferenceNum
=
404 iinfo
->i_location
.partitionReferenceNum
;
405 iinfo
->i_lenExtents
= inode
->i_size
;
407 epos
.block
= iinfo
->i_location
;
408 epos
.offset
= udf_file_entry_alloc_offset(inode
);
409 udf_add_aext(inode
, &epos
, &eloc
, inode
->i_size
, 0);
413 mark_inode_dirty(inode
);
417 static int udf_get_block(struct inode
*inode
, sector_t block
,
418 struct buffer_head
*bh_result
, int create
)
422 struct udf_inode_info
*iinfo
;
425 phys
= udf_block_map(inode
, block
);
427 map_bh(bh_result
, inode
->i_sb
, phys
);
433 iinfo
= UDF_I(inode
);
435 down_write(&iinfo
->i_data_sem
);
436 if (block
== iinfo
->i_next_alloc_block
+ 1) {
437 iinfo
->i_next_alloc_block
++;
438 iinfo
->i_next_alloc_goal
++;
441 udf_clear_extent_cache(inode
);
442 phys
= inode_getblk(inode
, block
, &err
, &new);
447 set_buffer_new(bh_result
);
448 map_bh(bh_result
, inode
->i_sb
, phys
);
451 up_write(&iinfo
->i_data_sem
);
455 static struct buffer_head
*udf_getblk(struct inode
*inode
, udf_pblk_t block
,
456 int create
, int *err
)
458 struct buffer_head
*bh
;
459 struct buffer_head dummy
;
462 dummy
.b_blocknr
= -1000;
463 *err
= udf_get_block(inode
, block
, &dummy
, create
);
464 if (!*err
&& buffer_mapped(&dummy
)) {
465 bh
= sb_getblk(inode
->i_sb
, dummy
.b_blocknr
);
466 if (buffer_new(&dummy
)) {
468 memset(bh
->b_data
, 0x00, inode
->i_sb
->s_blocksize
);
469 set_buffer_uptodate(bh
);
471 mark_buffer_dirty_inode(bh
, inode
);
479 /* Extend the file with new blocks totaling 'new_block_bytes',
480 * return the number of extents added
482 static int udf_do_extend_file(struct inode
*inode
,
483 struct extent_position
*last_pos
,
484 struct kernel_long_ad
*last_ext
,
485 loff_t new_block_bytes
)
488 int count
= 0, fake
= !(last_ext
->extLength
& UDF_EXTENT_LENGTH_MASK
);
489 struct super_block
*sb
= inode
->i_sb
;
490 struct kernel_lb_addr prealloc_loc
= {};
491 uint32_t prealloc_len
= 0;
492 struct udf_inode_info
*iinfo
;
495 /* The previous extent is fake and we should not extend by anything
496 * - there's nothing to do... */
497 if (!new_block_bytes
&& fake
)
500 iinfo
= UDF_I(inode
);
501 /* Round the last extent up to a multiple of block size */
502 if (last_ext
->extLength
& (sb
->s_blocksize
- 1)) {
503 last_ext
->extLength
=
504 (last_ext
->extLength
& UDF_EXTENT_FLAG_MASK
) |
505 (((last_ext
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
506 sb
->s_blocksize
- 1) & ~(sb
->s_blocksize
- 1));
507 iinfo
->i_lenExtents
=
508 (iinfo
->i_lenExtents
+ sb
->s_blocksize
- 1) &
509 ~(sb
->s_blocksize
- 1);
512 /* Last extent are just preallocated blocks? */
513 if ((last_ext
->extLength
& UDF_EXTENT_FLAG_MASK
) ==
514 EXT_NOT_RECORDED_ALLOCATED
) {
515 /* Save the extent so that we can reattach it to the end */
516 prealloc_loc
= last_ext
->extLocation
;
517 prealloc_len
= last_ext
->extLength
;
518 /* Mark the extent as a hole */
519 last_ext
->extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
|
520 (last_ext
->extLength
& UDF_EXTENT_LENGTH_MASK
);
521 last_ext
->extLocation
.logicalBlockNum
= 0;
522 last_ext
->extLocation
.partitionReferenceNum
= 0;
525 /* Can we merge with the previous extent? */
526 if ((last_ext
->extLength
& UDF_EXTENT_FLAG_MASK
) ==
527 EXT_NOT_RECORDED_NOT_ALLOCATED
) {
528 add
= (1 << 30) - sb
->s_blocksize
-
529 (last_ext
->extLength
& UDF_EXTENT_LENGTH_MASK
);
530 if (add
> new_block_bytes
)
531 add
= new_block_bytes
;
532 new_block_bytes
-= add
;
533 last_ext
->extLength
+= add
;
537 udf_add_aext(inode
, last_pos
, &last_ext
->extLocation
,
538 last_ext
->extLength
, 1);
541 struct kernel_lb_addr tmploc
;
544 udf_write_aext(inode
, last_pos
, &last_ext
->extLocation
,
545 last_ext
->extLength
, 1);
547 * We've rewritten the last extent but there may be empty
548 * indirect extent after it - enter it.
550 udf_next_aext(inode
, last_pos
, &tmploc
, &tmplen
, 0);
553 /* Managed to do everything necessary? */
554 if (!new_block_bytes
)
557 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
558 last_ext
->extLocation
.logicalBlockNum
= 0;
559 last_ext
->extLocation
.partitionReferenceNum
= 0;
560 add
= (1 << 30) - sb
->s_blocksize
;
561 last_ext
->extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
| add
;
563 /* Create enough extents to cover the whole hole */
564 while (new_block_bytes
> add
) {
565 new_block_bytes
-= add
;
566 err
= udf_add_aext(inode
, last_pos
, &last_ext
->extLocation
,
567 last_ext
->extLength
, 1);
572 if (new_block_bytes
) {
573 last_ext
->extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
|
575 err
= udf_add_aext(inode
, last_pos
, &last_ext
->extLocation
,
576 last_ext
->extLength
, 1);
583 /* Do we have some preallocated blocks saved? */
585 err
= udf_add_aext(inode
, last_pos
, &prealloc_loc
,
589 last_ext
->extLocation
= prealloc_loc
;
590 last_ext
->extLength
= prealloc_len
;
594 /* last_pos should point to the last written extent... */
595 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
596 last_pos
->offset
-= sizeof(struct short_ad
);
597 else if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
598 last_pos
->offset
-= sizeof(struct long_ad
);
605 /* Extend the final block of the file to final_block_len bytes */
606 static void udf_do_extend_final_block(struct inode
*inode
,
607 struct extent_position
*last_pos
,
608 struct kernel_long_ad
*last_ext
,
609 uint32_t final_block_len
)
611 struct super_block
*sb
= inode
->i_sb
;
612 uint32_t added_bytes
;
614 added_bytes
= final_block_len
-
615 (last_ext
->extLength
& (sb
->s_blocksize
- 1));
616 last_ext
->extLength
+= added_bytes
;
617 UDF_I(inode
)->i_lenExtents
+= added_bytes
;
619 udf_write_aext(inode
, last_pos
, &last_ext
->extLocation
,
620 last_ext
->extLength
, 1);
623 static int udf_extend_file(struct inode
*inode
, loff_t newsize
)
626 struct extent_position epos
;
627 struct kernel_lb_addr eloc
;
630 struct super_block
*sb
= inode
->i_sb
;
631 sector_t first_block
= newsize
>> sb
->s_blocksize_bits
, offset
;
632 unsigned long partial_final_block
;
634 struct udf_inode_info
*iinfo
= UDF_I(inode
);
635 struct kernel_long_ad extent
;
637 int within_final_block
;
639 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
640 adsize
= sizeof(struct short_ad
);
641 else if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
642 adsize
= sizeof(struct long_ad
);
646 etype
= inode_bmap(inode
, first_block
, &epos
, &eloc
, &elen
, &offset
);
647 within_final_block
= (etype
!= -1);
649 if ((!epos
.bh
&& epos
.offset
== udf_file_entry_alloc_offset(inode
)) ||
650 (epos
.bh
&& epos
.offset
== sizeof(struct allocExtDesc
))) {
651 /* File has no extents at all or has empty last
652 * indirect extent! Create a fake extent... */
653 extent
.extLocation
.logicalBlockNum
= 0;
654 extent
.extLocation
.partitionReferenceNum
= 0;
655 extent
.extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
;
657 epos
.offset
-= adsize
;
658 etype
= udf_next_aext(inode
, &epos
, &extent
.extLocation
,
659 &extent
.extLength
, 0);
660 extent
.extLength
|= etype
<< 30;
663 partial_final_block
= newsize
& (sb
->s_blocksize
- 1);
665 /* File has extent covering the new size (could happen when extending
668 if (within_final_block
) {
669 /* Extending file within the last file block */
670 udf_do_extend_final_block(inode
, &epos
, &extent
,
671 partial_final_block
);
673 loff_t add
= ((loff_t
)offset
<< sb
->s_blocksize_bits
) |
675 err
= udf_do_extend_file(inode
, &epos
, &extent
, add
);
681 iinfo
->i_lenExtents
= newsize
;
687 static sector_t
inode_getblk(struct inode
*inode
, sector_t block
,
690 struct kernel_long_ad laarr
[EXTENT_MERGE_SIZE
];
691 struct extent_position prev_epos
, cur_epos
, next_epos
;
692 int count
= 0, startnum
= 0, endnum
= 0;
693 uint32_t elen
= 0, tmpelen
;
694 struct kernel_lb_addr eloc
, tmpeloc
;
696 loff_t lbcount
= 0, b_off
= 0;
697 udf_pblk_t newblocknum
, newblock
;
700 struct udf_inode_info
*iinfo
= UDF_I(inode
);
701 udf_pblk_t goal
= 0, pgoal
= iinfo
->i_location
.logicalBlockNum
;
707 prev_epos
.offset
= udf_file_entry_alloc_offset(inode
);
708 prev_epos
.block
= iinfo
->i_location
;
710 cur_epos
= next_epos
= prev_epos
;
711 b_off
= (loff_t
)block
<< inode
->i_sb
->s_blocksize_bits
;
713 /* find the extent which contains the block we are looking for.
714 alternate between laarr[0] and laarr[1] for locations of the
715 current extent, and the previous extent */
717 if (prev_epos
.bh
!= cur_epos
.bh
) {
718 brelse(prev_epos
.bh
);
720 prev_epos
.bh
= cur_epos
.bh
;
722 if (cur_epos
.bh
!= next_epos
.bh
) {
724 get_bh(next_epos
.bh
);
725 cur_epos
.bh
= next_epos
.bh
;
730 prev_epos
.block
= cur_epos
.block
;
731 cur_epos
.block
= next_epos
.block
;
733 prev_epos
.offset
= cur_epos
.offset
;
734 cur_epos
.offset
= next_epos
.offset
;
736 etype
= udf_next_aext(inode
, &next_epos
, &eloc
, &elen
, 1);
742 laarr
[c
].extLength
= (etype
<< 30) | elen
;
743 laarr
[c
].extLocation
= eloc
;
745 if (etype
!= (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30))
746 pgoal
= eloc
.logicalBlockNum
+
747 ((elen
+ inode
->i_sb
->s_blocksize
- 1) >>
748 inode
->i_sb
->s_blocksize_bits
);
751 } while (lbcount
+ elen
<= b_off
);
754 offset
= b_off
>> inode
->i_sb
->s_blocksize_bits
;
756 * Move prev_epos and cur_epos into indirect extent if we are at
759 udf_next_aext(inode
, &prev_epos
, &tmpeloc
, &tmpelen
, 0);
760 udf_next_aext(inode
, &cur_epos
, &tmpeloc
, &tmpelen
, 0);
762 /* if the extent is allocated and recorded, return the block
763 if the extent is not a multiple of the blocksize, round up */
765 if (etype
== (EXT_RECORDED_ALLOCATED
>> 30)) {
766 if (elen
& (inode
->i_sb
->s_blocksize
- 1)) {
767 elen
= EXT_RECORDED_ALLOCATED
|
768 ((elen
+ inode
->i_sb
->s_blocksize
- 1) &
769 ~(inode
->i_sb
->s_blocksize
- 1));
770 udf_write_aext(inode
, &cur_epos
, &eloc
, elen
, 1);
772 newblock
= udf_get_lb_pblock(inode
->i_sb
, &eloc
, offset
);
776 /* Are we beyond EOF? */
786 /* Create a fake extent when there's not one */
787 memset(&laarr
[0].extLocation
, 0x00,
788 sizeof(struct kernel_lb_addr
));
789 laarr
[0].extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
;
790 /* Will udf_do_extend_file() create real extent from
792 startnum
= (offset
> 0);
794 /* Create extents for the hole between EOF and offset */
795 hole_len
= (loff_t
)offset
<< inode
->i_blkbits
;
796 ret
= udf_do_extend_file(inode
, &prev_epos
, laarr
, hole_len
);
805 /* We are not covered by a preallocated extent? */
806 if ((laarr
[0].extLength
& UDF_EXTENT_FLAG_MASK
) !=
807 EXT_NOT_RECORDED_ALLOCATED
) {
808 /* Is there any real extent? - otherwise we overwrite
812 laarr
[c
].extLength
= EXT_NOT_RECORDED_NOT_ALLOCATED
|
813 inode
->i_sb
->s_blocksize
;
814 memset(&laarr
[c
].extLocation
, 0x00,
815 sizeof(struct kernel_lb_addr
));
822 endnum
= startnum
= ((count
> 2) ? 2 : count
);
824 /* if the current extent is in position 0,
825 swap it with the previous */
826 if (!c
&& count
!= 1) {
833 /* if the current block is located in an extent,
834 read the next extent */
835 etype
= udf_next_aext(inode
, &next_epos
, &eloc
, &elen
, 0);
837 laarr
[c
+ 1].extLength
= (etype
<< 30) | elen
;
838 laarr
[c
+ 1].extLocation
= eloc
;
846 /* if the current extent is not recorded but allocated, get the
847 * block in the extent corresponding to the requested block */
848 if ((laarr
[c
].extLength
>> 30) == (EXT_NOT_RECORDED_ALLOCATED
>> 30))
849 newblocknum
= laarr
[c
].extLocation
.logicalBlockNum
+ offset
;
850 else { /* otherwise, allocate a new block */
851 if (iinfo
->i_next_alloc_block
== block
)
852 goal
= iinfo
->i_next_alloc_goal
;
855 if (!(goal
= pgoal
)) /* XXX: what was intended here? */
856 goal
= iinfo
->i_location
.logicalBlockNum
+ 1;
859 newblocknum
= udf_new_block(inode
->i_sb
, inode
,
860 iinfo
->i_location
.partitionReferenceNum
,
868 iinfo
->i_lenExtents
+= inode
->i_sb
->s_blocksize
;
871 /* if the extent the requsted block is located in contains multiple
872 * blocks, split the extent into at most three extents. blocks prior
873 * to requested block, requested block, and blocks after requested
875 udf_split_extents(inode
, &c
, offset
, newblocknum
, laarr
, &endnum
);
877 /* We preallocate blocks only for regular files. It also makes sense
878 * for directories but there's a problem when to drop the
879 * preallocation. We might use some delayed work for that but I feel
880 * it's overengineering for a filesystem like UDF. */
881 if (S_ISREG(inode
->i_mode
))
882 udf_prealloc_extents(inode
, c
, lastblock
, laarr
, &endnum
);
884 /* merge any continuous blocks in laarr */
885 udf_merge_extents(inode
, laarr
, &endnum
);
887 /* write back the new extents, inserting new extents if the new number
888 * of extents is greater than the old number, and deleting extents if
889 * the new number of extents is less than the old number */
890 udf_update_extents(inode
, laarr
, startnum
, endnum
, &prev_epos
);
892 newblock
= udf_get_pblock(inode
->i_sb
, newblocknum
,
893 iinfo
->i_location
.partitionReferenceNum
, 0);
899 iinfo
->i_next_alloc_block
= block
;
900 iinfo
->i_next_alloc_goal
= newblocknum
;
901 inode
->i_ctime
= current_time(inode
);
904 udf_sync_inode(inode
);
906 mark_inode_dirty(inode
);
908 brelse(prev_epos
.bh
);
910 brelse(next_epos
.bh
);
914 static void udf_split_extents(struct inode
*inode
, int *c
, int offset
,
915 udf_pblk_t newblocknum
,
916 struct kernel_long_ad
*laarr
, int *endnum
)
918 unsigned long blocksize
= inode
->i_sb
->s_blocksize
;
919 unsigned char blocksize_bits
= inode
->i_sb
->s_blocksize_bits
;
921 if ((laarr
[*c
].extLength
>> 30) == (EXT_NOT_RECORDED_ALLOCATED
>> 30) ||
922 (laarr
[*c
].extLength
>> 30) ==
923 (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30)) {
925 int blen
= ((laarr
[curr
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
926 blocksize
- 1) >> blocksize_bits
;
927 int8_t etype
= (laarr
[curr
].extLength
>> 30);
931 else if (!offset
|| blen
== offset
+ 1) {
932 laarr
[curr
+ 2] = laarr
[curr
+ 1];
933 laarr
[curr
+ 1] = laarr
[curr
];
935 laarr
[curr
+ 3] = laarr
[curr
+ 1];
936 laarr
[curr
+ 2] = laarr
[curr
+ 1] = laarr
[curr
];
940 if (etype
== (EXT_NOT_RECORDED_ALLOCATED
>> 30)) {
941 udf_free_blocks(inode
->i_sb
, inode
,
942 &laarr
[curr
].extLocation
,
944 laarr
[curr
].extLength
=
945 EXT_NOT_RECORDED_NOT_ALLOCATED
|
946 (offset
<< blocksize_bits
);
947 laarr
[curr
].extLocation
.logicalBlockNum
= 0;
948 laarr
[curr
].extLocation
.
949 partitionReferenceNum
= 0;
951 laarr
[curr
].extLength
= (etype
<< 30) |
952 (offset
<< blocksize_bits
);
958 laarr
[curr
].extLocation
.logicalBlockNum
= newblocknum
;
959 if (etype
== (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30))
960 laarr
[curr
].extLocation
.partitionReferenceNum
=
961 UDF_I(inode
)->i_location
.partitionReferenceNum
;
962 laarr
[curr
].extLength
= EXT_RECORDED_ALLOCATED
|
966 if (blen
!= offset
+ 1) {
967 if (etype
== (EXT_NOT_RECORDED_ALLOCATED
>> 30))
968 laarr
[curr
].extLocation
.logicalBlockNum
+=
970 laarr
[curr
].extLength
= (etype
<< 30) |
971 ((blen
- (offset
+ 1)) << blocksize_bits
);
978 static void udf_prealloc_extents(struct inode
*inode
, int c
, int lastblock
,
979 struct kernel_long_ad
*laarr
,
982 int start
, length
= 0, currlength
= 0, i
;
984 if (*endnum
>= (c
+ 1)) {
990 if ((laarr
[c
+ 1].extLength
>> 30) ==
991 (EXT_NOT_RECORDED_ALLOCATED
>> 30)) {
993 length
= currlength
=
994 (((laarr
[c
+ 1].extLength
&
995 UDF_EXTENT_LENGTH_MASK
) +
996 inode
->i_sb
->s_blocksize
- 1) >>
997 inode
->i_sb
->s_blocksize_bits
);
1002 for (i
= start
+ 1; i
<= *endnum
; i
++) {
1005 length
+= UDF_DEFAULT_PREALLOC_BLOCKS
;
1006 } else if ((laarr
[i
].extLength
>> 30) ==
1007 (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30)) {
1008 length
+= (((laarr
[i
].extLength
&
1009 UDF_EXTENT_LENGTH_MASK
) +
1010 inode
->i_sb
->s_blocksize
- 1) >>
1011 inode
->i_sb
->s_blocksize_bits
);
1017 int next
= laarr
[start
].extLocation
.logicalBlockNum
+
1018 (((laarr
[start
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
1019 inode
->i_sb
->s_blocksize
- 1) >>
1020 inode
->i_sb
->s_blocksize_bits
);
1021 int numalloc
= udf_prealloc_blocks(inode
->i_sb
, inode
,
1022 laarr
[start
].extLocation
.partitionReferenceNum
,
1023 next
, (UDF_DEFAULT_PREALLOC_BLOCKS
> length
?
1024 length
: UDF_DEFAULT_PREALLOC_BLOCKS
) -
1027 if (start
== (c
+ 1))
1028 laarr
[start
].extLength
+=
1030 inode
->i_sb
->s_blocksize_bits
);
1032 memmove(&laarr
[c
+ 2], &laarr
[c
+ 1],
1033 sizeof(struct long_ad
) * (*endnum
- (c
+ 1)));
1035 laarr
[c
+ 1].extLocation
.logicalBlockNum
= next
;
1036 laarr
[c
+ 1].extLocation
.partitionReferenceNum
=
1037 laarr
[c
].extLocation
.
1038 partitionReferenceNum
;
1039 laarr
[c
+ 1].extLength
=
1040 EXT_NOT_RECORDED_ALLOCATED
|
1042 inode
->i_sb
->s_blocksize_bits
);
1046 for (i
= start
+ 1; numalloc
&& i
< *endnum
; i
++) {
1047 int elen
= ((laarr
[i
].extLength
&
1048 UDF_EXTENT_LENGTH_MASK
) +
1049 inode
->i_sb
->s_blocksize
- 1) >>
1050 inode
->i_sb
->s_blocksize_bits
;
1052 if (elen
> numalloc
) {
1053 laarr
[i
].extLength
-=
1055 inode
->i_sb
->s_blocksize_bits
);
1059 if (*endnum
> (i
+ 1))
1062 sizeof(struct long_ad
) *
1063 (*endnum
- (i
+ 1)));
1068 UDF_I(inode
)->i_lenExtents
+=
1069 numalloc
<< inode
->i_sb
->s_blocksize_bits
;
1074 static void udf_merge_extents(struct inode
*inode
, struct kernel_long_ad
*laarr
,
1078 unsigned long blocksize
= inode
->i_sb
->s_blocksize
;
1079 unsigned char blocksize_bits
= inode
->i_sb
->s_blocksize_bits
;
1081 for (i
= 0; i
< (*endnum
- 1); i
++) {
1082 struct kernel_long_ad
*li
/*l[i]*/ = &laarr
[i
];
1083 struct kernel_long_ad
*lip1
/*l[i plus 1]*/ = &laarr
[i
+ 1];
1085 if (((li
->extLength
>> 30) == (lip1
->extLength
>> 30)) &&
1086 (((li
->extLength
>> 30) ==
1087 (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30)) ||
1088 ((lip1
->extLocation
.logicalBlockNum
-
1089 li
->extLocation
.logicalBlockNum
) ==
1090 (((li
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1091 blocksize
- 1) >> blocksize_bits
)))) {
1093 if (((li
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1094 (lip1
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1095 blocksize
- 1) & ~UDF_EXTENT_LENGTH_MASK
) {
1096 lip1
->extLength
= (lip1
->extLength
-
1098 UDF_EXTENT_LENGTH_MASK
) +
1099 UDF_EXTENT_LENGTH_MASK
) &
1101 li
->extLength
= (li
->extLength
&
1102 UDF_EXTENT_FLAG_MASK
) +
1103 (UDF_EXTENT_LENGTH_MASK
+ 1) -
1105 lip1
->extLocation
.logicalBlockNum
=
1106 li
->extLocation
.logicalBlockNum
+
1108 UDF_EXTENT_LENGTH_MASK
) >>
1111 li
->extLength
= lip1
->extLength
+
1113 UDF_EXTENT_LENGTH_MASK
) +
1114 blocksize
- 1) & ~(blocksize
- 1));
1115 if (*endnum
> (i
+ 2))
1116 memmove(&laarr
[i
+ 1], &laarr
[i
+ 2],
1117 sizeof(struct long_ad
) *
1118 (*endnum
- (i
+ 2)));
1122 } else if (((li
->extLength
>> 30) ==
1123 (EXT_NOT_RECORDED_ALLOCATED
>> 30)) &&
1124 ((lip1
->extLength
>> 30) ==
1125 (EXT_NOT_RECORDED_NOT_ALLOCATED
>> 30))) {
1126 udf_free_blocks(inode
->i_sb
, inode
, &li
->extLocation
, 0,
1128 UDF_EXTENT_LENGTH_MASK
) +
1129 blocksize
- 1) >> blocksize_bits
);
1130 li
->extLocation
.logicalBlockNum
= 0;
1131 li
->extLocation
.partitionReferenceNum
= 0;
1133 if (((li
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1134 (lip1
->extLength
& UDF_EXTENT_LENGTH_MASK
) +
1135 blocksize
- 1) & ~UDF_EXTENT_LENGTH_MASK
) {
1136 lip1
->extLength
= (lip1
->extLength
-
1138 UDF_EXTENT_LENGTH_MASK
) +
1139 UDF_EXTENT_LENGTH_MASK
) &
1141 li
->extLength
= (li
->extLength
&
1142 UDF_EXTENT_FLAG_MASK
) +
1143 (UDF_EXTENT_LENGTH_MASK
+ 1) -
1146 li
->extLength
= lip1
->extLength
+
1148 UDF_EXTENT_LENGTH_MASK
) +
1149 blocksize
- 1) & ~(blocksize
- 1));
1150 if (*endnum
> (i
+ 2))
1151 memmove(&laarr
[i
+ 1], &laarr
[i
+ 2],
1152 sizeof(struct long_ad
) *
1153 (*endnum
- (i
+ 2)));
1157 } else if ((li
->extLength
>> 30) ==
1158 (EXT_NOT_RECORDED_ALLOCATED
>> 30)) {
1159 udf_free_blocks(inode
->i_sb
, inode
,
1160 &li
->extLocation
, 0,
1162 UDF_EXTENT_LENGTH_MASK
) +
1163 blocksize
- 1) >> blocksize_bits
);
1164 li
->extLocation
.logicalBlockNum
= 0;
1165 li
->extLocation
.partitionReferenceNum
= 0;
1166 li
->extLength
= (li
->extLength
&
1167 UDF_EXTENT_LENGTH_MASK
) |
1168 EXT_NOT_RECORDED_NOT_ALLOCATED
;
1173 static void udf_update_extents(struct inode
*inode
, struct kernel_long_ad
*laarr
,
1174 int startnum
, int endnum
,
1175 struct extent_position
*epos
)
1178 struct kernel_lb_addr tmploc
;
1181 if (startnum
> endnum
) {
1182 for (i
= 0; i
< (startnum
- endnum
); i
++)
1183 udf_delete_aext(inode
, *epos
);
1184 } else if (startnum
< endnum
) {
1185 for (i
= 0; i
< (endnum
- startnum
); i
++) {
1186 udf_insert_aext(inode
, *epos
, laarr
[i
].extLocation
,
1187 laarr
[i
].extLength
);
1188 udf_next_aext(inode
, epos
, &laarr
[i
].extLocation
,
1189 &laarr
[i
].extLength
, 1);
1194 for (i
= start
; i
< endnum
; i
++) {
1195 udf_next_aext(inode
, epos
, &tmploc
, &tmplen
, 0);
1196 udf_write_aext(inode
, epos
, &laarr
[i
].extLocation
,
1197 laarr
[i
].extLength
, 1);
1201 struct buffer_head
*udf_bread(struct inode
*inode
, udf_pblk_t block
,
1202 int create
, int *err
)
1204 struct buffer_head
*bh
= NULL
;
1206 bh
= udf_getblk(inode
, block
, create
, err
);
1210 if (buffer_uptodate(bh
))
1213 ll_rw_block(REQ_OP_READ
, 0, 1, &bh
);
1216 if (buffer_uptodate(bh
))
1224 int udf_setsize(struct inode
*inode
, loff_t newsize
)
1227 struct udf_inode_info
*iinfo
;
1228 unsigned int bsize
= i_blocksize(inode
);
1230 if (!(S_ISREG(inode
->i_mode
) || S_ISDIR(inode
->i_mode
) ||
1231 S_ISLNK(inode
->i_mode
)))
1233 if (IS_APPEND(inode
) || IS_IMMUTABLE(inode
))
1236 iinfo
= UDF_I(inode
);
1237 if (newsize
> inode
->i_size
) {
1238 down_write(&iinfo
->i_data_sem
);
1239 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_IN_ICB
) {
1241 (udf_file_entry_alloc_offset(inode
) + newsize
)) {
1242 err
= udf_expand_file_adinicb(inode
);
1245 down_write(&iinfo
->i_data_sem
);
1247 iinfo
->i_lenAlloc
= newsize
;
1251 err
= udf_extend_file(inode
, newsize
);
1253 up_write(&iinfo
->i_data_sem
);
1257 up_write(&iinfo
->i_data_sem
);
1258 truncate_setsize(inode
, newsize
);
1260 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_IN_ICB
) {
1261 down_write(&iinfo
->i_data_sem
);
1262 udf_clear_extent_cache(inode
);
1263 memset(iinfo
->i_ext
.i_data
+ iinfo
->i_lenEAttr
+ newsize
,
1264 0x00, bsize
- newsize
-
1265 udf_file_entry_alloc_offset(inode
));
1266 iinfo
->i_lenAlloc
= newsize
;
1267 truncate_setsize(inode
, newsize
);
1268 up_write(&iinfo
->i_data_sem
);
1271 err
= block_truncate_page(inode
->i_mapping
, newsize
,
1275 truncate_setsize(inode
, newsize
);
1276 down_write(&iinfo
->i_data_sem
);
1277 udf_clear_extent_cache(inode
);
1278 err
= udf_truncate_extents(inode
);
1279 up_write(&iinfo
->i_data_sem
);
1284 inode
->i_mtime
= inode
->i_ctime
= current_time(inode
);
1286 udf_sync_inode(inode
);
1288 mark_inode_dirty(inode
);
1293 * Maximum length of linked list formed by ICB hierarchy. The chosen number is
1294 * arbitrary - just that we hopefully don't limit any real use of rewritten
1295 * inode on write-once media but avoid looping for too long on corrupted media.
1297 #define UDF_MAX_ICB_NESTING 1024
1299 static int udf_read_inode(struct inode
*inode
, bool hidden_inode
)
1301 struct buffer_head
*bh
= NULL
;
1302 struct fileEntry
*fe
;
1303 struct extendedFileEntry
*efe
;
1305 struct udf_inode_info
*iinfo
= UDF_I(inode
);
1306 struct udf_sb_info
*sbi
= UDF_SB(inode
->i_sb
);
1307 struct kernel_lb_addr
*iloc
= &iinfo
->i_location
;
1308 unsigned int link_count
;
1309 unsigned int indirections
= 0;
1310 int bs
= inode
->i_sb
->s_blocksize
;
1315 if (iloc
->partitionReferenceNum
>= sbi
->s_partitions
) {
1316 udf_debug("partition reference: %u > logical volume partitions: %u\n",
1317 iloc
->partitionReferenceNum
, sbi
->s_partitions
);
1321 if (iloc
->logicalBlockNum
>=
1322 sbi
->s_partmaps
[iloc
->partitionReferenceNum
].s_partition_len
) {
1323 udf_debug("block=%u, partition=%u out of range\n",
1324 iloc
->logicalBlockNum
, iloc
->partitionReferenceNum
);
1329 * Set defaults, but the inode is still incomplete!
1330 * Note: get_new_inode() sets the following on a new inode:
1333 * i_flags = sb->s_flags
1335 * clean_inode(): zero fills and sets
1340 bh
= udf_read_ptagged(inode
->i_sb
, iloc
, 0, &ident
);
1342 udf_err(inode
->i_sb
, "(ino %lu) failed !bh\n", inode
->i_ino
);
1346 if (ident
!= TAG_IDENT_FE
&& ident
!= TAG_IDENT_EFE
&&
1347 ident
!= TAG_IDENT_USE
) {
1348 udf_err(inode
->i_sb
, "(ino %lu) failed ident=%u\n",
1349 inode
->i_ino
, ident
);
1353 fe
= (struct fileEntry
*)bh
->b_data
;
1354 efe
= (struct extendedFileEntry
*)bh
->b_data
;
1356 if (fe
->icbTag
.strategyType
== cpu_to_le16(4096)) {
1357 struct buffer_head
*ibh
;
1359 ibh
= udf_read_ptagged(inode
->i_sb
, iloc
, 1, &ident
);
1360 if (ident
== TAG_IDENT_IE
&& ibh
) {
1361 struct kernel_lb_addr loc
;
1362 struct indirectEntry
*ie
;
1364 ie
= (struct indirectEntry
*)ibh
->b_data
;
1365 loc
= lelb_to_cpu(ie
->indirectICB
.extLocation
);
1367 if (ie
->indirectICB
.extLength
) {
1369 memcpy(&iinfo
->i_location
, &loc
,
1370 sizeof(struct kernel_lb_addr
));
1371 if (++indirections
> UDF_MAX_ICB_NESTING
) {
1372 udf_err(inode
->i_sb
,
1373 "too many ICBs in ICB hierarchy"
1374 " (max %d supported)\n",
1375 UDF_MAX_ICB_NESTING
);
1383 } else if (fe
->icbTag
.strategyType
!= cpu_to_le16(4)) {
1384 udf_err(inode
->i_sb
, "unsupported strategy type: %u\n",
1385 le16_to_cpu(fe
->icbTag
.strategyType
));
1388 if (fe
->icbTag
.strategyType
== cpu_to_le16(4))
1389 iinfo
->i_strat4096
= 0;
1390 else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
1391 iinfo
->i_strat4096
= 1;
1393 iinfo
->i_alloc_type
= le16_to_cpu(fe
->icbTag
.flags
) &
1394 ICBTAG_FLAG_AD_MASK
;
1395 if (iinfo
->i_alloc_type
!= ICBTAG_FLAG_AD_SHORT
&&
1396 iinfo
->i_alloc_type
!= ICBTAG_FLAG_AD_LONG
&&
1397 iinfo
->i_alloc_type
!= ICBTAG_FLAG_AD_IN_ICB
) {
1401 iinfo
->i_unique
= 0;
1402 iinfo
->i_lenEAttr
= 0;
1403 iinfo
->i_lenExtents
= 0;
1404 iinfo
->i_lenAlloc
= 0;
1405 iinfo
->i_next_alloc_block
= 0;
1406 iinfo
->i_next_alloc_goal
= 0;
1407 if (fe
->descTag
.tagIdent
== cpu_to_le16(TAG_IDENT_EFE
)) {
1410 ret
= udf_alloc_i_data(inode
, bs
-
1411 sizeof(struct extendedFileEntry
));
1414 memcpy(iinfo
->i_ext
.i_data
,
1415 bh
->b_data
+ sizeof(struct extendedFileEntry
),
1416 bs
- sizeof(struct extendedFileEntry
));
1417 } else if (fe
->descTag
.tagIdent
== cpu_to_le16(TAG_IDENT_FE
)) {
1420 ret
= udf_alloc_i_data(inode
, bs
- sizeof(struct fileEntry
));
1423 memcpy(iinfo
->i_ext
.i_data
,
1424 bh
->b_data
+ sizeof(struct fileEntry
),
1425 bs
- sizeof(struct fileEntry
));
1426 } else if (fe
->descTag
.tagIdent
== cpu_to_le16(TAG_IDENT_USE
)) {
1429 iinfo
->i_lenAlloc
= le32_to_cpu(
1430 ((struct unallocSpaceEntry
*)bh
->b_data
)->
1432 ret
= udf_alloc_i_data(inode
, bs
-
1433 sizeof(struct unallocSpaceEntry
));
1436 memcpy(iinfo
->i_ext
.i_data
,
1437 bh
->b_data
+ sizeof(struct unallocSpaceEntry
),
1438 bs
- sizeof(struct unallocSpaceEntry
));
1443 read_lock(&sbi
->s_cred_lock
);
1444 uid
= le32_to_cpu(fe
->uid
);
1445 if (uid
== UDF_INVALID_ID
||
1446 UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_UID_SET
))
1447 inode
->i_uid
= sbi
->s_uid
;
1449 i_uid_write(inode
, uid
);
1451 gid
= le32_to_cpu(fe
->gid
);
1452 if (gid
== UDF_INVALID_ID
||
1453 UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_GID_SET
))
1454 inode
->i_gid
= sbi
->s_gid
;
1456 i_gid_write(inode
, gid
);
1458 if (fe
->icbTag
.fileType
!= ICBTAG_FILE_TYPE_DIRECTORY
&&
1459 sbi
->s_fmode
!= UDF_INVALID_MODE
)
1460 inode
->i_mode
= sbi
->s_fmode
;
1461 else if (fe
->icbTag
.fileType
== ICBTAG_FILE_TYPE_DIRECTORY
&&
1462 sbi
->s_dmode
!= UDF_INVALID_MODE
)
1463 inode
->i_mode
= sbi
->s_dmode
;
1465 inode
->i_mode
= udf_convert_permissions(fe
);
1466 inode
->i_mode
&= ~sbi
->s_umask
;
1467 iinfo
->i_extraPerms
= le32_to_cpu(fe
->permissions
) & ~FE_MAPPED_PERMS
;
1469 read_unlock(&sbi
->s_cred_lock
);
1471 link_count
= le16_to_cpu(fe
->fileLinkCount
);
1473 if (!hidden_inode
) {
1479 set_nlink(inode
, link_count
);
1481 inode
->i_size
= le64_to_cpu(fe
->informationLength
);
1482 iinfo
->i_lenExtents
= inode
->i_size
;
1484 if (iinfo
->i_efe
== 0) {
1485 inode
->i_blocks
= le64_to_cpu(fe
->logicalBlocksRecorded
) <<
1486 (inode
->i_sb
->s_blocksize_bits
- 9);
1488 udf_disk_stamp_to_time(&inode
->i_atime
, fe
->accessTime
);
1489 udf_disk_stamp_to_time(&inode
->i_mtime
, fe
->modificationTime
);
1490 udf_disk_stamp_to_time(&inode
->i_ctime
, fe
->attrTime
);
1492 iinfo
->i_unique
= le64_to_cpu(fe
->uniqueID
);
1493 iinfo
->i_lenEAttr
= le32_to_cpu(fe
->lengthExtendedAttr
);
1494 iinfo
->i_lenAlloc
= le32_to_cpu(fe
->lengthAllocDescs
);
1495 iinfo
->i_checkpoint
= le32_to_cpu(fe
->checkpoint
);
1496 iinfo
->i_streamdir
= 0;
1497 iinfo
->i_lenStreams
= 0;
1499 inode
->i_blocks
= le64_to_cpu(efe
->logicalBlocksRecorded
) <<
1500 (inode
->i_sb
->s_blocksize_bits
- 9);
1502 udf_disk_stamp_to_time(&inode
->i_atime
, efe
->accessTime
);
1503 udf_disk_stamp_to_time(&inode
->i_mtime
, efe
->modificationTime
);
1504 udf_disk_stamp_to_time(&iinfo
->i_crtime
, efe
->createTime
);
1505 udf_disk_stamp_to_time(&inode
->i_ctime
, efe
->attrTime
);
1507 iinfo
->i_unique
= le64_to_cpu(efe
->uniqueID
);
1508 iinfo
->i_lenEAttr
= le32_to_cpu(efe
->lengthExtendedAttr
);
1509 iinfo
->i_lenAlloc
= le32_to_cpu(efe
->lengthAllocDescs
);
1510 iinfo
->i_checkpoint
= le32_to_cpu(efe
->checkpoint
);
1513 iinfo
->i_streamdir
= (efe
->streamDirectoryICB
.extLength
!= 0);
1514 iinfo
->i_locStreamdir
=
1515 lelb_to_cpu(efe
->streamDirectoryICB
.extLocation
);
1516 iinfo
->i_lenStreams
= le64_to_cpu(efe
->objectSize
);
1517 if (iinfo
->i_lenStreams
>= inode
->i_size
)
1518 iinfo
->i_lenStreams
-= inode
->i_size
;
1520 iinfo
->i_lenStreams
= 0;
1522 inode
->i_generation
= iinfo
->i_unique
;
1525 * Sanity check length of allocation descriptors and extended attrs to
1526 * avoid integer overflows
1528 if (iinfo
->i_lenEAttr
> bs
|| iinfo
->i_lenAlloc
> bs
)
1530 /* Now do exact checks */
1531 if (udf_file_entry_alloc_offset(inode
) + iinfo
->i_lenAlloc
> bs
)
1533 /* Sanity checks for files in ICB so that we don't get confused later */
1534 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_IN_ICB
) {
1536 * For file in ICB data is stored in allocation descriptor
1537 * so sizes should match
1539 if (iinfo
->i_lenAlloc
!= inode
->i_size
)
1541 /* File in ICB has to fit in there... */
1542 if (inode
->i_size
> bs
- udf_file_entry_alloc_offset(inode
))
1546 switch (fe
->icbTag
.fileType
) {
1547 case ICBTAG_FILE_TYPE_DIRECTORY
:
1548 inode
->i_op
= &udf_dir_inode_operations
;
1549 inode
->i_fop
= &udf_dir_operations
;
1550 inode
->i_mode
|= S_IFDIR
;
1553 case ICBTAG_FILE_TYPE_REALTIME
:
1554 case ICBTAG_FILE_TYPE_REGULAR
:
1555 case ICBTAG_FILE_TYPE_UNDEF
:
1556 case ICBTAG_FILE_TYPE_VAT20
:
1557 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_IN_ICB
)
1558 inode
->i_data
.a_ops
= &udf_adinicb_aops
;
1560 inode
->i_data
.a_ops
= &udf_aops
;
1561 inode
->i_op
= &udf_file_inode_operations
;
1562 inode
->i_fop
= &udf_file_operations
;
1563 inode
->i_mode
|= S_IFREG
;
1565 case ICBTAG_FILE_TYPE_BLOCK
:
1566 inode
->i_mode
|= S_IFBLK
;
1568 case ICBTAG_FILE_TYPE_CHAR
:
1569 inode
->i_mode
|= S_IFCHR
;
1571 case ICBTAG_FILE_TYPE_FIFO
:
1572 init_special_inode(inode
, inode
->i_mode
| S_IFIFO
, 0);
1574 case ICBTAG_FILE_TYPE_SOCKET
:
1575 init_special_inode(inode
, inode
->i_mode
| S_IFSOCK
, 0);
1577 case ICBTAG_FILE_TYPE_SYMLINK
:
1578 inode
->i_data
.a_ops
= &udf_symlink_aops
;
1579 inode
->i_op
= &udf_symlink_inode_operations
;
1580 inode_nohighmem(inode
);
1581 inode
->i_mode
= S_IFLNK
| 0777;
1583 case ICBTAG_FILE_TYPE_MAIN
:
1584 udf_debug("METADATA FILE-----\n");
1586 case ICBTAG_FILE_TYPE_MIRROR
:
1587 udf_debug("METADATA MIRROR FILE-----\n");
1589 case ICBTAG_FILE_TYPE_BITMAP
:
1590 udf_debug("METADATA BITMAP FILE-----\n");
1593 udf_err(inode
->i_sb
, "(ino %lu) failed unknown file type=%u\n",
1594 inode
->i_ino
, fe
->icbTag
.fileType
);
1597 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
)) {
1598 struct deviceSpec
*dsea
=
1599 (struct deviceSpec
*)udf_get_extendedattr(inode
, 12, 1);
1601 init_special_inode(inode
, inode
->i_mode
,
1602 MKDEV(le32_to_cpu(dsea
->majorDeviceIdent
),
1603 le32_to_cpu(dsea
->minorDeviceIdent
)));
1604 /* Developer ID ??? */
1614 static int udf_alloc_i_data(struct inode
*inode
, size_t size
)
1616 struct udf_inode_info
*iinfo
= UDF_I(inode
);
1617 iinfo
->i_ext
.i_data
= kmalloc(size
, GFP_KERNEL
);
1618 if (!iinfo
->i_ext
.i_data
)
1623 static umode_t
udf_convert_permissions(struct fileEntry
*fe
)
1626 uint32_t permissions
;
1629 permissions
= le32_to_cpu(fe
->permissions
);
1630 flags
= le16_to_cpu(fe
->icbTag
.flags
);
1632 mode
= ((permissions
) & 0007) |
1633 ((permissions
>> 2) & 0070) |
1634 ((permissions
>> 4) & 0700) |
1635 ((flags
& ICBTAG_FLAG_SETUID
) ? S_ISUID
: 0) |
1636 ((flags
& ICBTAG_FLAG_SETGID
) ? S_ISGID
: 0) |
1637 ((flags
& ICBTAG_FLAG_STICKY
) ? S_ISVTX
: 0);
1642 void udf_update_extra_perms(struct inode
*inode
, umode_t mode
)
1644 struct udf_inode_info
*iinfo
= UDF_I(inode
);
1647 * UDF 2.01 sec. 3.3.3.3 Note 2:
1648 * In Unix, delete permission tracks write
1650 iinfo
->i_extraPerms
&= ~FE_DELETE_PERMS
;
1652 iinfo
->i_extraPerms
|= FE_PERM_U_DELETE
;
1654 iinfo
->i_extraPerms
|= FE_PERM_G_DELETE
;
1656 iinfo
->i_extraPerms
|= FE_PERM_O_DELETE
;
1659 int udf_write_inode(struct inode
*inode
, struct writeback_control
*wbc
)
1661 return udf_update_inode(inode
, wbc
->sync_mode
== WB_SYNC_ALL
);
1664 static int udf_sync_inode(struct inode
*inode
)
1666 return udf_update_inode(inode
, 1);
1669 static void udf_adjust_time(struct udf_inode_info
*iinfo
, struct timespec64 time
)
1671 if (iinfo
->i_crtime
.tv_sec
> time
.tv_sec
||
1672 (iinfo
->i_crtime
.tv_sec
== time
.tv_sec
&&
1673 iinfo
->i_crtime
.tv_nsec
> time
.tv_nsec
))
1674 iinfo
->i_crtime
= time
;
1677 static int udf_update_inode(struct inode
*inode
, int do_sync
)
1679 struct buffer_head
*bh
= NULL
;
1680 struct fileEntry
*fe
;
1681 struct extendedFileEntry
*efe
;
1682 uint64_t lb_recorded
;
1687 struct udf_sb_info
*sbi
= UDF_SB(inode
->i_sb
);
1688 unsigned char blocksize_bits
= inode
->i_sb
->s_blocksize_bits
;
1689 struct udf_inode_info
*iinfo
= UDF_I(inode
);
1691 bh
= udf_tgetblk(inode
->i_sb
,
1692 udf_get_lb_pblock(inode
->i_sb
, &iinfo
->i_location
, 0));
1694 udf_debug("getblk failure\n");
1699 memset(bh
->b_data
, 0, inode
->i_sb
->s_blocksize
);
1700 fe
= (struct fileEntry
*)bh
->b_data
;
1701 efe
= (struct extendedFileEntry
*)bh
->b_data
;
1704 struct unallocSpaceEntry
*use
=
1705 (struct unallocSpaceEntry
*)bh
->b_data
;
1707 use
->lengthAllocDescs
= cpu_to_le32(iinfo
->i_lenAlloc
);
1708 memcpy(bh
->b_data
+ sizeof(struct unallocSpaceEntry
),
1709 iinfo
->i_ext
.i_data
, inode
->i_sb
->s_blocksize
-
1710 sizeof(struct unallocSpaceEntry
));
1711 use
->descTag
.tagIdent
= cpu_to_le16(TAG_IDENT_USE
);
1712 crclen
= sizeof(struct unallocSpaceEntry
);
1717 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_UID_FORGET
))
1718 fe
->uid
= cpu_to_le32(UDF_INVALID_ID
);
1720 fe
->uid
= cpu_to_le32(i_uid_read(inode
));
1722 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_GID_FORGET
))
1723 fe
->gid
= cpu_to_le32(UDF_INVALID_ID
);
1725 fe
->gid
= cpu_to_le32(i_gid_read(inode
));
1727 udfperms
= ((inode
->i_mode
& 0007)) |
1728 ((inode
->i_mode
& 0070) << 2) |
1729 ((inode
->i_mode
& 0700) << 4);
1731 udfperms
|= iinfo
->i_extraPerms
;
1732 fe
->permissions
= cpu_to_le32(udfperms
);
1734 if (S_ISDIR(inode
->i_mode
) && inode
->i_nlink
> 0)
1735 fe
->fileLinkCount
= cpu_to_le16(inode
->i_nlink
- 1);
1737 fe
->fileLinkCount
= cpu_to_le16(inode
->i_nlink
);
1739 fe
->informationLength
= cpu_to_le64(inode
->i_size
);
1741 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
)) {
1743 struct deviceSpec
*dsea
=
1744 (struct deviceSpec
*)udf_get_extendedattr(inode
, 12, 1);
1746 dsea
= (struct deviceSpec
*)
1747 udf_add_extendedattr(inode
,
1748 sizeof(struct deviceSpec
) +
1749 sizeof(struct regid
), 12, 0x3);
1750 dsea
->attrType
= cpu_to_le32(12);
1751 dsea
->attrSubtype
= 1;
1752 dsea
->attrLength
= cpu_to_le32(
1753 sizeof(struct deviceSpec
) +
1754 sizeof(struct regid
));
1755 dsea
->impUseLength
= cpu_to_le32(sizeof(struct regid
));
1757 eid
= (struct regid
*)dsea
->impUse
;
1758 memset(eid
, 0, sizeof(*eid
));
1759 strcpy(eid
->ident
, UDF_ID_DEVELOPER
);
1760 eid
->identSuffix
[0] = UDF_OS_CLASS_UNIX
;
1761 eid
->identSuffix
[1] = UDF_OS_ID_LINUX
;
1762 dsea
->majorDeviceIdent
= cpu_to_le32(imajor(inode
));
1763 dsea
->minorDeviceIdent
= cpu_to_le32(iminor(inode
));
1766 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_IN_ICB
)
1767 lb_recorded
= 0; /* No extents => no blocks! */
1770 (inode
->i_blocks
+ (1 << (blocksize_bits
- 9)) - 1) >>
1771 (blocksize_bits
- 9);
1773 if (iinfo
->i_efe
== 0) {
1774 memcpy(bh
->b_data
+ sizeof(struct fileEntry
),
1775 iinfo
->i_ext
.i_data
,
1776 inode
->i_sb
->s_blocksize
- sizeof(struct fileEntry
));
1777 fe
->logicalBlocksRecorded
= cpu_to_le64(lb_recorded
);
1779 udf_time_to_disk_stamp(&fe
->accessTime
, inode
->i_atime
);
1780 udf_time_to_disk_stamp(&fe
->modificationTime
, inode
->i_mtime
);
1781 udf_time_to_disk_stamp(&fe
->attrTime
, inode
->i_ctime
);
1782 memset(&(fe
->impIdent
), 0, sizeof(struct regid
));
1783 strcpy(fe
->impIdent
.ident
, UDF_ID_DEVELOPER
);
1784 fe
->impIdent
.identSuffix
[0] = UDF_OS_CLASS_UNIX
;
1785 fe
->impIdent
.identSuffix
[1] = UDF_OS_ID_LINUX
;
1786 fe
->uniqueID
= cpu_to_le64(iinfo
->i_unique
);
1787 fe
->lengthExtendedAttr
= cpu_to_le32(iinfo
->i_lenEAttr
);
1788 fe
->lengthAllocDescs
= cpu_to_le32(iinfo
->i_lenAlloc
);
1789 fe
->checkpoint
= cpu_to_le32(iinfo
->i_checkpoint
);
1790 fe
->descTag
.tagIdent
= cpu_to_le16(TAG_IDENT_FE
);
1791 crclen
= sizeof(struct fileEntry
);
1793 memcpy(bh
->b_data
+ sizeof(struct extendedFileEntry
),
1794 iinfo
->i_ext
.i_data
,
1795 inode
->i_sb
->s_blocksize
-
1796 sizeof(struct extendedFileEntry
));
1798 cpu_to_le64(inode
->i_size
+ iinfo
->i_lenStreams
);
1799 efe
->logicalBlocksRecorded
= cpu_to_le64(lb_recorded
);
1801 if (iinfo
->i_streamdir
) {
1802 struct long_ad
*icb_lad
= &efe
->streamDirectoryICB
;
1804 icb_lad
->extLocation
=
1805 cpu_to_lelb(iinfo
->i_locStreamdir
);
1806 icb_lad
->extLength
=
1807 cpu_to_le32(inode
->i_sb
->s_blocksize
);
1810 udf_adjust_time(iinfo
, inode
->i_atime
);
1811 udf_adjust_time(iinfo
, inode
->i_mtime
);
1812 udf_adjust_time(iinfo
, inode
->i_ctime
);
1814 udf_time_to_disk_stamp(&efe
->accessTime
, inode
->i_atime
);
1815 udf_time_to_disk_stamp(&efe
->modificationTime
, inode
->i_mtime
);
1816 udf_time_to_disk_stamp(&efe
->createTime
, iinfo
->i_crtime
);
1817 udf_time_to_disk_stamp(&efe
->attrTime
, inode
->i_ctime
);
1819 memset(&(efe
->impIdent
), 0, sizeof(efe
->impIdent
));
1820 strcpy(efe
->impIdent
.ident
, UDF_ID_DEVELOPER
);
1821 efe
->impIdent
.identSuffix
[0] = UDF_OS_CLASS_UNIX
;
1822 efe
->impIdent
.identSuffix
[1] = UDF_OS_ID_LINUX
;
1823 efe
->uniqueID
= cpu_to_le64(iinfo
->i_unique
);
1824 efe
->lengthExtendedAttr
= cpu_to_le32(iinfo
->i_lenEAttr
);
1825 efe
->lengthAllocDescs
= cpu_to_le32(iinfo
->i_lenAlloc
);
1826 efe
->checkpoint
= cpu_to_le32(iinfo
->i_checkpoint
);
1827 efe
->descTag
.tagIdent
= cpu_to_le16(TAG_IDENT_EFE
);
1828 crclen
= sizeof(struct extendedFileEntry
);
1832 if (iinfo
->i_strat4096
) {
1833 fe
->icbTag
.strategyType
= cpu_to_le16(4096);
1834 fe
->icbTag
.strategyParameter
= cpu_to_le16(1);
1835 fe
->icbTag
.numEntries
= cpu_to_le16(2);
1837 fe
->icbTag
.strategyType
= cpu_to_le16(4);
1838 fe
->icbTag
.numEntries
= cpu_to_le16(1);
1842 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_USE
;
1843 else if (S_ISDIR(inode
->i_mode
))
1844 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_DIRECTORY
;
1845 else if (S_ISREG(inode
->i_mode
))
1846 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_REGULAR
;
1847 else if (S_ISLNK(inode
->i_mode
))
1848 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_SYMLINK
;
1849 else if (S_ISBLK(inode
->i_mode
))
1850 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_BLOCK
;
1851 else if (S_ISCHR(inode
->i_mode
))
1852 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_CHAR
;
1853 else if (S_ISFIFO(inode
->i_mode
))
1854 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_FIFO
;
1855 else if (S_ISSOCK(inode
->i_mode
))
1856 fe
->icbTag
.fileType
= ICBTAG_FILE_TYPE_SOCKET
;
1858 icbflags
= iinfo
->i_alloc_type
|
1859 ((inode
->i_mode
& S_ISUID
) ? ICBTAG_FLAG_SETUID
: 0) |
1860 ((inode
->i_mode
& S_ISGID
) ? ICBTAG_FLAG_SETGID
: 0) |
1861 ((inode
->i_mode
& S_ISVTX
) ? ICBTAG_FLAG_STICKY
: 0) |
1862 (le16_to_cpu(fe
->icbTag
.flags
) &
1863 ~(ICBTAG_FLAG_AD_MASK
| ICBTAG_FLAG_SETUID
|
1864 ICBTAG_FLAG_SETGID
| ICBTAG_FLAG_STICKY
));
1866 fe
->icbTag
.flags
= cpu_to_le16(icbflags
);
1867 if (sbi
->s_udfrev
>= 0x0200)
1868 fe
->descTag
.descVersion
= cpu_to_le16(3);
1870 fe
->descTag
.descVersion
= cpu_to_le16(2);
1871 fe
->descTag
.tagSerialNum
= cpu_to_le16(sbi
->s_serial_number
);
1872 fe
->descTag
.tagLocation
= cpu_to_le32(
1873 iinfo
->i_location
.logicalBlockNum
);
1874 crclen
+= iinfo
->i_lenEAttr
+ iinfo
->i_lenAlloc
- sizeof(struct tag
);
1875 fe
->descTag
.descCRCLength
= cpu_to_le16(crclen
);
1876 fe
->descTag
.descCRC
= cpu_to_le16(crc_itu_t(0, (char *)fe
+ sizeof(struct tag
),
1878 fe
->descTag
.tagChecksum
= udf_tag_checksum(&fe
->descTag
);
1880 set_buffer_uptodate(bh
);
1883 /* write the data blocks */
1884 mark_buffer_dirty(bh
);
1886 sync_dirty_buffer(bh
);
1887 if (buffer_write_io_error(bh
)) {
1888 udf_warn(inode
->i_sb
, "IO error syncing udf inode [%08lx]\n",
1898 struct inode
*__udf_iget(struct super_block
*sb
, struct kernel_lb_addr
*ino
,
1901 unsigned long block
= udf_get_lb_pblock(sb
, ino
, 0);
1902 struct inode
*inode
= iget_locked(sb
, block
);
1906 return ERR_PTR(-ENOMEM
);
1908 if (!(inode
->i_state
& I_NEW
))
1911 memcpy(&UDF_I(inode
)->i_location
, ino
, sizeof(struct kernel_lb_addr
));
1912 err
= udf_read_inode(inode
, hidden_inode
);
1915 return ERR_PTR(err
);
1917 unlock_new_inode(inode
);
1922 int udf_setup_indirect_aext(struct inode
*inode
, udf_pblk_t block
,
1923 struct extent_position
*epos
)
1925 struct super_block
*sb
= inode
->i_sb
;
1926 struct buffer_head
*bh
;
1927 struct allocExtDesc
*aed
;
1928 struct extent_position nepos
;
1929 struct kernel_lb_addr neloc
;
1932 if (UDF_I(inode
)->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
1933 adsize
= sizeof(struct short_ad
);
1934 else if (UDF_I(inode
)->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
1935 adsize
= sizeof(struct long_ad
);
1939 neloc
.logicalBlockNum
= block
;
1940 neloc
.partitionReferenceNum
= epos
->block
.partitionReferenceNum
;
1942 bh
= udf_tgetblk(sb
, udf_get_lb_pblock(sb
, &neloc
, 0));
1946 memset(bh
->b_data
, 0x00, sb
->s_blocksize
);
1947 set_buffer_uptodate(bh
);
1949 mark_buffer_dirty_inode(bh
, inode
);
1951 aed
= (struct allocExtDesc
*)(bh
->b_data
);
1952 if (!UDF_QUERY_FLAG(sb
, UDF_FLAG_STRICT
)) {
1953 aed
->previousAllocExtLocation
=
1954 cpu_to_le32(epos
->block
.logicalBlockNum
);
1956 aed
->lengthAllocDescs
= cpu_to_le32(0);
1957 if (UDF_SB(sb
)->s_udfrev
>= 0x0200)
1961 udf_new_tag(bh
->b_data
, TAG_IDENT_AED
, ver
, 1, block
,
1962 sizeof(struct tag
));
1964 nepos
.block
= neloc
;
1965 nepos
.offset
= sizeof(struct allocExtDesc
);
1969 * Do we have to copy current last extent to make space for indirect
1972 if (epos
->offset
+ adsize
> sb
->s_blocksize
) {
1973 struct kernel_lb_addr cp_loc
;
1977 epos
->offset
-= adsize
;
1978 cp_type
= udf_current_aext(inode
, epos
, &cp_loc
, &cp_len
, 0);
1979 cp_len
|= ((uint32_t)cp_type
) << 30;
1981 __udf_add_aext(inode
, &nepos
, &cp_loc
, cp_len
, 1);
1982 udf_write_aext(inode
, epos
, &nepos
.block
,
1983 sb
->s_blocksize
| EXT_NEXT_EXTENT_ALLOCDESCS
, 0);
1985 __udf_add_aext(inode
, epos
, &nepos
.block
,
1986 sb
->s_blocksize
| EXT_NEXT_EXTENT_ALLOCDESCS
, 0);
1996 * Append extent at the given position - should be the first free one in inode
1997 * / indirect extent. This function assumes there is enough space in the inode
1998 * or indirect extent. Use udf_add_aext() if you didn't check for this before.
2000 int __udf_add_aext(struct inode
*inode
, struct extent_position
*epos
,
2001 struct kernel_lb_addr
*eloc
, uint32_t elen
, int inc
)
2003 struct udf_inode_info
*iinfo
= UDF_I(inode
);
2004 struct allocExtDesc
*aed
;
2007 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
2008 adsize
= sizeof(struct short_ad
);
2009 else if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
2010 adsize
= sizeof(struct long_ad
);
2015 WARN_ON(iinfo
->i_lenAlloc
!=
2016 epos
->offset
- udf_file_entry_alloc_offset(inode
));
2018 aed
= (struct allocExtDesc
*)epos
->bh
->b_data
;
2019 WARN_ON(le32_to_cpu(aed
->lengthAllocDescs
) !=
2020 epos
->offset
- sizeof(struct allocExtDesc
));
2021 WARN_ON(epos
->offset
+ adsize
> inode
->i_sb
->s_blocksize
);
2024 udf_write_aext(inode
, epos
, eloc
, elen
, inc
);
2027 iinfo
->i_lenAlloc
+= adsize
;
2028 mark_inode_dirty(inode
);
2030 aed
= (struct allocExtDesc
*)epos
->bh
->b_data
;
2031 le32_add_cpu(&aed
->lengthAllocDescs
, adsize
);
2032 if (!UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_STRICT
) ||
2033 UDF_SB(inode
->i_sb
)->s_udfrev
>= 0x0201)
2034 udf_update_tag(epos
->bh
->b_data
,
2035 epos
->offset
+ (inc
? 0 : adsize
));
2037 udf_update_tag(epos
->bh
->b_data
,
2038 sizeof(struct allocExtDesc
));
2039 mark_buffer_dirty_inode(epos
->bh
, inode
);
/*
 * Append extent at given position - should be the first free one in inode
 * / indirect extent. Takes care of allocating and linking indirect blocks.
 */
int udf_add_aext(struct inode *inode, struct extent_position *epos,
		 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	struct super_block *sb = inode->i_sb;

	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	if (epos->offset + (2 * adsize) > sb->s_blocksize) {
		int err;
		udf_pblk_t new_block;

		new_block = udf_new_block(sb, NULL,
					  epos->block.partitionReferenceNum,
					  epos->block.logicalBlockNum, &err);
		if (!new_block)
			return -ENOSPC;

		err = udf_setup_indirect_aext(inode, new_block, epos);
		if (err)
			return err;
	}

	return __udf_add_aext(inode, epos, eloc, elen, inc);
}
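/*
 * Minimal usage sketch (illustrative only; 'newblock' stands for a block just
 * obtained from udf_new_block() and the caller is assumed to hold the inode's
 * allocation lock):
 *
 *	struct kernel_lb_addr eloc;
 *	uint32_t elen;
 *	int err;
 *
 *	eloc.logicalBlockNum = newblock;
 *	eloc.partitionReferenceNum =
 *			UDF_I(inode)->i_location.partitionReferenceNum;
 *	elen = EXT_RECORDED_ALLOCATED | inode->i_sb->s_blocksize;
 *	err = udf_add_aext(inode, &epos, &eloc, elen, 1);
 */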
void udf_write_aext(struct inode *inode, struct extent_position *epos,
		    struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh)
		ptr = iinfo->i_ext.i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
	else
		ptr = epos->bh->b_data + epos->offset;

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = (struct short_ad *)ptr;
		sad->extLength = cpu_to_le32(elen);
		sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
		adsize = sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = (struct long_ad *)ptr;
		lad->extLength = cpu_to_le32(elen);
		lad->extLocation = cpu_to_lelb(*eloc);
		memset(lad->impUse, 0x00, sizeof(lad->impUse));
		adsize = sizeof(struct long_ad);
		break;
	default:
		return;
	}

	if (epos->bh) {
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
			struct allocExtDesc *aed =
				(struct allocExtDesc *)epos->bh->b_data;
			udf_update_tag(epos->bh->b_data,
				       le32_to_cpu(aed->lengthAllocDescs) +
				       sizeof(struct allocExtDesc));
		}
		mark_buffer_dirty_inode(epos->bh, inode);
	} else {
		mark_inode_dirty(inode);
	}

	if (inc)
		epos->offset += adsize;
}
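/*
 * Reminder on the extent length encoding used above and below: the top two
 * bits of extLength hold the extent type (EXT_RECORDED_ALLOCATED,
 * EXT_NOT_RECORDED_ALLOCATED, EXT_NOT_RECORDED_NOT_ALLOCATED or
 * EXT_NEXT_EXTENT_ALLOCDESCS) and the low 30 bits the length in bytes, which
 * is why writers pass values such as
 * "sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS" and readers split the field
 * with ">> 30" and UDF_EXTENT_LENGTH_MASK.
 */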
/*
 * Only 1 indirect extent in a row really makes sense but allow up to 16 in
 * case someone does some weird stuff.
 */
#define UDF_MAX_INDIR_EXTS 16
int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int8_t etype;
	unsigned int indirections = 0;

	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
	       (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
		udf_pblk_t block;

		if (++indirections > UDF_MAX_INDIR_EXTS) {
			udf_err(inode->i_sb,
				"too many indirect extents in inode %lu\n",
				inode->i_ino);
			return -1;
		}
		epos->block = *eloc;
		epos->offset = sizeof(struct allocExtDesc);
		brelse(epos->bh);
		block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
		epos->bh = udf_tread(inode->i_sb, block);
		if (!epos->bh) {
			udf_debug("reading block %u failed!\n", block);
			return -1;
		}
	}

	return etype;
}
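/*
 * udf_next_aext() differs from udf_current_aext() below only in that it
 * transparently follows EXT_NEXT_EXTENT_ALLOCDESCS chains, loading each
 * referenced AED block into epos->bh and continuing the walk there; the
 * UDF_MAX_INDIR_EXTS limit guards against looped or corrupted chains.
 */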
int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
			struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh) {
		if (!epos->offset)
			epos->offset = udf_file_entry_alloc_offset(inode);
		ptr = iinfo->i_ext.i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
		alen = udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenAlloc;
	} else {
		if (!epos->offset)
			epos->offset = sizeof(struct allocExtDesc);
		ptr = epos->bh->b_data + epos->offset;
		alen = sizeof(struct allocExtDesc) +
			le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
							lengthAllocDescs);
	}

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
		if (!sad)
			return -1;
		etype = le32_to_cpu(sad->extLength) >> 30;
		eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
		eloc->partitionReferenceNum =
				iinfo->i_location.partitionReferenceNum;
		*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
		if (!lad)
			return -1;
		etype = le32_to_cpu(lad->extLength) >> 30;
		*eloc = lelb_to_cpu(lad->extLocation);
		*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	default:
		udf_debug("alloc_type = %u unsupported\n", iinfo->i_alloc_type);
		return -1;
	}

	return etype;
}
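/*
 * Note: short_ad descriptors store only a logical block number, so the
 * partition reference is inherited from the inode's own location, while
 * long_ad descriptors carry a full lb_addr converted with lelb_to_cpu().
 * A return value of -1 signals the end of the descriptor area or an
 * unsupported allocation type.
 */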
static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
			      struct kernel_lb_addr neloc, uint32_t nelen)
{
	struct kernel_lb_addr oeloc;
	uint32_t oelen;
	int8_t etype;

	if (epos.bh)
		get_bh(epos.bh);

	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
		udf_write_aext(inode, &epos, &neloc, nelen, 1);
		neloc = oeloc;
		nelen = (etype << 30) | oelen;
	}
	udf_add_aext(inode, &epos, &neloc, nelen, 1);
	brelse(epos.bh);

	return (nelen >> 30);
}
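/*
 * udf_insert_aext() writes the new extent into the slot at 'epos' and ripples
 * every following descriptor one slot further on, finally appending what used
 * to be the last extent via udf_add_aext() (which may allocate a new AED
 * block).  The return value is the type of the extent that ends up last.
 */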
int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
{
	struct extent_position oepos;
	int adsize;
	int8_t etype;
	struct allocExtDesc *aed;
	struct udf_inode_info *iinfo;
	struct kernel_lb_addr eloc;
	uint32_t elen;

	if (epos.bh) {
		get_bh(epos.bh);
		get_bh(epos.bh);
	}

	iinfo = UDF_I(inode);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		adsize = 0;

	oepos = epos;
	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
		return -1;

	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
		udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
		if (oepos.bh != epos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = epos.offset - adsize;
		}
	}
	memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
	elen = 0;

	if (epos.bh != oepos.bh) {
		udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= (adsize * 2);
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
					       oepos.offset - (2 * adsize));
			else
				udf_update_tag(oepos.bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	} else {
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= adsize;
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
					       epos.offset - adsize);
			else
				udf_update_tag(oepos.bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

	return (elen >> 30);
}
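/*
 * udf_delete_aext() drops the descriptor at 'epos' by copying each following
 * extent one slot back and clearing the now-unused tail.  If the shift leaves
 * a trailing AED block without useful descriptors, that block is freed and
 * two slots (the EXT_NEXT_EXTENT_ALLOCDESCS link and the stale entry) are
 * zeroed; i_lenAlloc or lengthAllocDescs is reduced accordingly.
 */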
int8_t inode_bmap(struct inode *inode, sector_t block,
		  struct extent_position *pos, struct kernel_lb_addr *eloc,
		  uint32_t *elen, sector_t *offset)
{
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits;
	int8_t etype;
	struct udf_inode_info *iinfo;

	iinfo = UDF_I(inode);
	if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
		pos->offset = 0;
		pos->block = iinfo->i_location;
		pos->bh = NULL;
	}
	*elen = 0;
	do {
		etype = udf_next_aext(inode, pos, eloc, elen, 1);
		if (etype == -1) {
			*offset = (bcount - lbcount) >> blocksize_bits;
			iinfo->i_lenExtents = lbcount;
			return -1;
		}
		lbcount += *elen;
	} while (lbcount <= bcount);
	/* update extent cache */
	udf_update_extent_cache(inode, lbcount - *elen, pos);
	*offset = (bcount + *elen - lbcount) >> blocksize_bits;

	return etype;
}
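/*
 * inode_bmap() walks the extent list until the accumulated extent length
 * covers the byte offset of 'block', returning the extent type and filling
 * eloc/elen together with *offset, the block index of 'block' within that
 * extent; callers such as udf_block_map() below turn this into a physical
 * block number with udf_get_lb_pblock().  Successful lookups are remembered
 * in the per-inode extent cache so sequential mappings avoid rescanning from
 * the start of the chain.
 */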
udf_pblk_t udf_block_map(struct inode *inode, sector_t block)
{
	struct kernel_lb_addr eloc;
	uint32_t elen;
	sector_t offset;
	struct extent_position epos = {};
	udf_pblk_t ret;

	down_read(&UDF_I(inode)->i_data_sem);

	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
			(EXT_RECORDED_ALLOCATED >> 30))
		ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
	else
		ret = 0;

	up_read(&UDF_I(inode)->i_data_sem);
	brelse(epos.bh);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
		return udf_fixed_to_variable(ret);